From e67e8bc68790de1c1cb4ca0ef958463c4ac87e7f Mon Sep 17 00:00:00 2001 From: Theo Date: Mon, 13 Jan 2020 16:16:58 +0100 Subject: [PATCH 01/10] [software] imageMatching: add "methods" param with new sequential option The new sequential option can be configured with nbNeighbors. --- src/software/pipeline/main_imageMatching.cpp | 400 ++++++++++++------- 1 file changed, 252 insertions(+), 148 deletions(-) diff --git a/src/software/pipeline/main_imageMatching.cpp b/src/software/pipeline/main_imageMatching.cpp index c1b0ba6969..efe199ee3e 100644 --- a/src/software/pipeline/main_imageMatching.cpp +++ b/src/software/pipeline/main_imageMatching.cpp @@ -33,6 +33,7 @@ static const int DIMENSION = 128; using namespace aliceVision; +using namespace aliceVision::voctree; namespace po = boost::program_options; namespace fs = boost::filesystem; @@ -96,6 +97,65 @@ std::ostream& operator<<(std::ostream& os, const OrderedPairList & pl) return os; } +/** +* @brief Mode to select the type of image matching +*/ +enum class EImageMatchingMethod +{ + EXHAUSTIVE = 0, + VOCABULARYTREE = 1, + SEQUENTIAL = 2, + SEQUENTIAL_AND_VOCABULARYTREE = 3 +}; + +/** +*@brief convert an enum EImageMatchingMethod to its corresponding string +* +*/ +inline std::string EImageMatchingMethod_enumToString(EImageMatchingMethod m) +{ + switch(m) + { + case EImageMatchingMethod::EXHAUSTIVE: return "Exhaustive"; + case EImageMatchingMethod::VOCABULARYTREE: return "VocabularyTree"; + case EImageMatchingMethod::SEQUENTIAL: return "Sequential"; + case EImageMatchingMethod::SEQUENTIAL_AND_VOCABULARYTREE: return "SequentialAndVocabularyTree"; + } + throw std::out_of_range("Invalid EImageMatchingMethod enum: " + std::to_string(int(m))); +} + +/** +* @brief convert a string treeMode to its corresponding enum treeMode +* @param String +* @return EImageMatchingMethod +*/ +inline EImageMatchingMethod EImageMatchingMethod_stringToEnum(const std::string& m) +{ + std::string mode = m; + std::transform(mode.begin(), mode.end(), 
mode.begin(), ::tolower); + + if(mode == "exhaustive") return EImageMatchingMethod::EXHAUSTIVE; + if(mode == "vocabularytree") return EImageMatchingMethod::VOCABULARYTREE; + if(mode == "sequential") return EImageMatchingMethod::SEQUENTIAL; + if(mode == "sequentialandvocabularytree") return EImageMatchingMethod::SEQUENTIAL_AND_VOCABULARYTREE; + + throw std::out_of_range("Invalid EImageMatchingMethod: " + m); +} + +inline std::ostream& operator<<(std::ostream& os, EImageMatchingMethod m) +{ + return os << EImageMatchingMethod_enumToString(m); +} + +inline std::istream& operator>>(std::istream& in, EImageMatchingMethod& m) +{ + std::string token; + in >> token; + m = EImageMatchingMethod_stringToEnum(token); + return in; +} + + /** * @brief Mode to combine image matching between two SfMDatas */ @@ -107,7 +167,6 @@ enum class EImageMatchingMode A_A }; - /** * @brief get informations about each EImageMatchingMode * @return String @@ -213,6 +272,26 @@ void convertAllMatchesToPairList(const PairList &allMatches, std::size_t numMatc } } +void generateSequentialMatches(const sfmData::SfMData& sfmData, size_t nbMatches, OrderedPairList& outPairList) +{ + std::vector> sortedImagePaths; + sortedImagePaths.reserve(sfmData.getViews().size()); + for(const auto& vIt: sfmData.getViews()) + { + sortedImagePaths.emplace_back(vIt.second->getImagePath(), vIt.first); + } + std::sort(sortedImagePaths.begin(), sortedImagePaths.end()); + for(size_t i = 0; i < sortedImagePaths.size(); ++i) + { + for(size_t n = i+1, nMax = std::min(i+nbMatches+1, sortedImagePaths.size()); n < nMax; ++n) + { + size_t a = sortedImagePaths[i].second; + size_t b = sortedImagePaths[n].second; + outPairList[std::min(a, b)].insert(std::max(a, b)); + } + } +} + void generateAllMatchesInOneMap(const std::map& descriptorsFiles, OrderedPairList& outPairList) { for(const auto& descItA: descriptorsFiles) @@ -261,7 +340,6 @@ void generateAllMatchesBetweenTwoMap(const std::map& descri } } - void 
generateFromVoctree(PairList& allMatches, const std::map& descriptorsFiles, const aliceVision::voctree::Database& db, @@ -325,6 +403,142 @@ void generateFromVoctree(PairList& allMatches, } } +void conditionVocTree(std::string treeName, bool withWeights, std::string weightsName, const EImageMatchingMode matchingMode, std::vector& featuresFolders, + sfmData::SfMData& sfmDataA, std::size_t nbMaxDescriptors, std::string sfmDataFilenameA, sfmData::SfMData& sfmDataB, std::string sfmDataFilenameB, + const bool useMultiSfM, std::map& descriptorsFilesA, std::size_t numImageQuery, OrderedPairList& selectedPairs) +{ + if(treeName.empty()) + { + throw std::runtime_error("No vocabulary tree argument."); + } + + // load vocabulary tree + ALICEVISION_LOG_INFO("Loading vocabulary tree"); + + auto loadVoctree_start = std::chrono::steady_clock::now(); + aliceVision::voctree::VocabularyTree tree(treeName); + auto loadVoctree_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - loadVoctree_start); + { + std::stringstream ss; + ss << "tree loaded with:" << std::endl << "\t- " << tree.levels() << " levels" << std::endl; + ss << "\t- " << tree.splits() << " branching factor" << std::endl; + ss << "\tin " << loadVoctree_elapsed.count() << " seconds" << std::endl; + ALICEVISION_LOG_INFO(ss.str()); + } + + // create the databases + ALICEVISION_LOG_INFO("Creating the databases..."); + + // add each object (document) to the database + aliceVision::voctree::Database db(tree.words()); + aliceVision::voctree::Database db2; + + if(withWeights) + { + ALICEVISION_LOG_INFO("Loading weights..."); + db.loadWeights(weightsName); + } + else + { + ALICEVISION_LOG_INFO("No weights specified, skipping..."); + } + + if(matchingMode == EImageMatchingMode::A_A_AND_A_B) + db2 = db; // initialize database2 with database1 initialization + + // read the descriptors and populate the databases + { + std::stringstream ss; + + for(const std::string& featuresFolder : featuresFolders) + ss << "\t- " 
<< featuresFolder << std::endl; + + ALICEVISION_LOG_INFO("Reading descriptors from: " << std::endl << ss.str()); + + std::size_t nbFeaturesLoadedInputA = 0; + std::size_t nbFeaturesLoadedInputB = 0; + std::size_t nbSetDescriptors = 0; + + auto detect_start = std::chrono::steady_clock::now(); + { + if((matchingMode == EImageMatchingMode::A_A_AND_A_B) || + (matchingMode == EImageMatchingMode::A_AB) || + (matchingMode == EImageMatchingMode::A_A)) + { + nbFeaturesLoadedInputA = voctree::populateDatabase(sfmDataA, featuresFolders, tree, db, nbMaxDescriptors); + nbSetDescriptors = db.getSparseHistogramPerImage().size(); + + if(nbFeaturesLoadedInputA == 0) + { + throw std::runtime_error("No descriptors loaded in '" + sfmDataFilenameA + "'"); + } + } + + if((matchingMode == EImageMatchingMode::A_AB) || + (matchingMode == EImageMatchingMode::A_B)) + { + nbFeaturesLoadedInputB = voctree::populateDatabase(sfmDataB, featuresFolders, tree, db, nbMaxDescriptors); + nbSetDescriptors = db.getSparseHistogramPerImage().size(); + } + + if(matchingMode == EImageMatchingMode::A_A_AND_A_B) + { + nbFeaturesLoadedInputB = voctree::populateDatabase(sfmDataB, featuresFolders, tree, db2, nbMaxDescriptors); + nbSetDescriptors += db2.getSparseHistogramPerImage().size(); + } + + if(useMultiSfM && (nbFeaturesLoadedInputB == 0)) + { + throw std::runtime_error("No descriptors loaded in '" + sfmDataFilenameB + "'"); + } + } + auto detect_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - detect_start); + + ALICEVISION_LOG_INFO("Read " << nbSetDescriptors << " sets of descriptors for a total of " << (nbFeaturesLoadedInputA + nbFeaturesLoadedInputB) << " features"); + ALICEVISION_LOG_INFO("Reading took " << detect_elapsed.count() << " sec."); + } + + if(!withWeights) + { + // compute and save the word weights + ALICEVISION_LOG_INFO("Computing weights..."); + + db.computeTfIdfWeights(); + + if(matchingMode == EImageMatchingMode::A_A_AND_A_B) + db2.computeTfIdfWeights(); + } + + { 
+ PairList allMatches; + + ALICEVISION_LOG_INFO("Query all documents"); + + auto detect_start = std::chrono::steady_clock::now(); + + if(matchingMode == EImageMatchingMode::A_A_AND_A_B) + { + generateFromVoctree(allMatches, descriptorsFilesA, db, tree, EImageMatchingMode::A_A, nbMaxDescriptors, numImageQuery); + generateFromVoctree(allMatches, descriptorsFilesA, db2, tree, EImageMatchingMode::A_B, nbMaxDescriptors, numImageQuery); + } + else + { + generateFromVoctree(allMatches, descriptorsFilesA, db, tree, matchingMode, nbMaxDescriptors, numImageQuery); + } + + auto detect_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - detect_start); + ALICEVISION_LOG_INFO("Query all documents took " << detect_elapsed.count() << " sec."); + + // process pair list + detect_start = std::chrono::steady_clock::now(); + + ALICEVISION_LOG_INFO("Convert all matches to pairList"); + convertAllMatchesToPairList(allMatches, numImageQuery, selectedPairs); + detect_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - detect_start); + ALICEVISION_LOG_INFO("Convert all matches to pairList took " << detect_elapsed.count() << " sec."); + } +} + int main(int argc, char** argv) { // command-line parameters @@ -336,23 +550,26 @@ int main(int argc, char** argv) /// the folder(s) containing the extracted features with their associated descriptors std::vector featuresFolders; /// the filename of the voctree - std::string treeName; + std::string treeName="Vocabulary Tree"; /// the file in which to save the results std::string outputFile; // user optional parameters - + EImageMatchingMethod method = EImageMatchingMethod::VOCABULARYTREE; /// minimal number of images to use the vocabulary tree std::size_t minNbImages = 200; /// the file containing the list of features std::size_t nbMaxDescriptors = 500; - /// the number of matches to retrieve for each image + /// the number of matches to retrieve for each image in Vocabulary Tree Mode std::size_t numImageQuery = 
50; + /// the number of neighbors to retrieve for each image in Sequential Mode + std::size_t numImageQuerySequential = 50; /// the filename for the voctree weights std::string weightsName; /// flag for the optional weights file bool withWeights = false; + // multiple SfM parameters /// a second file containing a list of features @@ -377,7 +594,7 @@ int main(int argc, char** argv) "SfMData file.") ("featuresFolders,f", po::value>(&featuresFolders)->multitoken()->required(), "Path to folder(s) containing the extracted features.") - ("tree,t", po::value(&treeName), + ("tree,t", po::value(&treeName)->default_value(treeName), "Input file path of the vocabulary tree. This file can be generated by createVoctree. " "This software is intended to be used with a generic, pre-trained vocabulary tree.") ("output,o", po::value(&outputFile)->required(), @@ -385,6 +602,8 @@ int main(int argc, char** argv) po::options_description optionalParams("Optional parameters"); optionalParams.add_options() + ("method", po::value(&method)->default_value(method), + "Method used to select the image pairs to match.") ("minNbImages", po::value(&minNbImages)->default_value(minNbImages), "Minimal number of images to use the vocabulary tree. 
If we have less images than this threshold, we will compute all matching combinations.") ("maxDescriptors", po::value(&nbMaxDescriptors)->default_value(nbMaxDescriptors), @@ -392,6 +611,9 @@ int main(int argc, char** argv) ("nbMatches", po::value(&numImageQuery)->default_value(numImageQuery), "The number of matches to retrieve for each image (If 0 it will " "retrieve all the matches).") + ("nbNeighbors", po::value(&numImageQuerySequential)->default_value(numImageQuerySequential), + "The number of neighbors to retrieve for each image (If 0 it will " + "retrieve all the neighbors).") ("weights,w", po::value(&weightsName), "Input name for the vocabulary tree weight file, if not provided all voctree leaves will have the same weight."); @@ -488,156 +710,38 @@ int main(int argc, char** argv) if(useMultiSfM) aliceVision::voctree::getListOfDescriptorFiles(sfmDataB, featuresFolders, descriptorsFilesB); - if(treeName.empty() && (descriptorsFilesA.size() + descriptorsFilesB.size()) > 200) - ALICEVISION_LOG_WARNING("No vocabulary tree argument, so it will use the brute force approach which can be compute intensive for aliceVision_featureMatching."); - - if(treeName.empty() || (descriptorsFilesA.size() + descriptorsFilesB.size()) < minNbImages) - { - ALICEVISION_LOG_INFO("Brute force generation"); - - if((matchingMode == EImageMatchingMode::A_A_AND_A_B) || - (matchingMode == EImageMatchingMode::A_AB) || - (matchingMode == EImageMatchingMode::A_A)) - generateAllMatchesInOneMap(descriptorsFilesA, selectedPairs); - - if((matchingMode == EImageMatchingMode::A_A_AND_A_B) || - (matchingMode == EImageMatchingMode::A_AB) || - (matchingMode == EImageMatchingMode::A_B)) - generateAllMatchesBetweenTwoMap(descriptorsFilesA, descriptorsFilesB, selectedPairs); - } - - // if selectedPairs is not already computed by a brute force approach, - // we compute it with the vocabulary tree approach. 
- if(selectedPairs.empty()) + switch(method) { - // load vocabulary tree - ALICEVISION_LOG_INFO("Loading vocabulary tree"); - - auto loadVoctree_start = std::chrono::steady_clock::now(); - aliceVision::voctree::VocabularyTree tree(treeName); - auto loadVoctree_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - loadVoctree_start); - { - std::stringstream ss; - ss << "tree loaded with:" << std::endl << "\t- " << tree.levels() << " levels" << std::endl; - ss << "\t- " << tree.splits() << " branching factor" << std::endl; - ss << "\tin " << loadVoctree_elapsed.count() << " seconds" << std::endl; - ALICEVISION_LOG_INFO(ss.str()); - } - - // create the databases - ALICEVISION_LOG_INFO("Creating the databases..."); - - // add each object (document) to the database - aliceVision::voctree::Database db(tree.words()); - aliceVision::voctree::Database db2; - - if(withWeights) - { - ALICEVISION_LOG_INFO("Loading weights..."); - db.loadWeights(weightsName); - } - else + case EImageMatchingMethod::EXHAUSTIVE: { - ALICEVISION_LOG_INFO("No weights specified, skipping..."); + if((matchingMode == EImageMatchingMode::A_A_AND_A_B) || + (matchingMode == EImageMatchingMode::A_AB) || + (matchingMode == EImageMatchingMode::A_A)) + generateAllMatchesInOneMap(descriptorsFilesA, selectedPairs); + + if((matchingMode == EImageMatchingMode::A_A_AND_A_B) || + (matchingMode == EImageMatchingMode::A_AB) || + (matchingMode == EImageMatchingMode::A_B)) + generateAllMatchesBetweenTwoMap(descriptorsFilesA, descriptorsFilesB, selectedPairs); + break; } - - if(matchingMode == EImageMatchingMode::A_A_AND_A_B) - db2 = db; // initialize database2 with database1 initialization - - // read the descriptors and populate the databases + case EImageMatchingMethod::VOCABULARYTREE: { - std::stringstream ss; - - for(const std::string& featuresFolder : featuresFolders) - ss << "\t- " << featuresFolder << std::endl; - - ALICEVISION_LOG_INFO("Reading descriptors from: " << std::endl << ss.str()); - 
- std::size_t nbFeaturesLoadedInputA = 0; - std::size_t nbFeaturesLoadedInputB = 0; - std::size_t nbSetDescriptors = 0; - - auto detect_start = std::chrono::steady_clock::now(); - { - - if((matchingMode == EImageMatchingMode::A_A_AND_A_B) || - (matchingMode == EImageMatchingMode::A_AB) || - (matchingMode == EImageMatchingMode::A_A)) - { - nbFeaturesLoadedInputA = voctree::populateDatabase(sfmDataA, featuresFolders, tree, db, nbMaxDescriptors); - nbSetDescriptors = db.getSparseHistogramPerImage().size(); - - if(nbFeaturesLoadedInputA == 0) - { - ALICEVISION_LOG_ERROR("No descriptors loaded in '" + sfmDataFilenameA + "'"); - return EXIT_FAILURE; - } - } - - if((matchingMode == EImageMatchingMode::A_AB) || - (matchingMode == EImageMatchingMode::A_B)) - { - nbFeaturesLoadedInputB = voctree::populateDatabase(sfmDataB, featuresFolders, tree, db, nbMaxDescriptors); - nbSetDescriptors = db.getSparseHistogramPerImage().size(); - } - - if(matchingMode == EImageMatchingMode::A_A_AND_A_B) - { - nbFeaturesLoadedInputB = voctree::populateDatabase(sfmDataB, featuresFolders, tree, db2, nbMaxDescriptors); - nbSetDescriptors += db2.getSparseHistogramPerImage().size(); - } - - if(useMultiSfM && (nbFeaturesLoadedInputB == 0)) - { - ALICEVISION_LOG_ERROR("No descriptors loaded in '" + sfmDataFilenameB + "'"); - return EXIT_FAILURE; - } - } - - auto detect_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - detect_start); - - ALICEVISION_LOG_INFO("Read " << nbSetDescriptors << " sets of descriptors for a total of " << (nbFeaturesLoadedInputA + nbFeaturesLoadedInputB) << " features"); - ALICEVISION_LOG_INFO("Reading took " << detect_elapsed.count() << " sec."); + conditionVocTree(treeName, withWeights, weightsName, matchingMode,featuresFolders, sfmDataA, nbMaxDescriptors, sfmDataFilenameA, sfmDataB, + sfmDataFilenameB, useMultiSfM, descriptorsFilesA, numImageQuery, selectedPairs); + break; } - - if(!withWeights) + case EImageMatchingMethod::SEQUENTIAL: { - // compute 
and save the word weights - ALICEVISION_LOG_INFO("Computing weights..."); - - db.computeTfIdfWeights(); - - if(matchingMode == EImageMatchingMode::A_A_AND_A_B) - db2.computeTfIdfWeights(); + generateSequentialMatches(sfmDataA, numImageQuerySequential, selectedPairs); + break; } - + case EImageMatchingMethod::SEQUENTIAL_AND_VOCABULARYTREE: { - PairList allMatches; - - ALICEVISION_LOG_INFO("Query all documents"); - - auto detect_start = std::chrono::steady_clock::now(); - - if(matchingMode == EImageMatchingMode::A_A_AND_A_B) - { - generateFromVoctree(allMatches, descriptorsFilesA, db, tree, EImageMatchingMode::A_A, nbMaxDescriptors, numImageQuery); - generateFromVoctree(allMatches, descriptorsFilesA, db2, tree, EImageMatchingMode::A_B, nbMaxDescriptors, numImageQuery); - } - else - { - generateFromVoctree(allMatches, descriptorsFilesA, db, tree, matchingMode, nbMaxDescriptors, numImageQuery); - } - - auto detect_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - detect_start); - ALICEVISION_LOG_INFO("Query all documents took " << detect_elapsed.count() << " sec."); - - // process pair list - detect_start = std::chrono::steady_clock::now(); - - ALICEVISION_LOG_INFO("Convert all matches to pairList"); - convertAllMatchesToPairList(allMatches, numImageQuery, selectedPairs); - detect_elapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - detect_start); - ALICEVISION_LOG_INFO("Convert all matches to pairList took " << detect_elapsed.count() << " sec."); + generateSequentialMatches(sfmDataA, numImageQuerySequential, selectedPairs); + conditionVocTree(treeName, withWeights, weightsName, matchingMode,featuresFolders, sfmDataA, nbMaxDescriptors, sfmDataFilenameA, sfmDataB, + sfmDataFilenameB, useMultiSfM, descriptorsFilesA, numImageQuery, selectedPairs); + break; } } From d8d68169521f0b7cbe6e9a95c48ebb9add73b956 Mon Sep 17 00:00:00 2001 From: Theo Date: Tue, 14 Jan 2020 12:14:30 +0100 Subject: [PATCH 02/10] [software] ImageMatching : Add 
method using frustum intersections --- src/software/pipeline/CMakeLists.txt | 1 + src/software/pipeline/main_imageMatching.cpp | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/software/pipeline/CMakeLists.txt b/src/software/pipeline/CMakeLists.txt index aae7ae4147..37952ca4eb 100644 --- a/src/software/pipeline/CMakeLists.txt +++ b/src/software/pipeline/CMakeLists.txt @@ -46,6 +46,7 @@ if(ALICEVISION_BUILD_SFM) SOURCE main_imageMatching.cpp FOLDER ${FOLDER_SOFTWARE_PIPELINE} LINKS aliceVision_system + aliceVision_sfm aliceVision_sfmData aliceVision_sfmDataIO aliceVision_voctree diff --git a/src/software/pipeline/main_imageMatching.cpp b/src/software/pipeline/main_imageMatching.cpp index efe199ee3e..4f21666f22 100644 --- a/src/software/pipeline/main_imageMatching.cpp +++ b/src/software/pipeline/main_imageMatching.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -105,7 +106,8 @@ enum class EImageMatchingMethod EXHAUSTIVE = 0, VOCABULARYTREE = 1, SEQUENTIAL = 2, - SEQUENTIAL_AND_VOCABULARYTREE = 3 + SEQUENTIAL_AND_VOCABULARYTREE = 3, + FRUSTUM=4 }; /** @@ -120,6 +122,7 @@ inline std::string EImageMatchingMethod_enumToString(EImageMatchingMethod m) case EImageMatchingMethod::VOCABULARYTREE: return "VocabularyTree"; case EImageMatchingMethod::SEQUENTIAL: return "Sequential"; case EImageMatchingMethod::SEQUENTIAL_AND_VOCABULARYTREE: return "SequentialAndVocabularyTree"; + case EImageMatchingMethod::FRUSTUM: return "Frustum"; } throw std::out_of_range("Invalid EImageMatchingMethod enum: " + std::to_string(int(m))); } @@ -138,6 +141,7 @@ inline EImageMatchingMethod EImageMatchingMethod_stringToEnum(const std::string& if(mode == "vocabularytree") return EImageMatchingMethod::VOCABULARYTREE; if(mode == "sequential") return EImageMatchingMethod::SEQUENTIAL; if(mode == "sequentialandvocabularytree") return EImageMatchingMethod::SEQUENTIAL_AND_VOCABULARYTREE; + if(mode == "frustum") return 
EImageMatchingMethod::FRUSTUM; throw std::out_of_range("Invalid EImageMatchingMethod: " + m); } @@ -743,6 +747,15 @@ int main(int argc, char** argv) sfmDataFilenameB, useMultiSfM, descriptorsFilesA, numImageQuery, selectedPairs); break; } + case EImageMatchingMethod::FRUSTUM: + { + PairSet pairs = sfm::FrustumFilter(sfmDataA).getFrustumIntersectionPairs(); + for(PairSet::iterator it=pairs.begin(); it != pairs.end(); it++) + { + selectedPairs[it->first].insert(it->second); + } + break; + } } // check if the output folder exists From 5da54c738284e4129acd83fbd8ac74b4a55041b5 Mon Sep 17 00:00:00 2001 From: Theo Date: Fri, 17 Jan 2020 16:52:52 +0100 Subject: [PATCH 03/10] [software] featureMatching: add options matchFromKnownCameraPoses with knownPosesGeometricErrorMax --- .../StructureEstimationFromKnownPoses.cpp | 27 +++---- .../StructureEstimationFromKnownPoses.hpp | 14 ++-- .../main_computeStructureFromKnownPoses.cpp | 9 ++- .../pipeline/main_featureMatching.cpp | 77 +++++++++++++++---- 4 files changed, 90 insertions(+), 37 deletions(-) diff --git a/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.cpp b/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.cpp index b882de0bf6..94a0993b8d 100644 --- a/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.cpp +++ b/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.cpp @@ -58,23 +58,25 @@ void PointsToMat( } /// Use geometry of the views to compute a putative structure from features and descriptors. 
-void StructureEstimationFromKnownPoses::run( - SfMData& sfmData, +void StructureEstimationFromKnownPoses::run(SfMData& sfmData, const PairSet& pairs, - const feature::RegionsPerView& regionsPerView) + const feature::RegionsPerView& regionsPerView, + double geometricErrorMax) { sfmData.structure.clear(); - match(sfmData, pairs, regionsPerView); + match(sfmData, pairs, regionsPerView, geometricErrorMax); filter(sfmData, pairs, regionsPerView); triangulate(sfmData, regionsPerView); } +// #define ALICEVISION_EXHAUSTIVE_MATCHING + /// Use guided matching to find corresponding 2-view correspondences -void StructureEstimationFromKnownPoses::match( - const SfMData& sfmData, +void StructureEstimationFromKnownPoses::match(const SfMData& sfmData, const PairSet& pairs, - const feature::RegionsPerView& regionsPerView) + const feature::RegionsPerView& regionsPerView, + double geometricErrorMax) { boost::progress_display my_progress_bar( pairs.size(), std::cout, "Compute pairwise fundamental guided matching:\n" ); @@ -104,23 +106,22 @@ void StructureEstimationFromKnownPoses::match( const Mat34 P_R = iterIntrinsicR->second.get()->get_projective_equivalent(poseR); const Mat3 F_lr = F_from_P(P_L, P_R); - const double thresholdF = 4.0; std::vector commonDescTypes = regionsPerView.getCommonDescTypes(*it); matching::MatchesPerDescType allImagePairMatches; for(feature::EImageDescriberType descType: commonDescTypes) { std::vector matches; - #ifdef EXHAUSTIVE_MATCHING + #ifdef ALICEVISION_EXHAUSTIVE_MATCHING robustEstimation::GuidedMatching ( F_lr, iterIntrinsicL->second.get(), - regionsPerView.getRegions(it->first), + regionsPerView.getRegions(it->first, descType), iterIntrinsicR->second.get(), - regionsPerView.getRegions(it->second), - descType, + regionsPerView.getRegions(it->second, descType), + // descType, Square(thresholdF), Square(0.8), matches ); @@ -139,7 +140,7 @@ void StructureEstimationFromKnownPoses::match( regionsPerView.getRegions(it->second, descType), 
iterIntrinsicR->second->w(), iterIntrinsicR->second->h(), //descType, - Square(thresholdF), Square(0.8), + Square(geometricErrorMax), Square(0.8), matches ); #endif diff --git a/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.hpp b/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.hpp index 28434439a9..e17a3f7281 100644 --- a/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.hpp +++ b/src/aliceVision/sfm/pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.hpp @@ -19,18 +19,18 @@ class StructureEstimationFromKnownPoses public: /// Use geometry of the views to compute a putative structure from features and descriptors. - void run( - sfmData::SfMData& sfmData, + void run(sfmData::SfMData& sfmData, const PairSet& pairs, - const feature::RegionsPerView& regionsPerView); + const feature::RegionsPerView& regionsPerView, + double geometricErrorMax); public: /// Use guided matching to find corresponding 2-view correspondences - void match( - const sfmData::SfMData& sfmData, + void match(const sfmData::SfMData& sfmData, const PairSet& pairs, - const feature::RegionsPerView& regionsPerView); + const feature::RegionsPerView& regionsPerView, + double geometricErrorMax); /// Filter inconsistent correspondences by using 3-view correspondences on view triplets void filter( @@ -43,6 +43,8 @@ class StructureEstimationFromKnownPoses sfmData::SfMData& sfmData, const feature::RegionsPerView& regionsPerView); + const matching::PairwiseMatches& getPutativesMatches() const { return _putativeMatches; } + private: //-- // DATA (temporary) diff --git a/src/software/pipeline/main_computeStructureFromKnownPoses.cpp b/src/software/pipeline/main_computeStructureFromKnownPoses.cpp index 5e1d8a0c50..2509c23877 100644 --- a/src/software/pipeline/main_computeStructureFromKnownPoses.cpp +++ b/src/software/pipeline/main_computeStructureFromKnownPoses.cpp @@ -37,7 +37,7 @@ int 
main(int argc, char **argv) std::string sfmDataFilename; std::string outSfMDataFilename; std::vector featuresFolders; - + double geometricErrorMax = 5.0; // user optional parameters std::string describerTypesName = feature::EImageDescriberType_enumToString(feature::EImageDescriberType::SIFT); @@ -59,7 +59,10 @@ int main(int argc, char **argv) ("describerTypes,d", po::value(&describerTypesName)->default_value(describerTypesName), feature::EImageDescriberType_informations().c_str()) ("matchesFolders,m", po::value>(&matchesFolders)->multitoken()->required(), - "Path to folder(s) in which computed matches are stored."); + "Path to folder(s) in which computed matches are stored.") + ("geometricErrorMax", po::value(&geometricErrorMax)->default_value(geometricErrorMax), + "Maximum error (in pixels) allowed for features matching during geometric verification for known camera poses. " + "If set to 0 it lets the ACRansac select an optimal value."); po::options_description logParams("Log parameters"); logParams.add_options() @@ -152,7 +155,7 @@ int main(int argc, char **argv) // compute Structure from known camera poses sfm::StructureEstimationFromKnownPoses structureEstimator; - structureEstimator.match(sfmData, pairs, regionsPerView); + structureEstimator.match(sfmData, pairs, regionsPerView, geometricErrorMax); // unload descriptors before triangulation regionsPerView.clearDescriptors(); diff --git a/src/software/pipeline/main_featureMatching.cpp b/src/software/pipeline/main_featureMatching.cpp index feb4e9a888..c95dc5636a 100644 --- a/src/software/pipeline/main_featureMatching.cpp +++ b/src/software/pipeline/main_featureMatching.cpp @@ -1,4 +1,4 @@ -// This file is part of the AliceVision project. +// This file is part of the AliceVision project. // Copyright (c) 2015 AliceVision contributors. // Copyright (c) 2012 openMVG contributors. 
// This Source Code Form is subject to the terms of the Mozilla Public License, @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -105,6 +106,7 @@ int main(int argc, char **argv) std::string nearestMatchingMethod = "ANN_L2"; robustEstimation::ERobustEstimator geometricEstimator = robustEstimation::ERobustEstimator::ACRANSAC; double geometricErrorMax = 0.0; //< the maximum reprojection error allowed for image matching with geometric validation + double knownPosesGeometricErrorMax = 4.0; bool savePutativeMatches = false; bool guidedMatching = false; int maxIteration = 2048; @@ -112,6 +114,7 @@ int main(int argc, char **argv) size_t numMatchesToKeep = 0; bool useGridSort = true; bool exportDebugFiles = false; + bool matchFromKnownCameraPoses = false; const std::string fileExtension = "txt"; po::options_description allParams( @@ -153,8 +156,14 @@ int main(int argc, char **argv) "* acransac: A-Contrario Ransac\n" "* loransac: LO-Ransac (only available for fundamental matrix). Need to set '--geometricError'") ("geometricError", po::value(&geometricErrorMax)->default_value(geometricErrorMax), - "Maximum error (in pixels) allowed for features matching during geometric verification. " - "If set to 0 it lets the ACRansac select an optimal value.") + "Maximum error (in pixels) allowed for features matching during geometric verification. " + "If set to 0 it lets the ACRansac select an optimal value.") + ("matchFromKnownCameraPoses", po::value(&matchFromKnownCameraPoses)->default_value(matchFromKnownCameraPoses), + "Enable the usage of geometric information from known camera poses to guide the feature matching. 
" + "If some cameras have unknown poses (so there is no geometric prior), the standard feature matching will be performed.") + ("knownPosesGeometricErrorMax", po::value(&knownPosesGeometricErrorMax)->default_value(knownPosesGeometricErrorMax), + "Maximum error (in pixels) allowed for features matching guided by geometric information from known camera poses. " + "If set to 0 it lets the ACRansac select an optimal value.") ("savePutativeMatches", po::value(&savePutativeMatches)->default_value(savePutativeMatches), "Save putative matches.") ("guidedMatching", po::value(&guidedMatching)->default_value(guidedMatching), @@ -242,7 +251,7 @@ int main(int argc, char **argv) // a. Load SfMData (image view & intrinsics data) SfMData sfmData; - if(!sfmDataIO::Load(sfmData, sfmDataFilename, sfmDataIO::ESfMData(sfmDataIO::VIEWS|sfmDataIO::INTRINSICS))) + if(!sfmDataIO::Load(sfmData, sfmDataFilename, sfmDataIO::ESfMData(sfmDataIO::VIEWS|sfmDataIO::INTRINSICS|sfmDataIO::EXTRINSICS))) { ALICEVISION_LOG_ERROR("The input SfMData file '" << sfmDataFilename << "' cannot be read."); return EXIT_FAILURE; @@ -283,8 +292,6 @@ int main(int argc, char **argv) filter.insert(pair.second); } - ALICEVISION_LOG_INFO("Putative matches"); - PairwiseMatches mapPutativesMatches; // allocate the right Matcher according the Matching requested method @@ -293,7 +300,9 @@ int main(int argc, char **argv) const std::vector describerTypes = feature::EImageDescriberType_stringToEnums(describerTypesName); - ALICEVISION_LOG_INFO("There are " + std::to_string(sfmData.getViews().size()) + " views and " + std::to_string(pairs.size()) + " image pairs."); + ALICEVISION_LOG_INFO("There are " << sfmData.getViews().size() << " views and " << pairs.size() << " image pairs."); + + ALICEVISION_LOG_INFO("Load features and descriptors"); // load the corresponding view regions RegionsPerView regionPerView; @@ -305,22 +314,60 @@ int main(int argc, char **argv) // perform the matching system::Timer timer; + PairSet 
pairsPoseKnown; + PairSet pairsPoseUnknown; + + if(matchFromKnownCameraPoses) + { + for(const auto& p: pairs) + { + if(sfmData.isPoseAndIntrinsicDefined(p.first) && sfmData.isPoseAndIntrinsicDefined(p.second)) + { + pairsPoseKnown.insert(p); + } + else + { + pairsPoseUnknown.insert(p); + } + } + } + else + { + pairsPoseUnknown = pairs; + } + + if(!pairsPoseKnown.empty()) + { + // compute matches from known camera poses when you have an initialization on the camera poses + ALICEVISION_LOG_INFO("Putative matches from known poses: " << pairsPoseKnown.size() << " image pairs."); + + sfm::StructureEstimationFromKnownPoses structureEstimator; + structureEstimator.match(sfmData, pairsPoseKnown, regionPerView, knownPosesGeometricErrorMax); + mapPutativesMatches = structureEstimator.getPutativesMatches(); + } - for(const feature::EImageDescriberType descType : describerTypes) + if(!pairsPoseUnknown.empty()) { - assert(descType != feature::EImageDescriberType::UNINITIALIZED); - ALICEVISION_LOG_INFO(EImageDescriberType_enumToString(descType) + " Regions Matching"); + ALICEVISION_LOG_INFO("Putative matches (unknown poses): " << pairsPoseUnknown.size() << " image pairs."); + // match feature descriptors between them without geometric notion + + for(const feature::EImageDescriberType descType : describerTypes) + { + assert(descType != feature::EImageDescriberType::UNINITIALIZED); + ALICEVISION_LOG_INFO(EImageDescriberType_enumToString(descType) + " Regions Matching"); + + // photometric matching of putative pairs + imageCollectionMatcher->Match(regionPerView, pairsPoseUnknown, descType, mapPutativesMatches); - // photometric matching of putative pairs - imageCollectionMatcher->Match(regionPerView, pairs, descType, mapPutativesMatches); + // TODO: DELI + // if(!guided_matching) regionPerView.clearDescriptors() + } - // TODO: DELI - // if(!guided_matching) regionPerView.clearDescriptors() } if(mapPutativesMatches.empty()) { - ALICEVISION_LOG_INFO("No putative matches."); + 
ALICEVISION_LOG_INFO("No putative feature matches."); // If we only compute a selection of matches, we may have no match. return rangeSize ? EXIT_SUCCESS : EXIT_FAILURE; } From c0ddcc9d4ffbcb03eed0139e1ad3fb69d51daaee Mon Sep 17 00:00:00 2001 From: Theo Date: Fri, 17 Jan 2020 17:18:08 +0100 Subject: [PATCH 04/10] [software] imageMatching : Add the method Frustum for a panorama scene --- src/aliceVision/geometry/Frustum.hpp | 77 +++++----- .../geometry/frustumIntersection_test.cpp | 144 +++++++++++++++++- src/aliceVision/sfm/FrustumFilter.cpp | 16 +- src/software/pipeline/main_imageMatching.cpp | 14 +- 4 files changed, 196 insertions(+), 55 deletions(-) diff --git a/src/aliceVision/geometry/Frustum.hpp b/src/aliceVision/geometry/Frustum.hpp index 3a5d3e374b..360bf32f9e 100644 --- a/src/aliceVision/geometry/Frustum.hpp +++ b/src/aliceVision/geometry/Frustum.hpp @@ -23,15 +23,17 @@ struct Frustum { Vec3 cones[5]; // camera centre and the 4 points that define the image plane Half_planes planes; // Define infinite frustum planes + 2 optional Near and Far Half Space - double z_near, z_far; + double z_near = -1., z_far = -1.; std::vector points; - Frustum() : z_near(-1.), z_far(-1.) {} + Frustum() {} // Build a frustum from the image size, camera intrinsic and pose - Frustum(const int w, const int h, const Mat3 & K, const Mat3 & R, const Vec3 & C) - : z_near(-1.), z_far(-1.) 
+ Frustum(const int w, const int h, const Mat3 & K, const Mat3 & R, const Vec3 & C, const double zNear = -1.0, const double zFar = -1.0) + : z_near(zNear) + , z_far(zFar) { + // supporting point are the points defined by the truncated cone const Mat3 Kinv = K.inverse(); const Mat3 Rt = R.transpose(); @@ -48,40 +50,40 @@ struct Frustum planes.push_back( Half_plane_p(cones[0], cones[2], cones[3]) ); planes.push_back( Half_plane_p(cones[0], cones[3], cones[4]) ); - // supporting point for drawing is a normalized cone, since infinity cannot be represented - points = std::vector(&cones[0], &cones[0]+5); - } - - Frustum(const int w, const int h, const Mat3 & K, const Mat3 & R, const Vec3 & C, const double zNear, const double zFar) - { - *this = Frustum(w,h,K,R,C); - - // update near & far planes & clear set points - z_near = zNear; - z_far = zFar; - points.clear(); - assert(zFar >= zNear); - // Add Znear and ZFar half plane using the cam looking direction const Vec3 camLookDirection_n = R.row(2).normalized(); - const double d_near = - zNear - camLookDirection_n.dot(C); - planes.push_back( Half_plane(camLookDirection_n, d_near) ); - - const double d_Far = zFar + camLookDirection_n.dot(C); - planes.push_back( Half_plane(-camLookDirection_n, d_Far) ); - // supporting point are the points defined by the truncated cone - const Mat3 Kinv = K.inverse(); - const Mat3 Rt = R.transpose(); - points.push_back( Rt * (z_near * (Kinv * Vec3(0,0,1.0))) + C); - points.push_back( Rt * (z_near * (Kinv * Vec3(w,0,1.0))) + C); - points.push_back( Rt * (z_near * (Kinv * Vec3(w,h,1.0))) + C); - points.push_back( Rt * (z_near * (Kinv * Vec3(0,h,1.0))) + C); - - points.push_back( Rt * (z_far * (Kinv * Vec3(0,0,1.0))) + C); - points.push_back( Rt * (z_far * (Kinv * Vec3(w,0,1.0))) + C); - points.push_back( Rt * (z_far * (Kinv * Vec3(w,h,1.0))) + C); - points.push_back( Rt * (z_far * (Kinv * Vec3(0,h,1.0))) + C); + if(zNear > 0) + { + const double d_near = - zNear - camLookDirection_n.dot(C); + 
planes.push_back( Half_plane(camLookDirection_n, d_near) ); + + points.push_back( Rt * (z_near * (Kinv * Vec3(0,0,1.0))) + C); + points.push_back( Rt * (z_near * (Kinv * Vec3(w,0,1.0))) + C); + points.push_back( Rt * (z_near * (Kinv * Vec3(w,h,1.0))) + C); + points.push_back( Rt * (z_near * (Kinv * Vec3(0,h,1.0))) + C); + } + else + { + points.push_back(cones[0]); + } + if(zFar > 0) + { + const double d_Far = zFar + camLookDirection_n.dot(C); + planes.push_back( Half_plane(-camLookDirection_n, d_Far) ); + + points.push_back( Rt * (z_far * (Kinv * Vec3(0,0,1.0))) + C); + points.push_back( Rt * (z_far * (Kinv * Vec3(w,0,1.0))) + C); + points.push_back( Rt * (z_far * (Kinv * Vec3(w,h,1.0))) + C); + points.push_back( Rt * (z_far * (Kinv * Vec3(0,h,1.0))) + C); + } + else + { + points.push_back(cones[1]); + points.push_back(cones[2]); + points.push_back(cones[3]); + points.push_back(cones[4]); + } } /// Test if two frustums intersect or not @@ -107,6 +109,11 @@ struct Frustum return planes.size() == 6; } + bool isPartiallyTruncated() const + { + return planes.size() == 5; + } + // Return the supporting frustum points (5 for the infinite, 8 for the truncated) const std::vector & frustum_points() const { diff --git a/src/aliceVision/geometry/frustumIntersection_test.cpp b/src/aliceVision/geometry/frustumIntersection_test.cpp index 903fd129a4..f7a4085a38 100644 --- a/src/aliceVision/geometry/frustumIntersection_test.cpp +++ b/src/aliceVision/geometry/frustumIntersection_test.cpp @@ -20,7 +20,7 @@ using namespace aliceVision; using namespace aliceVision::geometry; using namespace aliceVision::geometry::halfPlane; -using namespace std; + //-- // Camera frustum intersection unit test @@ -81,6 +81,24 @@ BOOST_AUTO_TEST_CASE(intersection) for (int j = 0; j < iNviews; ++j) BOOST_CHECK(vec_frustum[i].intersect(vec_frustum[j])); } + + //Test with partially truncated frustum + { + //Build frustum with near and far plane defined by a different value + std::vector vec_frustum; + for 
(int i=0; i < iNviews; ++i) + { + vec_frustum.push_back( + Frustum(principal_Point*2, principal_Point*2, + d._K[i], d._R[i], d._C[i], 0.1, -1.0)); + BOOST_CHECK(vec_frustum[i].isPartiallyTruncated()); + } + + // Check that frustums have an overlap + for (int i = 0; i < iNviews; ++i) + for (int j = 0; j < iNviews; ++j) + BOOST_CHECK(vec_frustum[i].intersect(vec_frustum[j])); + } } BOOST_AUTO_TEST_CASE(empty_intersection) @@ -169,3 +187,127 @@ BOOST_AUTO_TEST_CASE(empty_intersection) } } } + +void createPanoramaScene(double coeff, NViewDataSet& d, const int iNviews, const int iNbPoints, const int principal_Point) +{ + // Create a panorama scene + //-- + // Create a panorama scene with a field of view + // depending on a coefficient and the number of views + // 1 camera looks to iNviews different directions + //-- + + double field_of_view = (360.0*coeff)/iNviews; + double focalRatio = 0.5/(tan(0.5*degreeToRadian(field_of_view))); + double focalLengthPix=focalRatio*2*principal_Point; + NViewDatasetConfigurator config(focalLengthPix, focalLengthPix, principal_Point, principal_Point, 1, 0); + d._n = iNviews; + d._K.resize(iNviews); + d._R.resize(iNviews); + d._t.resize(iNviews); + d._C.resize(iNviews); + + for (size_t i = 0; i < iNviews; ++i) + { + Vec3 camera_center(0., 0., 0.); + const double theta = i * 2 * M_PI / iNviews; + d._C[i] = camera_center; + // Circle + Vec3 lookdir(sin(theta), 0.0, cos(theta)); // Y axis UP + + d._K[i] << config._fx, 0, config._cx, + 0, config._fy, config._cy, + 0, 0, 1; + d._R[i] = LookAt(lookdir); // Y axis UP + d._t[i] = -d._R[i] * camera_center; // [t]=[-RC] Cf HZ. 
+ } + +} + +BOOST_AUTO_TEST_CASE(panorama_intersection) +{ + // Create partially truncated frustum +-- + // 1 camera looks to 4 different directions on a circle + // Create a panorama scene with a field of view + // more than 90° for each view which means overlap + //-- + + const int principal_Point = 500; + // Setup a panorama camera rig: cameras rotations around a nodal point + const int iNviews = 4; + const int iNbPoints = 6; + NViewDataSet d; + double coeff = 1.2; // overlap coefficient: more than 1 means overlap + // create panorama scene + createPanoramaScene(coeff, d, iNviews, iNbPoints, principal_Point); + + //Test with partially truncated frustum + { + //Build frustum with near and far plane defined by a different value + std::vector vec_frustum; + for (int i=0; i < iNviews; ++i) + { + vec_frustum.push_back( + Frustum(principal_Point*2, principal_Point*2, + d._K[i], d._R[i], d._t[i], 0.1, -1.0)); + BOOST_CHECK(vec_frustum[i].isPartiallyTruncated()); + } + + //Check that there is overlap between all frustums + for (int i = 0; i < iNviews; ++i) + { + int j = (i+1) % iNviews; + BOOST_CHECK(vec_frustum[i].intersect(vec_frustum[j])); + + int k = (i-1+iNviews) % iNviews; + BOOST_CHECK(vec_frustum[i].intersect(vec_frustum[k])); + } + } +} + +BOOST_AUTO_TEST_CASE(panorama_without_intersection) +{ + // Create partially truncated frustum +-- + // 1 camera looks to 4 different directions on a circle + // Create a panorama scene with a field of view + // less than 90° for each view which means no overlap + //-- + + const int principal_Point = 500; + // Setup a panorama camera rig: cameras rotations around a nodal point + const int iNviews = 4; + const int iNbPoints = 6; + NViewDataSet d; + double coeff = 0.8; // overlap coefficient: less than 1 means no overlap + + //create panorama scene + createPanoramaScene(coeff, d, iNviews, iNbPoints, principal_Point); + + //Test with partially truncated frustum + { + //Build frustum with near and far plane defined by a
different value + std::vector vec_frustum; + for (int i=0; i < iNviews; ++i) + { + vec_frustum.push_back( + Frustum(principal_Point*2, principal_Point*2, + d._K[i], d._R[i], d._t[i], 0.1, -1.0)); + BOOST_CHECK(vec_frustum[i].isPartiallyTruncated()); + } + + //Check that there is no overlap between all frustums + for (int i = 0; i < iNviews; ++i) + { + for (int j = 0; j < iNviews; ++j) + { + if(i == j) + continue; + + BOOST_CHECK(!vec_frustum[i].intersect(vec_frustum[j])); + } + } + } +} + diff --git a/src/aliceVision/sfm/FrustumFilter.cpp b/src/aliceVision/sfm/FrustumFilter.cpp index 648eba8536..9cf7688b8d 100644 --- a/src/aliceVision/sfm/FrustumFilter.cpp +++ b/src/aliceVision/sfm/FrustumFilter.cpp @@ -53,19 +53,9 @@ void FrustumFilter::initFrustum(const sfmData::SfMData& sfmData) if (cam == nullptr) continue; - if (!_bTruncated) // use infinite frustum - { - const Frustum f( - cam->w(), cam->h(), cam->K(), - pose.rotation(), pose.center()); - frustum_perView[view->getViewId()] = f; - } - else // use truncated frustum with defined Near and Far planes - { - const Frustum f(cam->w(), cam->h(), cam->K(), - pose.rotation(), pose.center(), it->second.first, it->second.second); - frustum_perView[view->getViewId()] = f; - } + const Frustum f(cam->w(), cam->h(), cam->K(), + pose.rotation(), pose.center(), it->second.first, it->second.second); + frustum_perView[view->getViewId()] = f; } } diff --git a/src/software/pipeline/main_imageMatching.cpp b/src/software/pipeline/main_imageMatching.cpp index 4f21666f22..c7bf4f8587 100644 --- a/src/software/pipeline/main_imageMatching.cpp +++ b/src/software/pipeline/main_imageMatching.cpp @@ -749,12 +749,14 @@ int main(int argc, char** argv) } case EImageMatchingMethod::FRUSTUM: { - PairSet pairs = sfm::FrustumFilter(sfmDataA).getFrustumIntersectionPairs(); - for(PairSet::iterator it=pairs.begin(); it != pairs.end(); it++) - { - selectedPairs[it->first].insert(it->second); - } - break; + // For all cameras with valid 
extrinsic/intrinsic, we select the camera with common visibilities based on cameras' frustum. + // We use an epsilon near value for the frustum, to ensure that multiple images with a pure rotation will not intersect at the nodal point. + PairSet pairs = sfm::FrustumFilter(sfmDataA, 0.01).getFrustumIntersectionPairs(); + for(PairSet::iterator it=pairs.begin(); it != pairs.end(); it++) + { + selectedPairs[it->first].insert(it->second); + } + break; } } From a83e281e860fda9845f1a572786b811acdddb083 Mon Sep 17 00:00:00 2001 From: Theo Date: Mon, 20 Jan 2020 10:18:00 +0100 Subject: [PATCH 05/10] [software] imageMatching: add const reference, use range-based for loop and clean cmd line params --- src/software/pipeline/main_imageMatching.cpp | 28 ++++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/software/pipeline/main_imageMatching.cpp b/src/software/pipeline/main_imageMatching.cpp index c7bf4f8587..0ea67b88f1 100644 --- a/src/software/pipeline/main_imageMatching.cpp +++ b/src/software/pipeline/main_imageMatching.cpp @@ -407,9 +407,9 @@ void generateFromVoctree(PairList& allMatches, } } -void conditionVocTree(std::string treeName, bool withWeights, std::string weightsName, const EImageMatchingMode matchingMode, std::vector& featuresFolders, - sfmData::SfMData& sfmDataA, std::size_t nbMaxDescriptors, std::string sfmDataFilenameA, sfmData::SfMData& sfmDataB, std::string sfmDataFilenameB, - const bool useMultiSfM, std::map& descriptorsFilesA, std::size_t numImageQuery, OrderedPairList& selectedPairs) +void conditionVocTree(const std::string& treeName, bool withWeights, const std::string& weightsName, const EImageMatchingMode matchingMode, const std::vector& featuresFolders, + const sfmData::SfMData& sfmDataA, std::size_t nbMaxDescriptors, const std::string& sfmDataFilenameA, const sfmData::SfMData& sfmDataB, const std::string& sfmDataFilenameB, + bool useMultiSfM, const std::map& descriptorsFilesA, std::size_t numImageQuery,
OrderedPairList& selectedPairs) { if(treeName.empty()) { @@ -553,8 +553,6 @@ int main(int argc, char** argv) std::string sfmDataFilenameA; /// the folder(s) containing the extracted features with their associated descriptors std::vector featuresFolders; - /// the filename of the voctree - std::string treeName="Vocabulary Tree"; /// the file in which to save the results std::string outputFile; @@ -568,8 +566,10 @@ int main(int argc, char** argv) std::size_t numImageQuery = 50; /// the number of neighbors to retrieve for each image in Sequential Mode std::size_t numImageQuerySequential = 50; + /// the filename of the voctree + std::string treeFilepath; /// the filename for the voctree weights - std::string weightsName; + std::string weightsFilepath; /// flag for the optional weights file bool withWeights = false; @@ -598,9 +598,6 @@ int main(int argc, char** argv) "SfMData file.") ("featuresFolders,f", po::value>(&featuresFolders)->multitoken()->required(), "Path to folder(s) containing the extracted features.") - ("tree,t", po::value(&treeName)->default_value(treeName), - "Input file path of the vocabulary tree. This file can be generated by createVoctree. " - "This software is intended to be used with a generic, pre-trained vocabulary tree.") ("output,o", po::value(&outputFile)->required(), "Filepath to the output file with the list of selected image pairs."); @@ -618,7 +615,10 @@ int main(int argc, char** argv) ("nbNeighbors", po::value(&numImageQuerySequential)->default_value(numImageQuerySequential), "The number of neighbors to retrieve for each image (If 0 it will " "retrieve all the neighbors).") - ("weights,w", po::value(&weightsName), + ("tree,t", po::value(&treeFilepath)->default_value(treeFilepath), + "Input file path of the vocabulary tree. This file can be generated by 'createVoctree'. 
" + "This software is intended to be used with a generic, pre-trained vocabulary tree.") + ("weights,w", po::value(&weightsFilepath)->default_value(weightsFilepath), "Input name for the vocabulary tree weight file, if not provided all voctree leaves will have the same weight."); po::options_description multiSfMParams("Multiple SfM"); @@ -731,7 +731,7 @@ int main(int argc, char** argv) } case EImageMatchingMethod::VOCABULARYTREE: { - conditionVocTree(treeName, withWeights, weightsName, matchingMode,featuresFolders, sfmDataA, nbMaxDescriptors, sfmDataFilenameA, sfmDataB, + conditionVocTree(treeFilepath, withWeights, weightsFilepath, matchingMode,featuresFolders, sfmDataA, nbMaxDescriptors, sfmDataFilenameA, sfmDataB, sfmDataFilenameB, useMultiSfM, descriptorsFilesA, numImageQuery, selectedPairs); break; } @@ -743,7 +743,7 @@ int main(int argc, char** argv) case EImageMatchingMethod::SEQUENTIAL_AND_VOCABULARYTREE: { generateSequentialMatches(sfmDataA, numImageQuerySequential, selectedPairs); - conditionVocTree(treeName, withWeights, weightsName, matchingMode,featuresFolders, sfmDataA, nbMaxDescriptors, sfmDataFilenameA, sfmDataB, + conditionVocTree(treeFilepath, withWeights, weightsFilepath, matchingMode,featuresFolders, sfmDataA, nbMaxDescriptors, sfmDataFilenameA, sfmDataB, sfmDataFilenameB, useMultiSfM, descriptorsFilesA, numImageQuery, selectedPairs); break; } @@ -752,9 +752,9 @@ int main(int argc, char** argv) // For all cameras with valid extrinsic/intrinsic, we select the camera with common visibilities based on cameras' frustum. // We use an epsilon near value for the frustum, to ensure that mulitple images with a pure rotation will not intersect at the nodal point. 
PairSet pairs = sfm::FrustumFilter(sfmDataA, 0.01).getFrustumIntersectionPairs(); - for(PairSet::iterator it=pairs.begin(); it != pairs.end(); it++) + for(const auto& p: pairs) { - selectedPairs[it->first].insert(it->second); + selectedPairs[p.first].insert(p.second); } break; } From 978a7ded36c04b1eba1746c0d10414b39dce01ed Mon Sep 17 00:00:00 2001 From: Theo Date: Tue, 21 Jan 2020 14:34:54 +0100 Subject: [PATCH 06/10] [software] incrementalSfM : Add filterTrackForks as an option --- .../sequential/ReconstructionEngine_sequentialSfM.cpp | 7 ++----- .../sequential/ReconstructionEngine_sequentialSfM.hpp | 2 +- src/aliceVision/track/Track.cpp | 6 ++++-- src/aliceVision/track/Track.hpp | 5 +++-- src/aliceVision/track/track_test.cpp | 2 +- src/software/pipeline/main_incrementalSfM.cpp | 6 ++++-- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp index 815631ccb4..8df05ab80e 100644 --- a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp +++ b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp @@ -236,11 +236,8 @@ std::size_t ReconstructionEngine_sequentialSfM::fuseMatchesIntoTracks() ALICEVISION_LOG_DEBUG("Track building"); tracksBuilder.build(matches); - if(_params.useTrackFiltering) - { - ALICEVISION_LOG_DEBUG("Track filtering"); - tracksBuilder.filter(_params.minInputTrackLength); - } + ALICEVISION_LOG_DEBUG("Track filtering"); + tracksBuilder.filter(_params.filterTrackForks, _params.minInputTrackLength); ALICEVISION_LOG_DEBUG("Track export to internal structure"); // build tracks with STL compliant type diff --git a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp index 3e9ca3f2e5..1fdc52e067 100644 --- 
a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp +++ b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp @@ -57,7 +57,7 @@ class ReconstructionEngine_sequentialSfM : public ReconstructionEngine double maxReprojectionError = 4.0; float minAngleInitialPair = 5.0f; float maxAngleInitialPair = 40.0f; - bool useTrackFiltering = true; + bool filterTrackForks = true; robustEstimation::ERobustEstimator localizerEstimator = robustEstimation::ERobustEstimator::ACRANSAC; double localizerEstimatorError = std::numeric_limits::infinity(); size_t localizerEstimatorMaxIterations = 4096; diff --git a/src/aliceVision/track/Track.cpp b/src/aliceVision/track/Track.cpp index 08629c0b7c..8e42cfc91d 100644 --- a/src/aliceVision/track/Track.cpp +++ b/src/aliceVision/track/Track.cpp @@ -85,11 +85,13 @@ void TracksBuilder::build(const PairwiseMatches& pairwiseMatches) } } -void TracksBuilder::filter(std::size_t minTrackLength, bool multithreaded) +void TracksBuilder::filter(bool clearForks, std::size_t minTrackLength, bool multithreaded) { // remove bad tracks: // - track that are too short, // - track with id conflicts (many times the same image index) + if(!clearForks && minTrackLength == 0) + return; std::set set_classToErase; @@ -106,7 +108,7 @@ void TracksBuilder::filter(std::size_t minTrackLength, bool multithreaded) myset.insert(_map_nodeToIndex[iit].first); ++cpt; } - if(myset.size() != cpt || myset.size() < minTrackLength) + if((clearForks && myset.size() != cpt) || myset.size() < minTrackLength) { #pragma omp critical set_classToErase.insert(cit.operator int()); diff --git a/src/aliceVision/track/Track.hpp b/src/aliceVision/track/Track.hpp index 6c5dd3bedf..4491c81349 100644 --- a/src/aliceVision/track/Track.hpp +++ b/src/aliceVision/track/Track.hpp @@ -162,10 +162,11 @@ struct TracksBuilder /** * @brief Remove bad tracks (too short or track with ids collision) - * @param[in] minTrackLength + * @param[in] clearForks: 
remove tracks with multiple observations in a single image + * @param[in] minTrackLength: minimal number of observations to keep the track * @param[in] multithreaded Is multithreaded */ - void filter(std::size_t minTrackLength = 2, bool multithreaded = true); + void filter(bool clearForks = true, std::size_t minTrackLength = 2, bool multithreaded = true); /** * @brief Export to stream diff --git a/src/aliceVision/track/track_test.cpp b/src/aliceVision/track/track_test.cpp index 8f0af7c8ba..2a20507c14 100644 --- a/src/aliceVision/track/track_test.cpp +++ b/src/aliceVision/track/track_test.cpp @@ -112,7 +112,7 @@ BOOST_AUTO_TEST_CASE(Track_filter_3viewAtLeast) { TracksBuilder trackBuilder; trackBuilder.build( map_pairwisematches ); BOOST_CHECK_EQUAL(3, trackBuilder.nbTracks()); - trackBuilder.filter(3); + trackBuilder.filter(true, 3); BOOST_CHECK_EQUAL(2, trackBuilder.nbTracks()); } diff --git a/src/software/pipeline/main_incrementalSfM.cpp b/src/software/pipeline/main_incrementalSfM.cpp index 90f4407bec..9bba3aaa04 100644 --- a/src/software/pipeline/main_incrementalSfM.cpp +++ b/src/software/pipeline/main_incrementalSfM.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,7 @@ using namespace aliceVision; namespace po = boost::program_options; namespace fs = boost::filesystem; +using namespace aliceVision::track; /** * @brief Retrieve the view id in the sfmData from the image filename.
@@ -152,8 +154,8 @@ int main(int argc, char **argv) ("useOnlyMatchesFromInputFolder", po::value(&useOnlyMatchesFromInputFolder)->default_value(useOnlyMatchesFromInputFolder), "Use only matches from the input matchesFolder parameter.\n" "Matches folders previously added to the SfMData file will be ignored.") - ("useTrackFiltering", po::value(&sfmParams.useTrackFiltering)->default_value(sfmParams.useTrackFiltering), - "Enable/Disable the track filtering.\n") + ("filterTrackForks", po::value(&sfmParams.filterTrackForks)->default_value(sfmParams.filterTrackForks), + "Enable/Disable the track forks removal. A track contains a fork when incoherent matches leads to multiple features in the same image for a single track.\n") ("useRigConstraint", po::value(&sfmParams.useRigConstraint)->default_value(sfmParams.useRigConstraint), "Enable/Disable rig constraint.\n") ("lockScenePreviouslyReconstructed", po::value(&lockScenePreviouslyReconstructed)->default_value(lockScenePreviouslyReconstructed), From 326cf4ad171e778f0e7c3bd6d3d56c6991a35e44 Mon Sep 17 00:00:00 2001 From: Theo Date: Tue, 21 Jan 2020 14:45:07 +0100 Subject: [PATCH 07/10] [software] incrementalSfM : Add the option minNbMatches --- src/aliceVision/matching/io.cpp | 17 +++++++++++------ src/aliceVision/matching/io.hpp | 3 ++- .../sfm/pipeline/pairwiseMatchesIO.hpp | 3 ++- src/software/pipeline/main_incrementalSfM.cpp | 6 +++++- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/aliceVision/matching/io.cpp b/src/aliceVision/matching/io.cpp index 367429c656..919d2bc5f2 100644 --- a/src/aliceVision/matching/io.cpp +++ b/src/aliceVision/matching/io.cpp @@ -99,18 +99,23 @@ void filterMatchesByViews( void filterTopMatches( PairwiseMatches & allMatches, - const int limitNum) + const int limitNum, + const int minNum) { - if (limitNum <= 0) + if (limitNum <= 0 && minNum <=0) return; + if (minNum > limitNum) + throw std::runtime_error("The minimum of matches is higher than the maximum of matches"); for(auto& 
matchesPerDesc: allMatches) { for(auto& matches: matchesPerDesc.second) { IndMatches& m = matches.second; - if (m.size() > limitNum) + if (limitNum > 0 && m.size() > limitNum) m.erase(m.begin()+limitNum, m.end()); + if (minNum > 0 && m.size() < minNum) + m.clear(); } } } @@ -232,7 +237,8 @@ bool Load( const std::set& viewsKeysFilter, const std::vector& folders, const std::vector& descTypesFilter, - const int maxNbMatches) + const int maxNbMatches, + const int minNbMatches) { std::size_t nbLoadedMatchFiles = 0; const std::string pattern = "matches.txt"; @@ -271,8 +277,7 @@ bool Load( if(!descTypesFilter.empty()) filterMatchesByDesc(matches, descTypesFilter); - if(maxNbMatches > 0) - filterTopMatches(matches, maxNbMatches); + filterTopMatches(matches, maxNbMatches, minNbMatches); ALICEVISION_LOG_TRACE("Matches per image pair (after filtering):"); logMatches(matches); diff --git a/src/aliceVision/matching/io.hpp b/src/aliceVision/matching/io.hpp index b56aaa0c5c..9e92555f39 100644 --- a/src/aliceVision/matching/io.hpp +++ b/src/aliceVision/matching/io.hpp @@ -50,7 +50,8 @@ bool Load(PairwiseMatches& matches, const std::set& viewsKeysFilter, const std::vector& folders, const std::vector& descTypesFilter, - const int maxNbMatches = 0); + const int maxNbMatches = 0, + const int minNbMatches =0); /** * @brief Filter to keep only specific viewIds. 
diff --git a/src/aliceVision/sfm/pipeline/pairwiseMatchesIO.hpp b/src/aliceVision/sfm/pipeline/pairwiseMatchesIO.hpp index 2aa14fd9a8..c1b1efd2f6 100644 --- a/src/aliceVision/sfm/pipeline/pairwiseMatchesIO.hpp +++ b/src/aliceVision/sfm/pipeline/pairwiseMatchesIO.hpp @@ -35,6 +35,7 @@ inline bool loadPairwiseMatches( const std::vector& folders, const std::vector& descTypes, const int maxNbMatches = 0, + const int minNbMatches = 0, bool useOnlyMatchesFromFolder = false) { std::vector matchesFolders; @@ -47,7 +48,7 @@ inline bool loadPairwiseMatches( matchesFolders.insert(matchesFolders.end(), folders.begin(), folders.end()); ALICEVISION_LOG_DEBUG("Loading matches"); - if (!matching::Load(out_pairwiseMatches, sfmData.getViewsKeys(), matchesFolders, descTypes, maxNbMatches)) + if (!matching::Load(out_pairwiseMatches, sfmData.getViewsKeys(), matchesFolders, descTypes, maxNbMatches, minNbMatches)) { std::stringstream ss("Unable to read the matches file(s) from:\n"); for(const std::string& folder : matchesFolders) diff --git a/src/software/pipeline/main_incrementalSfM.cpp b/src/software/pipeline/main_incrementalSfM.cpp index 9bba3aaa04..b556a3014c 100644 --- a/src/software/pipeline/main_incrementalSfM.cpp +++ b/src/software/pipeline/main_incrementalSfM.cpp @@ -87,6 +87,7 @@ int main(int argc, char **argv) sfm::ReconstructionEngine_sequentialSfM::Params sfmParams; bool lockScenePreviouslyReconstructed = true; int maxNbMatches = 0; + int minNbMatches = 0; bool useOnlyMatchesFromInputFolder = false; po::options_description allParams( @@ -118,6 +119,9 @@ int main(int argc, char **argv) ("maxNumberOfMatches", po::value(&maxNbMatches)->default_value(maxNbMatches), "Maximum number of matches per image pair (and per feature type). " "This can be useful to have a quick reconstruction overview. 0 means no limit.") + ("minNumberOfMatches", po::value(&minNbMatches)->default_value(minNbMatches), + "Minimum number of matches per image pair (and per feature type). 
" + "This can be useful to have a meaningful reconstruction with accurate keypoints. 0 means no limit.") ("minInputTrackLength", po::value(&sfmParams.minInputTrackLength)->default_value(sfmParams.minInputTrackLength), "Minimum track length in input of SfM.") ("minAngleForTriangulation", po::value(&sfmParams.minAngleForTriangulation)->default_value(sfmParams.minAngleForTriangulation), @@ -242,7 +246,7 @@ int main(int argc, char **argv) // matches reading matching::PairwiseMatches pairwiseMatches; - if(!sfm::loadPairwiseMatches(pairwiseMatches, sfmData, matchesFolders, describerTypes, maxNbMatches, useOnlyMatchesFromInputFolder)) + if(!sfm::loadPairwiseMatches(pairwiseMatches, sfmData, matchesFolders, describerTypes, maxNbMatches, minNbMatches, useOnlyMatchesFromInputFolder)) { ALICEVISION_LOG_ERROR("Unable to load matches."); return EXIT_FAILURE; From b3e7e5669e36a3361898ac09cc646cb72c1e3219 Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Thu, 30 Jan 2020 12:31:33 +0100 Subject: [PATCH 08/10] [matching] fixes for feature matches min/max limits Fix minNum/limitNum conditions. 
--- src/aliceVision/matching/io.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/aliceVision/matching/io.cpp b/src/aliceVision/matching/io.cpp index 919d2bc5f2..b16a274576 100644 --- a/src/aliceVision/matching/io.cpp +++ b/src/aliceVision/matching/io.cpp @@ -104,18 +104,18 @@ void filterTopMatches( { if (limitNum <= 0 && minNum <=0) return; - if (minNum > limitNum) - throw std::runtime_error("The minimum of matches is higher than the maximum of matches"); + if (limitNum > 0 && minNum > limitNum) + throw std::runtime_error("The minimum number of matches is higher than the maximum."); for(auto& matchesPerDesc: allMatches) { for(auto& matches: matchesPerDesc.second) { IndMatches& m = matches.second; - if (limitNum > 0 && m.size() > limitNum) - m.erase(m.begin()+limitNum, m.end()); if (minNum > 0 && m.size() < minNum) m.clear(); + else if (limitNum > 0 && m.size() > limitNum) + m.erase(m.begin()+limitNum, m.end()); } } } From 80a24b87a8190a09c505d7ca69ac553b225bb02a Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Thu, 30 Jan 2020 12:01:18 +0100 Subject: [PATCH 09/10] [sfm] remove trailing pragma --- src/aliceVision/sfm/pipeline/ReconstructionEngine.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/aliceVision/sfm/pipeline/ReconstructionEngine.cpp b/src/aliceVision/sfm/pipeline/ReconstructionEngine.cpp index 613e087df6..933c48aa05 100644 --- a/src/aliceVision/sfm/pipeline/ReconstructionEngine.cpp +++ b/src/aliceVision/sfm/pipeline/ReconstructionEngine.cpp @@ -4,8 +4,6 @@ // v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. 
-#pragma once - #include "ReconstructionEngine.hpp" #include From b592507aec17ca3a5c76dd7c319ec6d8f795a6c2 Mon Sep 17 00:00:00 2001 From: Fabien Castan Date: Thu, 30 Jan 2020 12:04:32 +0100 Subject: [PATCH 10/10] [software] SfM: features/matchesFolders are no more required When computing a SfM node from a previous SfM, the features/matches are not needed. --- src/software/pipeline/main_incrementalSfM.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/software/pipeline/main_incrementalSfM.cpp b/src/software/pipeline/main_incrementalSfM.cpp index b556a3014c..452c6c0a97 100644 --- a/src/software/pipeline/main_incrementalSfM.cpp +++ b/src/software/pipeline/main_incrementalSfM.cpp @@ -101,13 +101,14 @@ int main(int argc, char **argv) "SfMData file.") ("output,o", po::value(&outputSfM)->required(), "Path to the output SfMData file.") - ("featuresFolders,f", po::value>(&featuresFolders)->multitoken()->required(), - "Path to folder(s) containing the extracted features.") - ("matchesFolders,m", po::value>(&matchesFolders)->multitoken()->required(), - "Path to folder(s) in which computed matches are stored."); + ; po::options_description optionalParams("Optional parameters"); optionalParams.add_options() + ("featuresFolders,f", po::value>(&featuresFolders)->multitoken(), + "Path to folder(s) containing the extracted features.") + ("matchesFolders,m", po::value>(&matchesFolders)->multitoken(), + "Path to folder(s) in which computed matches are stored.") ("outputViewsAndPoses", po::value(&outputSfMViewsAndPoses)->default_value(outputSfMViewsAndPoses), "Path to the output SfMData file (with only views and poses).") ("extraInfoFolder", po::value(&extraInfoFolder)->default_value(extraInfoFolder),