// main.cpp
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <iomanip>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp" //-- imread, CV_LOAD_IMAGE_GRAYSCALE (OpenCV 2.x)
#include "featuredpa.h"
#include "DynamicFactorGraph.h"

using namespace cv;
using namespace std;

void stereoVision();
void factorGraph();

/** @function main */
int main(int argc, char** argv)
{
  // stereoVision();
  factorGraph();
  return 0;
}
void factorGraph() {
  DynamicFactorGraph graph;
  // graph.initializeFactorGraph();
  graph.loadInitialPoses("datasets/pose_output.txt");
  graph.loadLandmarks("datasets/landmark_output.txt");
  graph.loadIMUFactors("datasets/preIntegratedIMU.csv");
  graph.loadIMUData("datasets/body_accel.csv",
                    "datasets/body_angvel.csv",
                    "datasets/imu_timestamps.csv",
                    "datasets/image_timestamps.csv");
  // cout << " Loaded everything " << endl;
  graph.solve();
}
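
//-- DynamicFactorGraph is declared in DynamicFactorGraph.h, which is not
//-- shown here. For orientation only: a minimal sketch of what a solve()
//-- built on GTSAM could look like. The factor layout, symbol names, and
//-- noise values below are illustrative assumptions, not this project's
//-- actual implementation.
#include <gtsam/geometry/Pose3.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/PriorFactor.h>
void factorGraphSketch() {
  gtsam::NonlinearFactorGraph graph;
  gtsam::Values initial;
  //-- Anchor the first pose so the optimization is well constrained
  graph.add(gtsam::PriorFactor<gtsam::Pose3>(
      gtsam::Symbol('x', 0), gtsam::Pose3(),
      gtsam::noiseModel::Isotropic::Sigma(6, 0.1)));
  initial.insert(gtsam::Symbol('x', 0), gtsam::Pose3());
  //-- ... IMU, odometry, and landmark factors would be added here ...
  gtsam::Values result =
      gtsam::LevenbergMarquardtOptimizer(graph, initial).optimize();
  result.print("optimized ");
}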

void stereoVision() {
  //-- This is just a demo showing how to read two images and then get the
  //-- matched features
  // Mat img_1L = imread( "datasets/cmu_16662_p2/sensor_data/left000.jpg", CV_LOAD_IMAGE_GRAYSCALE );
  // Mat img_1R = imread( "datasets/cmu_16662_p2/sensor_data/right000.jpg", CV_LOAD_IMAGE_GRAYSCALE );
  // Mat img_2L = imread( "datasets/cmu_16662_p2/sensor_data/left001.jpg", CV_LOAD_IMAGE_GRAYSCALE );
  // Mat img_2R = imread( "datasets/cmu_16662_p2/sensor_data/right001.jpg", CV_LOAD_IMAGE_GRAYSCALE );
  //-- These are the camera projection matrices
  Matx34d projMat1(164.255034407511, 0.0, 214.523999214172, 0.0,
                   0.0, 164.255034407511, 119.433252334595, 0.0,
                   0.0, 0.0, 1.0, 0.0);
  Matx34d projMat2(164.255034407511, 0.0, 214.523999214172, 26.62574107745753,
                   0.0, 164.255034407511, 119.433252334595, 0.0,
                   0.0, 0.0, 1.0, 0.0);
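  //-- The two matrices differ only in projMat2(0,3). Under the usual stereo
  //-- convention that entry encodes fx * baseline, which would put the
  //-- baseline at roughly 26.6257 / 164.255 ≈ 0.162 m (inferred from the
  //-- numbers above, not documented in this file).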
  // if( !img_1L.data || !img_2R.data ){
  //   cout << "Images failed!" << endl;
  //   return -1;
  // }
  //-- Create @featureFinder (Detection, Pairing, Alignment)
  FeatureDPA featureFinder;
  //-- Pass it two images and true/false depending on whether you want to see
  //-- the matches
  //-- @feature has keypoints for both images and the good matches between images
  // Example process for two frames (4 images)
  /******************************************************************/
  // Feature f1 = featureFinder.findMatches(img_1L, img_1R, true);
  // Feature f2 = featureFinder.findMatches(img_2L, img_2R, true);
  // Feature f1WithWorlds = featureFinder.getWorldPoints(f1, projMat1, projMat2);
  // Feature f2WithWorlds = featureFinder.getWorldPoints(f2, projMat1, projMat2);
  // Mat tf = featureFinder.estimatePose(f1WithWorlds, f2WithWorlds);
  // cout << tf << endl;
  /******************************************************************/
  //-- For creating a CSV of the transforms (easier to plot with Python)
  Mat pose = Mat::eye(4, 4, CV_64F);
  cout << "time" << "," << "T[0]"  << "," << "T[1]"  << "," << "T[2]"  << "," << "T[3]"  << ","
       << "T[4]"  << "," << "T[5]"  << "," << "T[6]"  << "," << "T[7]"  << ","
       << "T[8]"  << "," << "T[9]"  << "," << "T[10]" << "," << "T[11]" << ","
       << "T[12]" << "," << "T[13]" << "," << "T[14]" << "," << "T[15]" << endl;
  //-- Process for all of the images
  Mat imgL1 = imread("datasets/cmu_16662_p2/sensor_data/left000.jpg", CV_LOAD_IMAGE_GRAYSCALE);
  Mat imgR1 = imread("datasets/cmu_16662_p2/sensor_data/right000.jpg", CV_LOAD_IMAGE_GRAYSCALE);
  int i = 0;
  while(true) {
    //-- Read a new frame (two new images, left and right); file names are
    //-- zero-padded to three digits: 000, 001, ...
    ostringstream tmp;
    tmp << setw(3) << setfill('0') << i;
    string num = tmp.str();
    Mat imgL2 = imread("datasets/cmu_16662_p2/sensor_data/left"+num+".jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat imgR2 = imread("datasets/cmu_16662_p2/sensor_data/right"+num+".jpg", CV_LOAD_IMAGE_GRAYSCALE);
    //-- No more images in the directory
    if(imgL2.empty() || imgR2.empty()) break;
    //-- Get the features between the last two frames
    Feature f1 = featureFinder.findMatches(imgL1, imgR1, true);
    Feature f2 = featureFinder.findMatches(imgL2, imgR2, true);
    //-- Assign world points to the features
    Feature f1WithWorlds = featureFinder.getWorldPoints(f1, projMat1, projMat2);
    Feature f2WithWorlds = featureFinder.getWorldPoints(f2, projMat1, projMat2);
    //-- Skip frames with too few features: estimatePose can't find a good
    //-- pose for low-feature-count frames (possibly a side effect of the
    //-- "good"-feature filtering), and the pose*tf multiplication below
    //-- segfaults on the invalid result if such frames are not skipped
    if(f2WithWorlds.worldPoint.size() < 10) {
      i++;
      continue;
    }
    //-- Get the delta transform (3x4) and extend it to a 4x4 homogeneous
    //-- transform by appending the row [0 0 0 1]
    Mat tf = featureFinder.estimatePose(f1WithWorlds, f2WithWorlds);
    double data[4] = {0, 0, 0, 1};
    Mat homogeneous(1, 4, CV_64F, data);
    tf.push_back(homogeneous);
    //-- The motion update: compose the running pose with the new delta
    pose = pose * tf;
    //-- Emit one CSV row of the transform
    cout << i << "," << pose.at<double>(0,0) << "," << pose.at<double>(0,1) << "," << pose.at<double>(0,2) << "," << pose.at<double>(0,3) << ","
         << pose.at<double>(1,0) << "," << pose.at<double>(1,1) << "," << pose.at<double>(1,2) << "," << pose.at<double>(1,3) << ","
         << pose.at<double>(2,0) << "," << pose.at<double>(2,1) << "," << pose.at<double>(2,2) << "," << pose.at<double>(2,3) << ","
         << pose.at<double>(3,0) << "," << pose.at<double>(3,1) << "," << pose.at<double>(3,2) << "," << pose.at<double>(3,3)
         << endl;
    //-- Prepare the next iteration
    imgL1 = imgL2;
    imgR1 = imgR2;
    i++;
  }
}
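
//-- featuredpa.h is not shown in this file, so getWorldPoints' internals are
//-- unknown here. For orientation only: a minimal sketch of the stereo
//-- triangulation such a step could perform with cv::triangulatePoints. The
//-- function name and signature are hypothetical, not the project's API.
#include "opencv2/calib3d/calib3d.hpp"
vector<Point3d> triangulateSketch(const Matx34d &P1, const Matx34d &P2,
                                  const vector<Point2f> &ptsL,
                                  const vector<Point2f> &ptsR) {
  Mat points4D; //-- 4xN homogeneous world coordinates, one column per match
  triangulatePoints(Mat(P1), Mat(P2), ptsL, ptsR, points4D);
  points4D.convertTo(points4D, CV_64F);
  vector<Point3d> worldPoints;
  for(int c = 0; c < points4D.cols; c++) {
    double w = points4D.at<double>(3, c); //-- divide out the homogeneous scale
    worldPoints.push_back(Point3d(points4D.at<double>(0, c) / w,
                                  points4D.at<double>(1, c) / w,
                                  points4D.at<double>(2, c) / w));
  }
  return worldPoints;
}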