Eigen::Vector3d pix2cam(Eigen::Matrix3d& K, Eigen::Vector3d& pix){ Eigen::Vector3d cam = K.inverse()*pix; cam.array() /= cam(2); return cam; }void find_essential_dlt(std::vector<Eigen::Vector3d>& camPoints1, std::vector<Eigen::Vector3d>& camPoints2, Eigen::Matrix3d& E){ assert(camPoints1.size()==camPoints2.size()); using namespace Eigen;
int N = camPoints1.size(); MatrixXd A(N, 9);
for (int i = 0; i <N; ++i) { Vector3d& p1 =camPoints1.at(i); Vector3d& p2 =camPoints2.at(i);
A(i, 0) = p1[0]*p2[0]; A(i, 1) = p1[1]*p2[0]; A(i, 2) = p2[0]; A(i, 3) = p1[0]*p2[1]; A(i, 4) = p1[1]*p2[1]; A(i, 5) = p2[1]; A(i, 6) = p1[0]; A(i, 7) = p1[1]; A(i, 8) = 1.0; } // SVD 分解 Eigen::JacobiSVD<MatrixXd> solver(A, Eigen::ComputeFullU|Eigen::ComputeFullV); MatrixXd V = solver.matrixV(); VectorXd X = V.col(8);
Matrix3d E_; E_ << X(0), X(1), X(2), X(3), X(4), X(5), X(6), X(7), X(8); Eigen::JacobiSVD<Matrix3d> solverF(E_, Eigen::ComputeFullU|Eigen::ComputeFullV); Matrix3d V_ = solverF.matrixV(); Matrix3d U_ = solverF.matrixU(); Vector3d sig_ = solverF.singularValues(); Matrix3d S_=Matrix3d::Zero(); double s = (sig_(0)+sig_(1))/2; S_(0, 0) = s; S_(1, 1) = s; S_(2, 2) = 0; E = U_ * S_* V_.transpose(); }根據E矩陣分解相機的姿態:
首先獲得4種可能的結果組合
// Decompose an essential matrix into the four possible relative poses
// (Hartley & Zisserman, "Multiple View Geometry", Sec. 9.6.2):
//   E = U diag(1,1,0) V^T  =>  R = U W V^T or U W^T V^T,  t = +-u3
// @param E   essential matrix (defined only up to scale)
// @param R1  first rotation candidate  U * W   * V^T
// @param R2  second rotation candidate U * W^T * V^T
// @param t1  translation candidate +u3 (unit length; scale is unobservable)
// @param t2  translation candidate -u3
void decompose_from_essential(Eigen::Matrix3d& E,
                              Eigen::Matrix3d& R1, Eigen::Matrix3d& R2,
                              Eigen::Vector3d& t1, Eigen::Vector3d& t2) {
    using namespace Eigen;
    // W rotates by +90 degrees around z; W^T by -90 degrees.
    Matrix3d W, Wt;
    W  << 0, -1, 0,  1, 0, 0,  0, 0, 1;
    Wt << 0,  1, 0, -1, 0, 0,  0, 0, 1;

    JacobiSVD<Matrix3d> solver(E, ComputeFullU | ComputeFullV);
    Matrix3d U = solver.matrixU();
    Matrix3d V = solver.matrixV();

    // Force U and V to be proper rotations (det = +1) so the recovered R
    // has det = +1; flipping the last column only flips the sign of E,
    // which is defined up to scale anyway.
    if (U.determinant() < 0.0) U.col(2) = -U.col(2);
    if (V.determinant() < 0.0) V.col(2) = -V.col(2);

    // BUG FIX: the rotation is U * W * V^T, not U * W * V.  With proper
    // rotations U, W, V the result is orthonormal, so the previous
    // per-column re-normalization workaround is no longer needed.
    R1 = U * W  * V.transpose();
    R2 = U * Wt * V.transpose();

    // Translation is the last column of U; normalize because the scale
    // of E (and hence of t) cannot be recovered from images alone.
    t1 = U.col(2).normalized();
    t2 = -t1;
}其實通過這種方式分解求出的R和t,很難確定他的尺度,所以可以把R和t 進行歸一化處理。
左相機的外參為[I, 0] , 右相機有4個可能[R1, t1] [R2, t1] [R1, t2] [R2, t2].
選擇的思路是可以拿出若干像素點對,首先用三角化求出3維世界坐標點,然後把世界坐標點通過兩個相機的外參,轉換為對應的相機坐標,根據z值的正負進行判別,如果都為正,則符合條件。我這裡選擇了一對點,如果匹配的點比較多,並且有可能有錯誤的匹配,可以通過統計的方式,根據符合條件的點的數量作為衡量依據
// Cheirality test for one pose candidate: triangulate a correspondence
// and require the 3D point to have positive depth in BOTH cameras.
// @param K1,R1,t1  intrinsics / extrinsics of the first camera
// @param K2,R2,t2  intrinsics / candidate extrinsics of the second camera
// @param p1,p2     matching homogeneous pixel points in the two images
// @return true if the triangulated point lies in front of both cameras
bool choose_pose(Eigen::Matrix3d& K1, Eigen::Matrix3d& R1, Eigen::Vector3d& t1,
                 Eigen::Matrix3d& K2, Eigen::Matrix3d& R2, Eigen::Vector3d& t2,
                 Eigen::Vector3d& p1, Eigen::Vector3d& p2) {
    // Projection matrices P = K [R | t].  Use plain .block(): assigning a
    // matrix into an .array() expression does not compile in Eigen.
    Eigen::Matrix<double, 3, 4> P1 = Eigen::Matrix<double, 3, 4>::Zero();
    Eigen::Matrix<double, 3, 4> P2 = Eigen::Matrix<double, 3, 4>::Zero();
    P1.block<3, 3>(0, 0) = R1;
    P1.block<3, 1>(0, 3) = t1;
    P1 = K1 * P1;
    P2.block<3, 3>(0, 0) = R2;
    P2.block<3, 1>(0, 3) = t2;
    P2 = K2 * P2;

    Eigen::Vector3d world;
    triangulatePoint(P1, P2, p1, p2, world);

    // BUG FIX: test the depth in the FIRST camera's frame (R1*X + t1),
    // not the raw world z; the two only coincide when [R1|t1] = [I|0].
    Eigen::Vector3d cam1 = R1 * world + t1;
    Eigen::Vector3d cam2 = R2 * world + t2;
    return cam1(2) > 0 && cam2(2) > 0;
}為了測試我用了OpenCV自帶的左右相機的棋盤格圖像中的一幅,內參用的也是雙目標定的內參。
具體使用: fundamental_essential_test.cpp
#include "Stereo/stereo.h"using namespace SLAM_STEREO;using namespace cv;using namespace Eigen;
int main() { std::string baseRoot="/home/atway/soft/opencv4.1/opencv-4.1.0/samples/data/"; Mat leftImg = imread(baseRoot+"left01.jpg", cv::IMREAD_GRAYSCALE); Mat rightImg = imread(baseRoot+"right01.jpg", cv::IMREAD_GRAYSCALE); Size patternSize(9, 6); Mat leftCameraMatrix, leftCameraDistCoeffs; Mat rightCameraMatrix, rightCameraDistCoeffs;
FileStorage fs(baseRoot+"intrinsics.yml", FileStorage::READ); if( fs.isOpened() ) { fs["M1"] >> leftCameraMatrix; fs["D1"] >> leftCameraDistCoeffs; fs["M2"] >> rightCameraMatrix; fs["D2"] >> rightCameraDistCoeffs; fs.release(); } else std::cout << "Error: can not load the intrinsic parameters\n";
std::vector<Point2f> leftImgCorners, rightImgCorners; Mat drawLeftImg, drawRightImg; SLAM_STEREO::findCorners(leftImg, patternSize, leftImgCorners,drawLeftImg); SLAM_STEREO::findCorners(rightImg, patternSize, rightImgCorners,drawRightImg);
std::vector<Point2f> leftUndistImgCorners, rightUndistImgCorners; cv::undistortPoints(leftImgCorners, leftUndistImgCorners, leftCameraMatrix, leftCameraDistCoeffs); cv::undistortPoints(rightImgCorners, rightUndistImgCorners, rightCameraMatrix, rightCameraDistCoeffs);
std::vector<Eigen::Vector3d> leftUndistImgCornersEigen, rightUndistImgCornersEigen; for (int i = 0; i < leftImgCorners.size(); ++i) { Point2f& pt1 = leftUndistImgCorners.at(i); Point2f& pt2 = rightUndistImgCorners.at(i); leftUndistImgCornersEigen.push_back(Eigen::Vector3d(pt1.x, pt1.y, 1.)); rightUndistImgCornersEigen.push_back(Eigen::Vector3d(pt2.x, pt2.y, 1.)); }
{ Matrix3d F; find_fundamental_dlt(leftUndistImgCornersEigen, rightUndistImgCornersEigen, F); std::cout << "fundamental:" << F << std::endl;
for(int i=0; i<leftImgCorners.size(); ++i){ float d = rightUndistImgCornersEigen[i].transpose()*F*leftUndistImgCornersEigen[i]; std::cout << i << " fundamental epipolar constraint: " << d << std::endl; }
}
std::vector<Vector3d> camPoints1, camPoints2; { Eigen::Matrix3d E, K1, K2; cv::cv2eigen(leftCameraMatrix, K1); cv::cv2eigen(rightCameraMatrix, K2);
for(int i=0; i<leftUndistImgCornersEigen.size(); ++i){ camPoints1.push_back(pix2cam(K1, leftUndistImgCornersEigen[i])); camPoints2.push_back(pix2cam(K2, rightUndistImgCornersEigen[i])); } find_essential_dlt(camPoints1, camPoints2, E); std::cout << "essential:" << E<< std::endl;
Matrix3d R1, R2; Vector3d t1, t2; decompose_from_essential(E, R1, R2, t1, t2);
Matrix3d I = Matrix3d ::Identity(); Vector3d t0 = Vector3d::Zero();
{
Matrix3d R; Vector3d t; { bool ok1 = choose_pose(K1, I, t0, K2, R1, t1, leftUndistImgCornersEigen[0], rightUndistImgCornersEigen[0]); bool ok2 = choose_pose(K1, I, t0, K2, R1, t2, leftUndistImgCornersEigen[0], rightUndistImgCornersEigen[0]); bool ok3 = choose_pose(K1, I, t0, K2, R2, t1, leftUndistImgCornersEigen[0], rightUndistImgCornersEigen[0]); bool ok4 = choose_pose(K1, I, t0, K2, R2, t2, leftUndistImgCornersEigen[0], rightUndistImgCornersEigen[0]); std::cout <<" ok1 " << ok1<< " ok2 " << ok2 << " ok3 " << ok3<< " ok4 " << ok4<< std::endl; if(ok1) { R = R1; t=t1; }else if(ok2){ R=R1; t=t2; }else if(ok3){ R=R2; t=t1; }else if(ok4){ R=R2; t=t2; } }
Matrix3d txR = corss(t)*R; for(int i=0; i<camPoints1.size(); ++i){ Vector3d& p1 = camPoints1[i]; Vector3d& p2 = camPoints2[i]; float d= p2.transpose()*txR*p1; std::cout << i << " E epipolar constraint: " << d << std::endl; }
}
}
return 0;}時間倉促,代碼難免會有錯誤,如有疑問可以在github上提交。
源碼地址:
https://github.com/zhaitianyong/slam_algorithm/tree/master/Stereo
Happy ~~~