OpenCV: Fundamental matrix accuracy


I am trying to calculate the fundamental matrix between 2 images (different photos of a static scene taken with the same camera).

I calculated it using findFundamentalMat and used the result to calculate other matrices (essential, rotation, ...). The results were obviously wrong, so I tried to verify the accuracy of the calculated fundamental matrix.
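
For context, the essential matrix is normally obtained from the fundamental matrix and the camera intrinsics as E = K^T * F * K (same camera for both images here). A one-line sketch, assuming K is the intrinsic matrix defined later in the code below:

Mat E = K.t() * F * K;  // essential matrix from F and intrinsics K (sketch, not from the original code)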

Using the epipolar constraint equation, I computed the fundamental matrix error. The error is very high (a few hundred). I do not know what is wrong with my code and I would really appreciate any help. In particular: is there anything I am missing in the fundamental matrix calculation, and is the way I calculate the error correct?
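
For reference, the quantity being checked is the algebraic epipolar constraint x2^T * F * x1 = 0 for every correspondence. A minimal sketch of a per-pair residual (the helper name is mine, not from the original code; it assumes using namespace cv and that F and the points are expressed in the same coordinate frame):

// hypothetical helper, not part of the original code
double epipolarResidual(const Mat &F, const Point2f &p1, const Point2f &p2)
{
    Mat x1 = (Mat_<double>(3, 1) << p1.x, p1.y, 1.0);  // homogeneous point in image 1
    Mat x2 = (Mat_<double>(3, 1) << p2.x, p2.y, 1.0);  // homogeneous point in image 2
    Mat r  = x2.t() * F * x1;                          // 1x1 matrix holding x2^T * F * x1
    return fabs(r.at<double>(0, 0));                   // ~0 for a perfect correspondence
}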

Also, I ran the code with very different numbers of matches. There are usually lots of outliers; e.g., in one case with more than 80 matches there were only 10 inliers.

Mat img_1 = imread( "imgl.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "imgr.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }

//-- Step 1: Detect the keypoints using SURF Detector

int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;

detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );

//-- Step 2: Calculate descriptors (feature vectors)

SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );

//-- Step 3: Matching descriptor vectors with a brute force matcher

BFMatcher matcher(NORM_L1, true);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );

vector<Point2f>imgpts1,imgpts2;
for( unsigned int i = 0; i<matches.size(); i++ )
{
    // queryIdx is the "left" image
    imgpts1.push_back(keypoints_1[matches[i].queryIdx].pt);
    // trainIdx is the "right" image
    imgpts2.push_back(keypoints_2[matches[i].trainIdx].pt);
}

//-- Step 4: Calculate Fundamental matrix

Mat f_mask;
Mat F = findFundamentalMat(imgpts1, imgpts2, FM_RANSAC, 0.5, 0.99, f_mask);

//-- Step 5: Calculate Fundamental matrix error

//Camera intrinsics
double data[] = {1189.46 , 0.0, 805.49,
                0.0, 1191.78, 597.44,
                0.0, 0.0, 1.0};
Mat K(3, 3, CV_64F, data);
//Camera distortion parameters
double dist[] = { -0.03432, 0.05332, -0.00347, 0.00106, 0.00000};
Mat D(1, 5, CV_64F, dist);

//working with undistorted points
vector<Point2f> undistorted_1,undistorted_2;
vector<Point3f> line_1, line_2;
undistortPoints(imgpts1,undistorted_1,K,D);
undistortPoints(imgpts2,undistorted_2,K,D);
computeCorrespondEpilines(undistorted_1,1,F,line_1);
computeCorrespondEpilines(undistorted_2,2,F,line_2);

double f_err=0.0;
double fx,fy,cx,cy;
fx=K.at<double>(0,0);fy=K.at<double>(1,1);cx=K.at<double>(0,2);cy=K.at<double>(1,2);
Point2f pt1, pt2;
int inliers=0;
//calculation of fundamental matrix error for inliers
for (int i=0; i<f_mask.size().height; i++)
    if (f_mask.at<uchar>(i)==1)
    {
        inliers++;
        //calculate non-normalized values
        pt1.x = undistorted_1[i].x * fx + cx;
        pt1.y = undistorted_1[i].y * fy + cy;
        pt2.x = undistorted_2[i].x * fx + cx;
        pt2.y = undistorted_2[i].y * fy + cy;
        f_err += fabs(pt1.x*line_2[i].x +
                pt1.y*line_2[i].y + line_2[i].z)
                + fabs(pt2.x*line_1[i].x +
                pt2.y*line_1[i].y + line_1[i].z);
    }

double AvrErr = f_err/inliers;

There are 2 answers

Answered by koshy george:
  • Given that we are supplied with the intrinsic matrix K and the distortion coefficients D, we should undistort the image points before feeding them to findFundamentalMat, and then work with the undistorted image coordinates from that point on (i.e., for computing the error as well). I found that this simple change reduced the maximum error of any image point pair from 176.0 to 0.2, and the number of inliers increased from 18 to 77.

  • I also toyed with normalizing the undistorted image points before feeding them to findFundamentalMat, which reduced the maximum error of any image point pair to almost zero, though it does not increase the number of inliers any further.

    const float kEpsilon = 1.0e-6f;
    
    float sampsonError(const Mat &dblFMat, const Point2f &pt1, const Point2f &pt2)
    {
    
    
        Mat m_pt1(3, 1, CV_64FC1);
        Mat m_pt2(3, 1, CV_64FC1);
        m_pt1.at<double>(0,0) = pt1.x; m_pt1.at<double>(1,0) = pt1.y; m_pt1.at<double>(2,0) = 1.0;
        m_pt2.at<double>(0,0) = pt2.x; m_pt2.at<double>(1,0) = pt2.y; m_pt2.at<double>(2,0) = 1.0;
    
        assert(dblFMat.rows==3 && dblFMat.cols==3);
        assert(m_pt1.rows==3 && m_pt1.cols==1);
        assert(m_pt2.rows==3 && m_pt2.cols==1);
        Mat dblFMatT(dblFMat.t());
        Mat dblFMatp1=(dblFMat * m_pt1);
        Mat dblFMatTp2=(dblFMatT * m_pt2);
        assert(dblFMatp1.rows==3 && dblFMatp1.cols==1);
        assert(dblFMatTp2.rows==3 && dblFMatTp2.cols==1);
    
        Mat numerMat=m_pt2.t() * dblFMatp1;
        double numer=numerMat.at<double>(0,0);
        if (fabs(numer) < kEpsilon)
        {
            return 0;
        } else {
            // Sampson distance denominator: squared first two components
            // of F*x1 and F^T*x2
            double denom = dblFMatp1.at<double>(0,0)*dblFMatp1.at<double>(0,0)
                         + dblFMatp1.at<double>(1,0)*dblFMatp1.at<double>(1,0)
                         + dblFMatTp2.at<double>(0,0)*dblFMatTp2.at<double>(0,0)
                         + dblFMatTp2.at<double>(1,0)*dblFMatTp2.at<double>(1,0);
            return (numer*numer)/denom;
        }
    }
    
    #define UNDISTORT_IMG_PTS 1
    #define NORMALIZE_IMG_PTS 1
    
    int filter_imgpts_pairs_with_epipolar_constraint(
        const vector<Point2f> &raw_imgpts_1,
        const vector<Point2f> &raw_imgpts_2,
        int imgW,
        int imgH
    )
    {
    
    #if UNDISTORT_IMG_PTS
        //Camera intrinsics
        double data[] = {1189.46 , 0.0, 805.49,
                        0.0, 1191.78, 597.44,
                        0.0, 0.0, 1.0};
        Mat K(3, 3, CV_64F, data);
        //Camera distortion parameters
        double dist[] = { -0.03432, 0.05332, -0.00347, 0.00106, 0.00000};
        Mat D(1, 5, CV_64F, dist);
    
    
        //working with undistorted points
        vector<Point2f> unnormalized_imgpts_1,unnormalized_imgpts_2;
        undistortPoints(raw_imgpts_1,unnormalized_imgpts_1,K,D);
        undistortPoints(raw_imgpts_2,unnormalized_imgpts_2,K,D);
    
    #else
        vector<Point2f> unnormalized_imgpts_1(raw_imgpts_1);
        vector<Point2f> unnormalized_imgpts_2(raw_imgpts_2);
    #endif
    
    
    
    #if NORMALIZE_IMG_PTS
    
        float c_col=imgW/2.0f;
        float c_row=imgH/2.0f;
        float multiply_factor= 2.0f/(imgW+imgH);
    
        vector<Point2f> final_imgpts_1(unnormalized_imgpts_1);
        vector<Point2f> final_imgpts_2(unnormalized_imgpts_2);
    
        for( auto iit=final_imgpts_1.begin(); iit != final_imgpts_1.end(); ++ iit)
        {
            Point2f &imgpt(*iit);
            imgpt.x=(imgpt.x - c_col)*multiply_factor;
            imgpt.y=(imgpt.y - c_row)*multiply_factor;
        }
        for( auto iit=final_imgpts_2.begin(); iit != final_imgpts_2.end(); ++ iit)
        {
            Point2f &imgpt(*iit);
            imgpt.x=(imgpt.x - c_col)*multiply_factor;
            imgpt.y=(imgpt.y - c_row)*multiply_factor;
        }
    
    #else
    
        vector<Point2f> final_imgpts_1(unnormalized_imgpts_1);
        vector<Point2f> final_imgpts_2(unnormalized_imgpts_2);
    #endif
    
        int algorithm=FM_RANSAC;
        //int algorithm=FM_LMEDS;
    
    
        vector<uchar> status;
    
        Mat F = findFundamentalMat(final_imgpts_1, final_imgpts_2, algorithm, 0.5, 0.99, status);
        int n_inliers = std::accumulate(status.begin(), status.end(), 0);
    
    
    
        assert(final_imgpts_1.size() == final_imgpts_2.size());
        vector<float> serr;
        for( unsigned int i = 0; i< final_imgpts_1.size(); i++ )
        {
            const Point2f &p_1(final_imgpts_1[i]);
            const Point2f &p_2(final_imgpts_2[i]);
            float err= sampsonError(F, p_1, p_2);
            serr.push_back(err);
        }
        float max_serr=*max_element(serr.begin(), serr.end());
        cout << "found " << raw_imgpts_1.size() << "matches " << endl;
        cout << " and " << n_inliners << " inliners" << endl;
        cout << " max sampson err" << max_serr << endl;
        return 0;
    }
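
For reference, the filter function above can be called right after the matching step in the question, e.g. filter_imgpts_pairs_with_epipolar_constraint(imgpts1, imgpts2, img_1.cols, img_1.rows).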
    
Answered by Wesam Na:

I believe the problem is that you calculated the fundamental matrix from the brute-force matches alone; you should do some additional filtering of these corresponding points, such as a ratio test and a symmetry test. I recommend you read page 233 of the book "OpenCV 2 Computer Vision Application Programming Cookbook", Chapter 9. It is explained very well!
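
A minimal sketch of what such filtering could look like (not taken from the book; the 0.8 ratio threshold and the NORM_L2 choice are assumptions), using knnMatch instead of the single-nearest-neighbour matching from the question:

    // assumes the keypoints/descriptors from Steps 1-2 of the question
    BFMatcher matcher(NORM_L2);  // no built-in cross-check; symmetry is tested manually below

    std::vector< std::vector<DMatch> > knn12, knn21;
    matcher.knnMatch(descriptors_1, descriptors_2, knn12, 2);  // image 1 -> image 2
    matcher.knnMatch(descriptors_2, descriptors_1, knn21, 2);  // image 2 -> image 1

    const float ratio = 0.8f;  // assumed Lowe-style ratio threshold
    std::vector<DMatch> good12, good21, symmetric;

    // ratio test: keep a match only if its best distance is clearly
    // smaller than the second-best distance
    for (size_t i = 0; i < knn12.size(); ++i)
        if (knn12[i].size() == 2 && knn12[i][0].distance < ratio * knn12[i][1].distance)
            good12.push_back(knn12[i][0]);
    for (size_t i = 0; i < knn21.size(); ++i)
        if (knn21[i].size() == 2 && knn21[i][0].distance < ratio * knn21[i][1].distance)
            good21.push_back(knn21[i][0]);

    // symmetry test: keep only matches that agree in both directions
    for (size_t i = 0; i < good12.size(); ++i)
        for (size_t j = 0; j < good21.size(); ++j)
            if (good12[i].queryIdx == good21[j].trainIdx &&
                good12[i].trainIdx == good21[j].queryIdx)
            {
                symmetric.push_back(good12[i]);
                break;
            }

    // feed `symmetric` (instead of the raw `matches`) into imgpts1/imgpts2 and findFundamentalMat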