Inconsistent Intrinsic Parameters in Stereo Camera Calibration Using a ChArUco Board


I am calibrating a stereo system using a ChArUco board, chosen because of the short focus distances of the cameras in my setup, which prevent me from capturing an entire classic chessboard grid with both cameras simultaneously.

I've encountered an issue where the intrinsic parameters obtained are unexpectedly inconsistent: the two cameras, despite being the same model and mounted parallel with a baseline of roughly 10 cm, yield different results. I have a benchmark for what the intrinsics should look like, since calibrating each camera individually gives satisfactory results. The discrepancy becomes apparent when comparing those individual calibrations to the results obtained from the simultaneous stereo calibration.

Code:

import numpy as np
import cv2
import glob
import os
from cv2 import aruco

################ FIND CHARUCO CORNERS - OBJECT POINTS AND IMAGE POINTS #############################

frameSize = (1440,1080)
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_5X5_1000)
charuco_board = aruco.CharucoBoard((20, 30), 17, 8.5, aruco_dict) 
charuco_detector = aruco.CharucoDetector(charuco_board)
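# NOTE: the board is 20x30 squares with square side 17 and marker side 8.5,
# in whatever length unit the object points should be expressed in (e.g. mm)
# NOTE: charuco_detector (the OpenCV >= 4.7 detector API) is created here but
# never used below; detection goes through the legacy aruco functions instead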

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpointsL = [] # 2d points in image plane.
imgpointsR = [] # 2d points in image plane.


imagesLeft = sorted(glob.glob('images/StereoLeft/images_1/*.png'))
imagesRight = sorted(glob.glob('images/StereoRight/images_1/*.png'))

for imgLeft, imgRight in zip(imagesLeft, imagesRight):
    
    imgL = cv2.imread(imgLeft)
    imgR = cv2.imread(imgRight)
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    cornersL, idsL, _ = aruco.detectMarkers(grayL, aruco_dict)
    cornersR, idsR, _ = aruco.detectMarkers(grayR, aruco_dict)

    aruco.drawDetectedMarkers(imgL, cornersL, idsL)
    aruco.drawDetectedMarkers(imgR, cornersR, idsR)

    # If found, add object points, image points (after refining them)
    if idsL is not None and idsR is not None:  # Ensure markers are detected in both images
        retL, charucoCornersL, charucoIdsL = aruco.interpolateCornersCharuco(cornersL, idsL, grayL, charuco_board)
        retR, charucoCornersR, charucoIdsR = aruco.interpolateCornersCharuco(cornersR, idsR, grayR, charuco_board)
        if retL > 0 and retR > 0:  # If ChArUco corners are interpolated

            if charucoIdsL is None:
                charucoIdsL = np.array([])
            if charucoIdsR is None:
                charucoIdsR = np.array([])

            commonIds = np.intersect1d(charucoIdsL, charucoIdsR, assume_unique=True)
            
            if commonIds.size > 0:
                # Filter corners with matching ids
                indicesL = np.isin(charucoIdsL, commonIds).flatten()
                indicesR = np.isin(charucoIdsR, commonIds).flatten()

                matchingCornersL = charucoCornersL[indicesL]
                matchingCornersR = charucoCornersR[indicesR]
                matchingIds = charucoIdsL[indicesL]  # charucoIdsR[indicesR] would be the same
                # print(charucoIdsL[indicesL].flatten(),charucoIdsR[indicesR].flatten())
                # Generate object points from the matched ids (legacy helper;
                # the newer board.matchImagePoints is its documented replacement)
                
                objp, imgPointsL = aruco.getBoardObjectAndImagePoints(charuco_board, matchingCornersL, matchingIds)
                _, imgPointsR = aruco.getBoardObjectAndImagePoints(charuco_board, matchingCornersR, matchingIds)
                
                min_points_required = 6
                if len(imgPointsL) >= min_points_required and len(imgPointsR) >= min_points_required:
                    objpoints.append(objp)
                    imgpointsL.append(imgPointsL)
                    imgpointsR.append(imgPointsR)

        cv2.imshow('img left', imgL)
        cv2.imshow('img right', imgR)
        cv2.waitKey(500)


cv2.destroyAllWindows()

############## CALIBRATION #######################################################

retL, cameraMatrixL, distL, rvecsL, tvecsL = cv2.calibrateCamera(objpoints, imgpointsL, frameSize, None, None)
heightL, widthL, channelsL = imgL.shape
newCameraMatrixL, roi_L = cv2.getOptimalNewCameraMatrix(cameraMatrixL, distL, (widthL, heightL), 1, (widthL, heightL))

retR, cameraMatrixR, distR, rvecsR, tvecsR = cv2.calibrateCamera(objpoints, imgpointsR, frameSize, None, None)
heightR, widthR, channelsR = imgR.shape
newCameraMatrixR, roi_R = cv2.getOptimalNewCameraMatrix(cameraMatrixR, distR, (widthR, heightR), 1, (widthR, heightR))

print(newCameraMatrixL)
print()
print(newCameraMatrixR)

directory_path = os.getcwd()

# Specify the file name
file_name = "intrinsics.txt"

# Combine the directory path and file name to form the full file path
file_path = os.path.join(directory_path, file_name)

# Writing to the file
with open(file_path, 'w') as file:
    # Writing Camera 1 matrix
    file.write('Matrix L\nCamera 1\n')
    for row in newCameraMatrixL:
        file.write(' '.join(map(str, row)) + '\n')
    file.write('\n')
    
    # Writing Camera 2 matrix
    file.write('Matrix R\nCamera 2\n')
    for row in newCameraMatrixR:
        file.write(' '.join(map(str, row)) + '\n')


########## Stereo Vision Calibration #############################################

flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC
# Fixing the intrinsic camera matrices means only Rot, Trns, Emat and Fmat
# are calculated, so the intrinsic parameters stay the same

criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# This step computes the transformation between the two cameras plus the
# Essential and Fundamental matrices. flags and criteria are passed by
# keyword: positionally they would land in stereoCalibrate's R/T slots.
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv2.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria=criteria_stereo, flags=flags)


# Reprojection Error
mean_error = 0

for i in range(len(objpoints)):
    # rvecsL/tvecsL come from the mono calibration, so project with the
    # matching mono intrinsics rather than the optimal new camera matrix
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecsL[i], tvecsL[i], cameraMatrixL, distL)
    error = cv2.norm(imgpointsL[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
    mean_error += error


print("Total error: {}".format(mean_error/len(objpoints)))


########## Stereo Rectification #################################################

rectifyScale = 1
# alpha and newImageSize are passed by keyword; positionally they would land
# in stereoRectify's R1/R2 output slots
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv2.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, alpha=rectifyScale, newImageSize=(0,0))
print(Q)

stereoMapL = cv2.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv2.CV_16SC2)
stereoMapR = cv2.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv2.CV_16SC2)

print("Saving parameters!")
cv_file = cv2.FileStorage('stereoMap.xml', cv2.FILE_STORAGE_WRITE)

cv_file.write('stereoMapL_x',stereoMapL[0])
cv_file.write('stereoMapL_y',stereoMapL[1])
cv_file.write('stereoMapR_x',stereoMapR[0])
cv_file.write('stereoMapR_y',stereoMapR[1])
cv_file.write('q', Q)

cv_file.release()

My first goal is to identify the corners visible to both cameras by intersecting the ChArUco corner ids detected in each view. I then append the matched image points for the left and right cameras, while the object points are generated from the matched ids of one camera only (see the matching loop and the two cv2.calibrateCamera calls in the script above).
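For reference, the same matching step can be written with the newer CharucoDetector API (OpenCV >= 4.7) that the script constructs but never uses. The following is a minimal, untested sketch of that alternative, assuming the charuco_board and charuco_detector objects defined above; it is not the code that produced the results below:

def matched_points(grayL, grayR, board=charuco_board, detector=charuco_detector):
    # detectBoard runs marker detection and corner interpolation in one call
    cornersL, idsL, _, _ = detector.detectBoard(grayL)
    cornersR, idsR, _, _ = detector.detectBoard(grayR)
    if idsL is None or idsR is None:
        return None
    # Keep only the chessboard-corner ids seen by both cameras
    common = np.intersect1d(idsL, idsR)
    if common.size == 0:
        return None
    maskL = np.isin(idsL, common).flatten()
    maskR = np.isin(idsR, common).flatten()
    # matchImagePoints pairs each kept corner id with its 3D board point
    objp, imgpL = board.matchImagePoints(cornersL[maskL], idsL[maskL])
    _, imgpR = board.matchImagePoints(cornersR[maskR], idsR[maskR])
    return objp, imgpL, imgpR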


Results:

[[5.51660251e+03 0.00000000e+00 6.16936188e+02]
 [0.00000000e+00 1.42359236e+04 5.30500368e+02]
 [0.00000000e+00 0.00000000e+00 1.00000000e+00]]

[[2.22078760e+03 0.00000000e+00 7.22814921e+02]
 [0.00000000e+00 2.22879614e+03 5.06648447e+02]
 [0.00000000e+00 0.00000000e+00 1.00000000e+00]]

The differences between the two matrices are significant: the bottom matrix looks well calibrated, while the top one shows odd values (fx ≈ 5517 vs fy ≈ 14236) that do not match the intrinsics obtained from the individual calibrations. The issue persists across datasets of 10, 20, 100, and 400 image pairs. If I load the camera images in the opposite order, the problem simply moves with it, and the bottom matrix then shows the inaccurate values. I am at a loss to find the root cause of the discrepancy.
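In case it helps to localize the problem, here is a hypothetical per-view diagnostic, not part of the script above: it reuses the mono calibration outputs (cameraMatrixL, distL, rvecsL, tvecsL) to print the RMS reprojection error of each view for the left camera, so an outlier frame should stand out.

# Hypothetical diagnostic: per-view RMS reprojection error, left camera
for i in range(len(objpoints)):
    proj, _ = cv2.projectPoints(objpoints[i], rvecsL[i], tvecsL[i], cameraMatrixL, distL)
    diff = imgpointsL[i].reshape(-1, 2) - proj.reshape(-1, 2)
    rms = np.sqrt((diff ** 2).sum(axis=1).mean())
    print(f"view {i:3d}: {len(diff):3d} corners, RMS {rms:.3f} px")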

Illustration of the setup:

[image: Setup]
