I am running bundle adjustment on some known data using the g2o library. My function is based on the one in ORB-SLAM2, and takes in two camera poses and a std::vector of 3D points with corresponding 2D points, or 'observations', in each of the camera poses.
It runs fine, and the reprojected points look correct when I use the known (correct) camera poses. When I add noise to the second camera pose, however, it still runs and completes optimisation, but the resulting pose looks exactly the same as the noisy input data.
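For reference, the KeyFrame and MapPoint types look roughly like this. This is only a minimal sketch I have reconstructed from the member accesses in the function below; the real classes contain more fields than this:

#include <map>
#include <vector>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <opencv2/core.hpp>

struct KeyFrame
{
    int id;                            // 0 = reference camera (held fixed), 1 = second camera
    Eigen::Quaterniond quat;           // camera orientation
    Eigen::Vector3d pos;               // camera translation
    double fx, fy, cx, cy;             // pinhole intrinsics
    std::vector<cv::Point2f> vObs_pos; // 2D observations, indexed via MapPoint::mObs
};

struct MapPoint
{
    cv::Point3f pos;                   // 3D position of the point
    std::map<KeyFrame *, size_t> mObs; // keyframe -> index into that keyframe's vObs_pos
};

The full function is: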
void BundleAdjustment::Solve(const std::vector<KeyFrame *> &vpKFs,
                             const std::vector<MapPoint *> &allMapPoints,
                             cv::Mat intrinsic, cv::Mat dist,
                             std::vector<cv::Point2f> &projPnts)
{
    dogleg = false;
    int64 t0 = cv::getTickCount();

    g2o::SparseOptimizer optimizer;
    optimizer.setVerbose(false);

    std::unique_ptr<g2o::BlockSolver_6_3::LinearSolverType> linearSolver;
    if (dense)
    {
        linearSolver = g2o::make_unique<g2o::LinearSolverDense<g2o::BlockSolver_6_3::PoseMatrixType>>();
    }
    else
    {
        linearSolver = g2o::make_unique<g2o::LinearSolverEigen<g2o::BlockSolver_6_3::PoseMatrixType>>();
    }
    // select the optimisation algorithm (Dogleg or Levenberg-Marquardt)
    if (dogleg)
    {
        g2o::OptimizationAlgorithmDogleg *solver = new g2o::OptimizationAlgorithmDogleg(
            g2o::make_unique<g2o::BlockSolver_6_3>(std::move(linearSolver)));
        optimizer.setAlgorithm(solver);
    }
    else
    {
        g2o::OptimizationAlgorithmLevenberg *solver = new g2o::OptimizationAlgorithmLevenberg(
            g2o::make_unique<g2o::BlockSolver_6_3>(std::move(linearSolver)));
        optimizer.setAlgorithm(solver);
    }
    int num_cameras_ = vpKFs.size();
    int num_points_ = allMapPoints.size();

    const float thHuber2D = sqrt(5.99); // 95% chi-squared threshold for 2 degrees of freedom

    const int nExpectedSize = (vpKFs.size() + allMapPoints.size());
    std::vector<g2o::EdgeSE3ProjectXYZ *> vpEdgesMono;
    vpEdgesMono.reserve(nExpectedSize);
    // camera poses
    for (size_t i = 0; i < vpKFs.size(); i++)
    {
        KeyFrame *pKF = vpKFs[i];
        g2o::VertexSE3Expmap *vSE3 = new g2o::VertexSE3Expmap();

        g2o::SE3Quat squat;
        Eigen::Quaterniond qq(pKF->quat.w(), pKF->quat.x(), pKF->quat.y(), pKF->quat.z());
        squat.setRotation(qq);

        Eigen::Vector3d pp(pKF->pos.x(), pKF->pos.y(), pKF->pos.z());
        if (pKF->id == 1)
        {
            pp.x() += 1.2; // ADD NOISE to the second camera's translation
        }
        squat.setTranslation(pp);

        vSE3->setEstimate(squat);
        vSE3->setId(pKF->id);
        if (pKF->id == 0)
        {
            vSE3->setFixed(true); // hold the first camera fixed as the reference frame
        }
        optimizer.addVertex(vSE3);
    }
    // set point vertices
    for (size_t i = 0; i < allMapPoints.size(); i++)
    {
        MapPoint *pMP = allMapPoints[i];
        g2o::VertexSBAPointXYZ *vPoint = new g2o::VertexSBAPointXYZ();
        g2o::Vector3 pp(pMP->pos.x, pMP->pos.y, pMP->pos.z);
        vPoint->setEstimate(pp);
        vPoint->setId(i + num_cameras_); // point ids follow on from the camera ids
        vPoint->setMarginalized(true);
        optimizer.addVertex(vPoint);

        // set edges: one reprojection edge per observation of this point
        const std::map<KeyFrame *, size_t> &observations = pMP->mObs;
        for (std::map<KeyFrame *, size_t>::const_iterator mit = observations.begin(); mit != observations.end(); mit++)
        {
            KeyFrame *pKF = mit->first;
            const cv::Point2f &kpUn = pKF->vObs_pos[mit->second];
            Eigen::Matrix<double, 2, 1> obs;
            obs << kpUn.x, kpUn.y;

            g2o::EdgeSE3ProjectXYZ *e = new g2o::EdgeSE3ProjectXYZ();

            // g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
            // g2o::RobustKernelDCS* rk = new g2o::RobustKernelDCS;
            g2o::RobustKernelCauchy *rk = new g2o::RobustKernelCauchy;
            rk->setDelta(thHuber2D);
            e->setRobustKernel(rk);

            const int camera_id = pKF->id;
            const int point_id = i + num_cameras_;
            e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex *>(optimizer.vertex(point_id)));
            e->setVertex(1, dynamic_cast<g2o::OptimizableGraph::Vertex *>(optimizer.vertex(camera_id)));
            e->setMeasurement(obs);
            e->setInformation(Eigen::Matrix2d::Identity());
            e->fx = pKF->fx;
            e->fy = pKF->fy;
            e->cx = pKF->cx;
            e->cy = pKF->cy;

            vpEdgesMono.push_back(e);
            optimizer.addEdge(e);
        }
    }
    // start optimisation
    std::cout << "Optimization starts..." << std::endl;
    optimizer.initializeOptimization();
    optimizer.optimize(20);
    std::cout << "Optimization completed!" << std::endl;

    // read the refined second camera pose back out and reproject the points through it
    std::cout << "Writing camera result" << std::endl;
    for (int i = 0; i < num_cameras_; i++)
    {
        g2o::VertexSE3Expmap *pCamera = dynamic_cast<g2o::VertexSE3Expmap *>(optimizer.vertex(i));
        g2o::SE3Quat SE3quat = pCamera->estimate();
        g2o::Quaternion gQuat = SE3quat.rotation();
        g2o::Vector3 gTrans = SE3quat.translation();
        if (i == 1)
        {
            Eigen::Matrix3d er = gQuat.toRotationMatrix();
            cv::Mat R;
            cv::Mat t;
            cv::Mat rvec;
            eigen2cv(er, R);
            eigen2cv(gTrans, t);
            cv::Rodrigues(R, rvec);

            std::vector<cv::Point3f> Pnts;
            for (size_t x = 1; x < allMapPoints.size(); x++)
            {
                Pnts.push_back(allMapPoints[x]->pos);
            }
            cv::projectPoints(Pnts, rvec, t, intrinsic, dist, projPnts);

            std::cout << i << " " << gTrans.x() << " " << gTrans.y() << " " << gTrans.z() << " "
                      << gQuat.x() << " " << gQuat.y() << " " << gQuat.z() << " " << gQuat.w() << std::endl;
        }
    }
}
Where am I going wrong with this? Is my data not being optimised? Or am I reading the data back incorrectly?
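For what it's worth, the sanity check I was planning to add around the existing initializeOptimization()/optimize() calls looks roughly like this (a rough sketch using what I understand to be standard g2o::SparseOptimizer calls; it is not part of the function above), to see whether the total error actually changes:

    // compare the graph error before and after optimisation
    optimizer.initializeOptimization();
    optimizer.computeActiveErrors();
    std::cout << "chi2 before: " << optimizer.activeChi2() << std::endl;

    optimizer.optimize(20);

    optimizer.computeActiveErrors();
    std::cout << "chi2 after:  " << optimizer.activeChi2() << std::endl;

If the chi2 value does not drop at all, I would take that to mean nothing is being optimised rather than the result being read back wrongly.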