Opengl Augmented Reality in Android from solvepnp

759 views Asked by At

I am trying to project a 3D surface in opengl on top of a camera view in Android. I am using opencv function solvepnp in native code to get the rotation and translation vectors of the device camera. I use these vectors to calculate the modelView matrix and modelViewProjection matrix as given in the following link:

http://spottrlabs.blogspot.com/2012/07/opencv-and-opengl-not-always-friends.html

Finally, I pass these matrices to the java code in a float array but when I use these matrices in my renderer I don't get the surface projection on the screen. I am sure that my 3D surface is generated properly because when I use an arbitrary value of modelView and modelViewProjection matrices I can see the projection. But when I use the matrices computed from the solvepnp function I don't get anything. Can someone please tell me where I am wrong? Following are the relevant pieces of my code:

"Native code in cpp file"

Rodrigues(r, expandedR);  // r is the rotation vector received form solvepnp function

Mat Rt = Mat::zeros(4, 4, CV_64FC1);
for (int y = 0; y < 3; y++) {
   for (int x = 0; x < 3; x++) {
      Rt.at<double>(y, x) = expandedR.at<double>(y, x);
   }
   Rt.at<double>(y, 3) = t.at<double>(y, 0);    //t is the translation vector from solvepnp
}
Rt.at<double>(3, 3) = 1.0;

//OpenGL has reversed Y & Z coords
Mat reverseYZ = Mat::eye(4, 4, CV_64FC1);
reverseYZ.at<double>(1, 1) = reverseYZ.at<double>(2, 2) = -1;

//since we are in landscape mode
Mat rot2D = Mat::eye(4, 4, CV_64FC1);
rot2D.at<double>(0, 0) = rot2D.at<double>(1, 1) = 0;
rot2D.at<double>(0, 1) = 1;
rot2D.at<double>(1, 0) = -1;

Mat projMat = Mat::zeros(4, 4, CV_64FC1);
float fard = 10000, neard = 5;
float imageWidth=1024.0f;
float imageHeight=576.0f;
projMat.at<double>(0, 0) = 2*scaledCameraMatrix.at<double>(0, 0)/imageWidth;
projMat.at<double>(0, 2) = -1 + (2*scaledCameraMatrix.at<double>(0, 2)/imageWidth);
projMat.at<double>(1, 1) = 2*scaledCameraMatrix.at<double>(1, 1)/imageHeight;
projMat.at<double>(1, 2) = -1 + (2*scaledCameraMatrix.at<double>(1, 2)/imageHeight);
projMat.at<double>(2, 2) = -(fard+neard)/(fard-neard);
projMat.at<double>(2, 3) = -2*fard*neard/(fard-neard);
projMat.at<double>(3, 2) = -1;

Mat mvMat = reverseYZ * Rt;
projMat = rot2D * projMat;

Mat mvp = projMat * mvMat;
float arr[16];
float arr2[16];

int count=0;
for(int i=0;i<4;i++)
{
    for(int j=0;j<4;j++)
    {
        arr[count]=(float)mvp.at<double>(i,j);
        arr2[count++]=(float)mvMat.at<double>(i,j);
    }
}

env->SetFloatArrayRegion(sendfloatArray, 0,16, &arr[0]);
env->SetFloatArrayRegion(sendfloatArray, 16,16, &arr2[0]);

The following is the code in my Renderer class

    // Pass in the modelview matrix ma.mvMatrix received from native code
    GLES20.glUniformMatrix4fv(mvMatrixUniform, 1, false,ma.mvMatrix, 0); 



    // Pass in the modelviewprojection matrix ma.mvpMatrix received from native code
    GLES20.glUniformMatrix4fv(mvpMatrixUniform, 1, false, ma.mvpMatrix, 0);

The following is the code in my fragment shader:

precision mediump float;        // Set the default precision to medium. We don't need as high of a 
                            // precision in the fragment shader.



 varying vec4 v_Color;              // This is the color from the vertex shader interpolated across the 
                            // triangle per fragment.


  // The entry point for our fragment shader.
  void main() {                              


  gl_FragColor = v_Color;                                       
}                                                                       

The following is the code in my vertex shader

  uniform mat4 u_MVPMatrix;     // A constant representing the combined model/view/projection matrix.                  
 uniform mat4 u_MVMatrix;       // A constant representing the combined model/view matrix.              

  attribute vec4 a_Position;        // Per-vertex position information we will pass in.                 
  attribute vec4 a_Color;           // Per-vertex color information we will pass in.                


   varying vec4 v_Color;            // This will be passed into the fragment shader.                


  // The entry point for our vertex shader.  
 void main() {                                                    



// Pass through the color.
v_Color = a_Color;



// gl_Position is a special variable used to store the final position.
// Multiply the vertex by the matrix to get the final point in normalized screen coordinates.
gl_Position = u_MVPMatrix * a_Position;                               
}                                  
1

There are 1 answers

2
ponfey he On
projMat.at<double>(2, 3) = -1;
projMat.at<double>(3, 2) = -2*fard*neard/(fard-neard);