Hi,
I have found a set of parameters for which solvePnP does not give correct results, although:
- the image points are perfect coordinates obtained by applying projectPoints to the objectPoints (no error at all in the image points)
- the object points consist of 4 non-coplanar points
- the parameters are quite innocent-looking
- cvPOSIT is perfectly able to reconstruct the pose, while solvePnP seems unable to do so
When I reproject the points using the rotation and translation provided by solvePnP, I get a total error of about 8 pixels (I have tried all the flags: ITERATIVE, P3P and EPNP).
If instead I use the rotation and translation provided by cvPOSIT, the reprojection error goes down to 0.00014 pixels!
Here is a short program that demonstrates the error: I simply instantiate a camera matrix, some object points and their corresponding image points, the latter obtained through projectPoints.
Then I compare the reprojection and I get 8 pixels of error!
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <cstdlib> // malloc/free, used further below
#include <iostream>

namespace { float DistPoint(const cv::Point2f &p1, const cv::Point2f &p2); } // defined further below
void TestSolvePnp()
{
std::vector<cv::Point3f> objectPoints;
objectPoints.push_back(cv::Point3f(0.f, 0.f, 0.f));
objectPoints.push_back(cv::Point3f(62.3174629f, 15.7940502f, 0.819983721f));
objectPoints.push_back(cv::Point3f(-62.1225319f, 15.7540569f, 0.819464564f));
objectPoints.push_back(cv::Point3f(-0.372639507f, 16.4230633f, -36.5060043f));
cv::Mat_<float> cameraIntrinsicParams(cv::Size(3, 3));
cameraIntrinsicParams = 0.;
cameraIntrinsicParams(0, 0) = 3844.4400000000001f;
cameraIntrinsicParams(1, 1) = 3841.0599999999999f;
cameraIntrinsicParams(0, 2) = 640.f;
cameraIntrinsicParams(1, 2) = 380.f;
cameraIntrinsicParams(2, 2) = 1.f;
cv::Mat_<float> distCoeffs(cv::Size(1, 4));
distCoeffs = 0.f;
cv::Mat_<float> rotation(cv::Size(3, 1));
rotation(0,0) = 0.07015543380659847f;
rotation(0,1) = 0.06922079477774973f;
rotation(0,2) = -0.00254676088325f;
cv::Mat_<float> translation(cv::Size(3, 1));
translation(0,0) = -35.3236f;
translation(0,1) = -48.1699f;
translation(0,2) = 769.068f;
std::vector<cv::Point2f> imagePoints;
cv::projectPoints(objectPoints, rotation, translation, cameraIntrinsicParams, distCoeffs, imagePoints);
cv::Mat_<double> rotation2(cv::Size(3, 1)); // solvePnP creates its output as CV_64F, so use double here
cv::Mat_<double> translation2(cv::Size(3, 1));
cv::solvePnP(objectPoints, imagePoints, cameraIntrinsicParams, distCoeffs,
rotation2, translation2,
false,        // useExtrinsicGuess (must be passed explicitly, otherwise the flag lands in this slot)
cv::ITERATIVE // fails; cv::EPNP and cv::P3P fail as well
);
std::vector<cv::Point2f> imagePoints_Reproj; // projectPoints resizes it to the 4 points
cv::projectPoints(objectPoints, rotation2, translation2, cameraIntrinsicParams, distCoeffs, imagePoints_Reproj);
float sum = 0.f;
for (size_t i = 0; i < imagePoints.size(); i++)
sum += DistPoint(imagePoints_Reproj[i], imagePoints[i]);
std::cout << "sum=" << sum << std::endl;
}
int main(int argc, char **argv)
{
TestSolvePnp();
}
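(Aside, and just a sketch on my part: to try the three flags in a single run, the solvePnP call inside TestSolvePnp() can be wrapped in a loop like this, reusing the variables declared above; cv::ITERATIVE, cv::EPNP and cv::P3P are the flag values of the 2.4 C++ API.)

// Sketch: same comparison as above, once per solvePnP flag.
const int pnpFlags[] = { cv::ITERATIVE, cv::EPNP, cv::P3P };
for (size_t f = 0; f < 3; f++)
{
cv::solvePnP(objectPoints, imagePoints, cameraIntrinsicParams, distCoeffs,
rotation2, translation2, false, pnpFlags[f]);
cv::projectPoints(objectPoints, rotation2, translation2, cameraIntrinsicParams, distCoeffs, imagePoints_Reproj);
float sumFlag = 0.f;
for (size_t i = 0; i < imagePoints.size(); i++)
sumFlag += DistPoint(imagePoints_Reproj[i], imagePoints[i]);
std::cout << "flag=" << pnpFlags[f] << " sum=" << sumFlag << std::endl;
}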
However, if I replace the call to solvePnP with a call to cvPOSIT, the reprojection error goes down to 0.00014 pixels!
Below is the implementation of a function named MySolvePnpPosit() that reproduces the behavior of solvePnP but uses cvPOSIT internally => when I use it, the error goes down to almost zero.
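For instance, the cv::solvePnP call in TestSolvePnp() can be swapped like this (rotationPosit and translationPosit are just names I use here for the fresh output Mats):

// In TestSolvePnp(), replace the cv::solvePnP call with:
cv::Mat rotationPosit, translationPosit;
MySolvePnpPosit(objectPoints, imagePoints, cameraIntrinsicParams, distCoeffs,
rotationPosit, translationPosit);
cv::projectPoints(objectPoints, rotationPosit, translationPosit,
cameraIntrinsicParams, distCoeffs, imagePoints_Reproj);
// The reprojection sum printed afterwards then drops to about 0.00014 pixels.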
namespace
{
void Posit_IdealCameraCoords(const std::vector<cv::Point3f> &objectPoints, const std::vector<cv::Point2f> &imagePoints,
cv::Mat &outRotationEstimated, cv::Mat &outTranslationEstimated)
{
CvPoint2D32f * imagePoints_c = (CvPoint2D32f *) malloc(sizeof(CvPoint2D32f) * imagePoints.size());
{
for (size_t i = 0; i < imagePoints.size(); i++)
imagePoints_c[i] = cvPoint2D32f(imagePoints[i].x, imagePoints[i].y);
}
CvPoint3D32f * objectPoints_c = (CvPoint3D32f *) malloc(sizeof(CvPoint3D32f) * objectPoints.size());
{
for (size_t i = 0; i < objectPoints.size(); i++)
objectPoints_c[i] = cvPoint3D32f(objectPoints[i].x, objectPoints[i].y, objectPoints[i].z);
}
CvPOSITObject * positObject = cvCreatePOSITObject(objectPoints_c, (int)objectPoints.size());
CvTermCriteria criteria;
criteria.type = CV_TERMCRIT_EPS|CV_TERMCRIT_ITER;
criteria.epsilon = 1e-16;
criteria.max_iter = 30;
float positTranslationArray[3];
float positRotationArray[9];
const double idealFocal = 1.;
cvPOSIT(positObject, imagePoints_c,
idealFocal, criteria,
positRotationArray, positTranslationArray);
cv::Mat positRotationMat1x3;
{
cv::Mat_<float> positRotationMat3x3(cv::Size(3, 3));
{
int idx = 0;
for (int j = 0; j < 3; j++)
{
for (int i = 0; i < 3; i++)
{
positRotationMat3x3(j, i) = positRotationArray[idx++];
}
}
}
cv::Rodrigues(positRotationMat3x3, positRotationMat1x3);
}
outRotationEstimated = positRotationMat1x3;
outTranslationEstimated = cv::Mat_<float>(cv::Size(1, 3));
for (int i = 0; i < 3; i++)
outTranslationEstimated.at<float>(i, 0) = positTranslationArray[i];
cvReleasePOSITObject(&positObject);
free(imagePoints_c);
free(objectPoints_c);
}
cv::Point2f ImageCoordsToIdealCameraCoords(const cv::Mat_<float> &cameraIntrinsicParams, const cv::Point2f &pt)
{
return cv::Point2f(
( pt.x - cameraIntrinsicParams.at<float>(0, 2) ) / cameraIntrinsicParams.at<float>(0, 0),
( pt.y - cameraIntrinsicParams.at<float>(1, 2) ) / cameraIntrinsicParams.at<float>(1, 1) );
}
float DistPoint(const cv::Point2f & p1, const cv::Point2f &p2)
{
float dist = sqrt( (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y));
return dist;
}
}
void MySolvePnpPosit(const std::vector<cv::Point3f> &objectPoints, const std::vector<cv::Point2f> &imagePoints,
const cv::Mat_<float> &cameraIntrinsicParams, const cv::Mat_<float> &distCoeffs,
cv::Mat &outRotationEstimated, cv::Mat &outTranslationEstimated)
{
//TODO: handle distortion with cv::undistortPoints (see the sketch after this listing)
std::vector<cv::Point2f> imagePoints_IdealCameraCoords;
for (const auto & pt : imagePoints)
{
imagePoints_IdealCameraCoords.push_back(ImageCoordsToIdealCameraCoords(cameraIntrinsicParams, pt));
}
Posit_IdealCameraCoords(objectPoints, imagePoints_IdealCameraCoords, outRotationEstimated, outTranslationEstimated);
}
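As a side note on the //TODO above: I have not tested this, but my understanding is that cv::undistortPoints, when called without the optional R and P arguments, undistorts and normalizes in one step, so a distortion-aware variant could look like the sketch below (the function name is mine; in 2.4, cv::undistortPoints lives in opencv2/imgproc/imgproc.hpp, if I remember correctly):

void MySolvePnpPosit_WithDistortion(const std::vector<cv::Point3f> &objectPoints, const std::vector<cv::Point2f> &imagePoints,
const cv::Mat_<float> &cameraIntrinsicParams, const cv::Mat_<float> &distCoeffs,
cv::Mat &outRotationEstimated, cv::Mat &outTranslationEstimated)
{
// Without R and P, undistortPoints outputs ideal (normalized) camera
// coordinates, which is exactly what Posit_IdealCameraCoords expects.
std::vector<cv::Point2f> imagePoints_IdealCameraCoords;
cv::undistortPoints(imagePoints, imagePoints_IdealCameraCoords, cameraIntrinsicParams, distCoeffs);
Posit_IdealCameraCoords(objectPoints, imagePoints_IdealCameraCoords, outRotationEstimated, outTranslationEstimated);
}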
Does anyone have an explanation?
Thanks in advance!