
Stereo Camera Rectification in OpenCV C++ Using MATLAB Calibration Data


Stereo camera rectification

A series of follow-up articles is in progress: implementing stereo rectification in C/C++ without OpenCV, along with a source-level walkthrough of the rectification code.

Now for the main content.

Calibration steps:

1. MATLAB's calibration is quite accurate. In the MATLAB command window, type stereoCameraCalibrator and press Enter.
2. Add the left images and the right images, then confirm.
3. Choose the distortion-coefficient settings and click Calibrate.
4. Drag the red threshold line to delete the image pairs with large errors; ideally, the mean reprojection error should end up below 0.1 pixels. Then export the calibration parameters.

From the exported parameters we need R and T, the rotation matrix and translation vector of the right camera relative to the left camera; these are the extrinsics. We also need each camera's intrinsics. Pay attention to the order OpenCV expects for the distortion coefficients: k1, k2, p1, p2, k3.
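When transferring the exported numbers into OpenCV, the ordering is the main trap: MATLAB reports RadialDistortion as [k1 k2 k3] and TangentialDistortion as [p1 p2] in two separate fields, and its IntrinsicMatrix is the transpose of the K matrix OpenCV expects. Whether the rotation needs a transpose depends on how it is exported; the R used below is taken as-is. A minimal repacking sketch (the numeric values are placeholders rounded from the matrices used in the programs below):

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // values copied out of MATLAB's stereoParams (placeholders)
    double fx = 464.165, fy = 463.979, cx = 346.971, cy = 239.624, skew = 0.203;
    double k1 = -0.0103, k2 = 0.0644, k3 = -0.1250;   // MATLAB RadialDistortion [k1 k2 k3]
    double p1 = -2.93e-07, p2 = -6.02e-04;            // MATLAB TangentialDistortion [p1 p2]

    // OpenCV wants this row-major K; MATLAB's IntrinsicMatrix is its transpose
    cv::Mat K = (cv::Mat_<double>(3, 3) << fx, skew, cx,
                                           0, fy, cy,
                                           0, 0, 1);
    // OpenCV's distortion vector interleaves the two MATLAB fields: k1, k2, p1, p2, k3
    cv::Mat D = (cv::Mat_<double>(5, 1) << k1, k2, p1, p2, k3);

    std::cout << "K =\n" << K << "\nD = " << D.t() << std::endl;
    return 0;
}

With the parameters in OpenCV form, the first program below rectifies an image pair twice: once with the OpenCV API as a reference, and once with a hand-rolled implementation of the same pipeline.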

/******************************/
/*    Stereo rectification    */
/******************************/
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <cstring>
#include <ctime>

using namespace std;
using namespace cv;

const int imageWidth = 640;                 // camera resolution
const int imageHeight = 480;
Size imageSize = Size(imageWidth, imageHeight);

Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;

Mat mapLx, mapLy, mapRx, mapRy;             // remap lookup tables
Mat Rl, Rr, Pl, Pr, Q;                      // rectification rotations R, projection matrices P, reprojection matrix Q
Rect validROIL, validROIR;                  // rectification crops the images; validROI is the cropped region

/* Camera parameters calibrated beforehand in MATLAB:
   fx  0  cx
   0  fy  cy
   0   0   1 */
Mat cameraMatrixL = (Mat_<double>(3, 3) <<
    4.641649743166592e+02, 0.203084599224514, 3.469714483998573e+02,
    0, 4.639793546994110e+02, 2.396244369267172e+02,
    0, 0, 1);
Mat distCoeffL = (Mat_<double>(5, 1) <<
    -0.010300050601310, 0.064379489444383, -2.934773993455599e-07, -6.021176879967455e-04, -0.125003779025649);
Mat cameraMatrixR = (Mat_<double>(3, 3) <<
    4.613690699223143e+02, 0.063516507352252, 3.374687093675794e+02,
    0, 4.614334172321526e+02, 2.387183895998068e+02,
    0, 0, 1);
Mat distCoeffR = (Mat_<double>(5, 1) <<
    -0.010376238721412, 0.031853049017904, 9.707295405719327e-05, 4.876769556016511e-04, -0.066752763994806);

Mat T = (Mat_<double>(3, 1) << -17.297530803766026, 0.090261340075926, 0.278464128990403);  // translation vector
//Mat rec = (Mat_<double>(3, 1) << -0.02302, -0.00735, 0.00089);  // rotation vector
//Mat R;                                                          // rotation matrix
Mat R = (Mat_<double>(3, 3) <<
    0.999960841819793, -8.857049138945840e-05, 0.008849123251389,
    1.042143805604291e-04, 0.999998432715734, -0.001767400757825,
    -0.008848952842744, 0.001768253755526, 0.999959283827218);    // no transpose needed

// forward declarations
void GetMatrix(float *R, float *P, float *PRI);
void GetMap(float *K, float *D, float *PRI, float *mapx, float *mapy, Size size);
void Imgremap(Mat srcImg, Mat &dstImg, float *mapx, float *mapy);
void Imgremap_new(Mat srcImg, Mat &dstImg, float *mapx, float *mapy);

int main()
{
    /* stereo rectification with the OpenCV API, as the reference */
    Mat API_img = imread("D:/code/Test/left/left_001.png", 0);
    Size ImgSize(640, 480);
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T,
                  Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY, 0, imageSize, &validROIL, &validROIR);
    Mat API_maplx, API_maply;
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, ImgSize, CV_32FC1, API_maplx, API_maply);

    clock_t start_API, end_API;
    Mat API_unimg;
    start_API = clock();
    remap(API_img, API_unimg, API_maplx, API_maply, INTER_LINEAR);
    end_API = clock();
    cout << "API run time:" << (double)(end_API - start_API) << endl;
    imshow("left01_api.jpg", API_unimg);
    printf("opencv api end\n");

    //Rodrigues(rec, R);  // Rodrigues transform, if only the rotation vector is available
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T,
                  Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY, 0, imageSize, &validROIL, &validROIR);

    /* invert P*R for both cameras */
    float PRIl[3][3];
    float PRIr[3][3];
    memset(PRIl, 0, sizeof(PRIl));
    memset(PRIr, 0, sizeof(PRIr));
    float RL[3][3];
    float RR[3][3];
    float PL[3][3];
    float PR[3][3];
    for (int i = 0; i < Rl.rows; i++)
    {
        for (int j = 0; j < Rl.cols; j++)
        {
            RL[i][j] = Rl.at<double>(i, j);
            RR[i][j] = Rr.at<double>(i, j);
        }
    }
    cout << "Rl = \n" << Rl << endl;
    for (int i = 0; i < 3; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            // only the left 3x3 block of the 3x4 projection matrices is needed
            PL[i][j] = Pl.at<double>(i, j);
            PR[i][j] = Pr.at<double>(i, j);
        }
    }
    cout << "Pl = \n" << Pl << endl;
    GetMatrix((float*)RL, (float*)PL, (float*)PRIl);
    GetMatrix((float*)RR, (float*)PR, (float*)PRIr);

    /* build the lookup tables */
    float *maplx = new float[ImgSize.width * ImgSize.height];
    float *maply = new float[ImgSize.width * ImgSize.height];
    float *maprx = new float[ImgSize.width * ImgSize.height];
    float *mapry = new float[ImgSize.width * ImgSize.height];
    float cameraMatrixl[3][3];
    float cameraMatrixr[3][3];
    float distCoeffl[5];
    float distCoeffr[5];
    for (int i = 0; i < 3; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            cameraMatrixl[i][j] = cameraMatrixL.at<double>(i, j);
            cameraMatrixr[i][j] = cameraMatrixR.at<double>(i, j);
        }
    }
    for (int j = 0; j < 5; j++)
    {
        distCoeffl[j] = distCoeffL.at<double>(j);
        distCoeffr[j] = distCoeffR.at<double>(j);
    }
    GetMap((float*)cameraMatrixl, (float*)distCoeffl, (float*)PRIl, maplx, maply, ImgSize);
    GetMap((float*)cameraMatrixr, (float*)distCoeffr, (float*)PRIr, maprx, mapry, ImgSize);

    Mat imgl = imread("D:/Github/left/left_001.png", 0);
    Mat imgr = imread("D:/Github/right/right_001.png", 0);
    //Mat imgl = imread("F:/opencv/sources/samples/data/left02.jpg", 0);
    //Mat imgr = imread("F:/opencv/sources/samples/data/right02.jpg", 0);
    Mat unimgl(imgl.rows, imgl.cols, CV_8UC1);
    Mat unimgr(imgl.rows, imgl.cols, CV_8UC1);

    clock_t start_MY, end_MY;
    start_MY = clock();
    Imgremap_new(imgl, unimgl, maplx, maply);
    Imgremap_new(imgr, unimgr, maprx, mapry);
    end_MY = clock();
    cout << " run time:" << (double)(end_MY - start_MY) << endl;

    // draw horizontal lines to check the epipolar alignment by eye
    for (int i = 20; i < imgl.rows; i += 32)
        line(imgl, Point(0, i), Point(imgl.cols, i), Scalar(0, 255, 0), 1, 8);
    imshow("left01.jpg", imgl);
    imshow("right01.jpg", imgr);
    imshow("left01_MY.jpg", unimgl);
    imshow("right01_MY.jpg", unimgr);

    int ww = imgl.cols;
    int hh = imgl.rows;
    int cc = imgl.channels();
    cout << "org images shape: [" << ww << ", " << hh << ", " << cc << "]" << endl;
    int w = unimgl.cols;
    int h = unimgl.rows;
    int c = unimgl.channels();
    cout << "rec images shape: [" << w << ", " << h << ", " << c << "]" << endl;
    //imwrite("left01_MY.jpg", unimgl);
    //imwrite("right01_MY.jpg", unimgr);
    waitKey(0);

    delete[] maplx;
    delete[] maply;
    delete[] maprx;
    delete[] mapry;
    system("pause");
    return 0;
}

/*
 * @brief Compute the inverse of the matrix P*R
 * @param R   input, pointer to the rectification rotation matrix from stereo calibration
 * @param P   input, pointer to the projection matrix from stereo calibration
 * @param PRI output, pointer to the inverse matrix
 */
void GetMatrix(float *R, float *P, float *PRI)
{
    float temp;
    float PR[3][3];
    memset(PR, 0, sizeof(PR));

    /* PR = P * R */
    for (int i = 0; i < 3; i++)
    {
        for (int k = 0; k < 3; k++)
        {
            temp = P[i * 3 + k];
            for (int j = 0; j < 3; j++)
            {
                PR[i][j] += temp * R[k * 3 + j];
            }
        }
    }

    /* 3x3 inverse via the adjugate divided by the determinant */
    temp = PR[0][0] * PR[1][1] * PR[2][2] + PR[0][1] * PR[1][2] * PR[2][0] + PR[0][2] * PR[1][0] * PR[2][1] -
           PR[0][2] * PR[1][1] * PR[2][0] - PR[0][1] * PR[1][0] * PR[2][2] - PR[0][0] * PR[1][2] * PR[2][1];
    PRI[0] =  (PR[1][1] * PR[2][2] - PR[1][2] * PR[2][1]) / temp;
    PRI[1] = -(PR[0][1] * PR[2][2] - PR[0][2] * PR[2][1]) / temp;
    PRI[2] =  (PR[0][1] * PR[1][2] - PR[0][2] * PR[1][1]) / temp;
    PRI[3] = -(PR[1][0] * PR[2][2] - PR[1][2] * PR[2][0]) / temp;
    PRI[4] =  (PR[0][0] * PR[2][2] - PR[0][2] * PR[2][0]) / temp;
    PRI[5] = -(PR[0][0] * PR[1][2] - PR[0][2] * PR[1][0]) / temp;
    PRI[6] =  (PR[1][0] * PR[2][1] - PR[1][1] * PR[2][0]) / temp;
    PRI[7] = -(PR[0][0] * PR[2][1] - PR[0][1] * PR[2][0]) / temp;
    PRI[8] =  (PR[0][0] * PR[1][1] - PR[0][1] * PR[1][0]) / temp;
    return;
}

/*
 * @brief Build the remap lookup tables
 * @param K    input, pointer to the camera intrinsic matrix from monocular calibration
 * @param D    input, pointer to the distortion coefficients from monocular calibration
 * @param PRI  input, inverse of P*R
 * @param mapx output, x-coordinate lookup table
 * @param mapy output, y-coordinate lookup table
 * @param size input, image resolution
 */
void GetMap(float *K, float *D, float *PRI, float *mapx, float *mapy, Size size)
{
    float fx = K[0];
    float fy = K[4];
    float u0 = K[2];
    float v0 = K[5];
    float k1 = D[0];
    float k2 = D[1];
    float p1 = D[2];
    float p2 = D[3];
    float k3 = D[4];

    /* the standard model: undistortion + stereo rectification.
       Each destination pixel (j, i) is back-projected through (P*R)^-1,
       then pushed through the distortion model and the intrinsics. */
    for (int i = 0; i < size.height; i++)
    {
        float _x = i * PRI[1] + PRI[2];
        float _y = i * PRI[4] + PRI[5];
        float _w = i * PRI[7] + PRI[8];
        for (int j = 0; j < size.width; j++, _x += PRI[0], _y += PRI[3], _w += PRI[6])
        {
            float w = 1.f / _w;
            float x = _x * w;
            float y = _y * w;
            float x2 = x * x;
            float y2 = y * y;
            float r2 = x2 + y2;
            float _2xy = 2 * x * y;
            float kr = 1 + ((k3 * r2 + k2) * r2 + k1) * r2;
            float u = fx * (x * kr + p1 * _2xy + p2 * (r2 + 2 * x2)) + u0;
            float v = fy * (y * kr + p1 * (r2 + 2 * y2) + p2 * _2xy) + v0;
            mapx[i * size.width + j] = u;
            mapy[i * size.width + j] = v;
        }
    }
}

/*
 * @brief Perform the rectification (floating point)
 * @param srcImg input, source image
 * @param dstImg output, rectified image
 * @param mapx   input, x-coordinate lookup table
 * @param mapy   input, y-coordinate lookup table
 */
void Imgremap(Mat srcImg, Mat &dstImg, float *mapx, float *mapy)
{
    int x, y;
    float u, v;
    /* pure floating-point remap + bilinear interpolation */
    for (int i = 0; i < srcImg.rows; i++)
    {
        for (int j = 0; j < srcImg.cols; j++)
        {
            x = (int)mapx[i * srcImg.cols + j];
            y = (int)mapy[i * srcImg.cols + j];
            if (x > 1 && x < (srcImg.cols - 1) && y > 1 && y < (srcImg.rows - 1))
            {
                u = mapx[i * srcImg.cols + j] - x;   // fractional parts = interpolation weights
                v = mapy[i * srcImg.cols + j] - y;
                dstImg.ptr<uchar>(i)[j] = (uchar)(
                    (1 - u) * (1 - v) * srcImg.ptr<uchar>(y)[x] +
                    (1 - u) * v * srcImg.ptr<uchar>(y + 1)[x] +
                    u * (1 - v) * srcImg.ptr<uchar>(y)[x + 1] +
                    u * v * srcImg.ptr<uchar>(y + 1)[x + 1]);
            }
            else
            {
                dstImg.ptr<uchar>(i)[j] = 0;
            }
        }
    }
}

/*
 * @brief Perform the rectification (fixed point)
 * @param srcImg input, source image
 * @param dstImg output, rectified image
 * @param mapx   input, x-coordinate lookup table
 * @param mapy   input, y-coordinate lookup table
 */
void Imgremap_new(Mat srcImg, Mat &dstImg, float *mapx, float *mapy)
{
    /* float-to-fixed-point remap + bilinear interpolation: the weights are
       scaled by 2048 (11 bits), so the four-tap sum carries a factor of
       2^22 that is shifted back out at the end */
    for (int i = 0; i < srcImg.rows; ++i)
    {
        for (int j = 0; j < srcImg.cols; ++j)
        {
            int pdata = i * srcImg.cols + j;
            int x = (int)mapx[pdata];
            int y = (int)mapy[pdata];
            short PartX = (mapx[pdata] - x) * 2048;
            short PartY = (mapy[pdata] - y) * 2048;
            short InvX = 2048 - PartX;
            short InvY = 2048 - PartY;
            if (x > 1 && x < (srcImg.cols - 1) && y > 1 && y < (srcImg.rows - 1))
            {
                dstImg.ptr<uchar>(i)[j] = (uchar)(
                    ((InvX * srcImg.ptr<uchar>(y)[x] + PartX * srcImg.ptr<uchar>(y)[x + 1]) * InvY +
                     (InvX * srcImg.ptr<uchar>(y + 1)[x] + PartX * srcImg.ptr<uchar>(y + 1)[x + 1]) * PartY) >> 22);
            }
            else
            {
                dstImg.ptr<uchar>(i)[j] = 0;
            }
        }
    }
}
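A quick way to validate the hand-rolled pipeline is to compare its lookup tables against the ones OpenCV produced earlier in the same program. A minimal sketch, assuming the maplx/maply and API_maplx/API_maply variables from the listing above (add #include <cmath> for std::fabs):

// Largest absolute difference between a hand-built lookup table (flat float
// array) and an OpenCV CV_32FC1 map of the same size. A result well below
// one pixel means the custom GetMap matches initUndistortRectifyMap for
// practical purposes.
float MapMaxDiff(const float *mymap, const cv::Mat &apimap)
{
    float maxDiff = 0.f;
    for (int i = 0; i < apimap.rows; i++)
        for (int j = 0; j < apimap.cols; j++)
        {
            float d = std::fabs(mymap[i * apimap.cols + j] - apimap.ptr<float>(i)[j]);
            if (d > maxDiff)
                maxDiff = d;
        }
    return maxDiff;
}

// usage, inside main() after the two GetMap() calls:
//   cout << "max |maplx - API_maplx| = " << MapMaxDiff(maplx, API_maplx) << endl;
//   cout << "max |maply - API_maply| = " << MapMaxDiff(maply, API_maply) << endl;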

The second program generates the rectification lookup tables with the OpenCV API and dumps them to text files, so that the rectification can later run without redoing the calibration math:

/******************************/
/* Stereo matching and ranging: export the rectification maps */
/******************************/
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

const int imageWidth = 640;                 // camera resolution
const int imageHeight = 480;
Size imageSize = Size(imageWidth, imageHeight);

Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;

Mat mapLx, mapLy, mapRx, mapRy;             // remap lookup tables
Mat Rl, Rr, Pl, Pr, Q;                      // rectification rotations R, projection matrices P, reprojection matrix Q
Rect validROIL, validROIR;                  // rectification crops the images; validROI is the cropped region

/* Camera parameters calibrated beforehand in MATLAB:
   fx  0  cx
   0  fy  cy
   0   0   1 */
Mat cameraMatrixL = (Mat_<double>(3, 3) <<
    4.641649743166592e+02, 0.203084599224514, 3.469714483998573e+02,
    0, 4.639793546994110e+02, 2.396244369267172e+02,
    0, 0, 1);
Mat distCoeffL = (Mat_<double>(5, 1) <<
    -0.010100050601310, 0.064379489444383, -2.934773993455599e-07, -6.021176879967455e-04, -0.125003779025649);
Mat cameraMatrixR = (Mat_<double>(3, 3) <<
    4.613690699223143e+02, 0.063516507352252, 3.374687093675794e+02,
    0, 4.614334172321526e+02, 2.387183895998068e+02,
    0, 0, 1);
Mat distCoeffR = (Mat_<double>(5, 1) <<
    -0.010376238721412, 0.031853049017904, 9.707295405719327e-05, 4.876769556016511e-04, -0.066752763994806);

Mat T = (Mat_<double>(3, 1) << -17.297530803766026, 0.090261340075926, 0.278464128990403);  // translation vector
//Mat rec = (Mat_<double>(3, 1) << -0.02302, -0.00735, 0.00089);  // rotation vector
//Mat R;                                                          // rotation matrix
Mat R = (Mat_<double>(3, 3) <<
    0.999960841819793, -8.857049138945840e-05, 0.008849123251389,
    1.042143805604291e-04, 0.999998432715734, -0.001767400757825,
    -0.008848952842744, 0.001768253755526, 0.999959283827218);    // no transpose needed

/***** main *****/
int main()
{
    std::ofstream Fs_mapLx("E:\\cpp\\Project1\\mapLx1.txt");
    std::ofstream Fs_mapLy("E:\\cpp\\Project1\\mapLy1.txt");
    std::ofstream Fs_mapRx("E:\\cpp\\Project1\\mapRx1.txt");
    std::ofstream Fs_mapRy("E:\\cpp\\Project1\\mapRy1.txt");

    // stereo rectification
    //Rodrigues(rec, R);  // Rodrigues transform
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T,
                  Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY, 0, imageSize, &validROIL, &validROIR);
    // each camera uses its own projection matrix: Pl for the left, Pr for the right
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);

    int he = mapLx.rows;
    int wi = mapLx.cols;
    // save the rectification maps
    for (int i = 0; i < he; i++)
    {
        for (int j = 0; j < wi; j++)
        {
            Fs_mapLx << mapLx.ptr<float>(i)[j] << "\t";
            Fs_mapLy << mapLy.ptr<float>(i)[j] << "\t";
            Fs_mapRx << mapRx.ptr<float>(i)[j] << "\t";
            Fs_mapRy << mapRy.ptr<float>(i)[j] << "\t";
        }
        Fs_mapLx << endl;
        Fs_mapLy << endl;
        Fs_mapRx << endl;
        Fs_mapRy << endl;
    }
    Fs_mapLx.close();
    Fs_mapLy.close();
    Fs_mapRx.close();
    Fs_mapRy.close();
    return 0;
}
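Once the four text files exist, a later program can rectify frames without any calibration code. A minimal sketch of reading the tables back and applying them; the paths and the 640x480 size follow the program above, the input image name is a placeholder, and some precision is lost because the maps were written as plain text:

#include <opencv2/opencv.hpp>
#include <fstream>
using namespace cv;
using namespace std;

// read one whitespace-separated map file written by the program above
static Mat LoadMap(const char *path, int rows, int cols)
{
    Mat m(rows, cols, CV_32FC1);
    ifstream fs(path);
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
            fs >> m.ptr<float>(i)[j];
    return m;
}

int main()
{
    Mat mapLx = LoadMap("E:\\cpp\\Project1\\mapLx1.txt", 480, 640);
    Mat mapLy = LoadMap("E:\\cpp\\Project1\\mapLy1.txt", 480, 640);
    Mat img = imread("left_001.png", 0);                // placeholder input frame
    Mat rectified;
    remap(img, rectified, mapLx, mapLy, INTER_LINEAR);  // apply the precomputed tables
    imshow("rectified", rectified);
    waitKey(0);
    return 0;
}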

Below are the parameter-by-parameter explanations from OpenCV's calibration and rectification headers. Note that the stereoRectify and stereoCalibrate excerpts document the cv::fisheye variants; the pinhole versions used in the code above take the same core parameters, with minor differences in signature.

/** @brief Stereo rectification for fisheye camera model

@param K1 First camera intrinsic matrix.
@param D1 First camera distortion parameters.
@param K2 Second camera intrinsic matrix.
@param D2 Second camera distortion parameters.
@param imageSize Size of the image used for stereo calibration.
@param R Rotation matrix between the coordinate systems of the first and the second cameras.
@param tvec Translation vector between coordinate systems of the cameras.
@param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
@param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
@param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first camera.
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second camera.
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D).
@param flags Operation flags that may be zero or @ref fisheye::CALIB_ZERO_DISPARITY. If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.
@param newImageSize New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize. Setting it to a larger value can help you
preserve details in the original image, especially when there is a big radial distortion.
@param balance Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].
@param fov_scale Divisor for new focal length.
*/
CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2,
    const Size &imageSize, InputArray R, InputArray tvec,
    OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q,
    int flags, const Size &newImageSize = Size(),
    double balance = 0.0, double fov_scale = 1.0);

/** @brief Performs stereo calibration

@param objectPoints Vector of vectors of the calibration pattern points.
@param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.
@param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.
@param K1 Input/output first camera intrinsic matrix:
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ .
If any of @ref fisheye::CALIB_USE_INTRINSIC_GUESS , @ref fisheye::CALIB_FIX_INTRINSIC are specified,
some or all of the matrix components must be initialized.
@param D1 Input/output vector of distortion coefficients \f$\distcoeffsfisheye\f$ of 4 elements.
@param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1.
@param D2 Input/output lens distortion coefficients for the second camera. The parameter is
similar to D1.
@param imageSize Size of the image used only to initialize camera intrinsic matrix.
@param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
@param T Output translation vector between the coordinate systems of the cameras.
@param flags Different flags that may be zero or a combination of the following values:
-   @ref fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only R, T matrices
are estimated.
-   @ref fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center (imageSize is used), and focal distances are computed in a least-squares fashion.
-   @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
of intrinsic optimization.
-   @ref fisheye::CALIB_CHECK_COND The functions will check validity of condition number.
-   @ref fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stays zero.
-   @ref fisheye::CALIB_FIX_K1, ..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients
are set to zeros and stay zero.
@param criteria Termination criteria for the iterative optimization algorithm.
*/
CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints,
    InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
    InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2,
    Size imageSize, OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC,
    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));

/** @brief Computes the undistortion and rectification transformation map.

The function computes the joint undistortion and rectification transformation and represents the
result in the form of maps for remap. The undistorted image looks like original, as if it is
captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a
monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
#getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera,
newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .

Also, this new camera is oriented differently in the coordinate space, according to R. That, for
example, helps to align two heads of a stereo camera so that the epipolar lines on both images
become horizontal and have the same y-coordinate (in case of a horizontally aligned stereo camera).

The function actually builds the maps for the inverse mapping algorithm that is used by remap. That
is, for each pixel \f$(u, v)\f$ in the destination (corrected and rectified) image, the function
computes the corresponding coordinates in the source image (that is, in the original image from
camera). The following process is applied:
\f[
\begin{array}{l}
x \leftarrow (u - {c'}_x)/{f'}_x \\
y \leftarrow (v - {c'}_y)/{f'}_y \\
{[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\
x' \leftarrow X/W \\
y' \leftarrow Y/W \\
r^2 \leftarrow x'^2 + y'^2 \\
x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\
y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
s \vecthree{x'''}{y'''}{1} = \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1} \\
map_x(u,v) \leftarrow x''' f_x + c_x \\
map_y(u,v) \leftarrow y''' f_y + c_y
\end{array}
\f]
where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
are the distortion coefficients.

In case of a stereo camera, this function is called twice: once for each camera head, after
stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera
was not calibrated, it is still possible to compute the rectification transformations directly from
the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
space. R can be computed from H as
\f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f]
where cameraMatrix can be chosen arbitrarily.

@param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
is assumed. In cvInitUndistortMap R is assumed to be an identity matrix.
@param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{0}{c_y'}{0}{0}{1}\f$ .
@param size Undistorted image size.
@param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
@param map1 The first output map.
@param map2 The second output map.
*/
CV_EXPORTS_W void initUndistortRectifyMap(InputArray cameraMatrix, InputArray distCoeffs,
    InputArray R, InputArray newCameraMatrix,
    Size size, int m1type, OutputArray map1, OutputArray map2);
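Since Q is the disparity-to-depth matrix described above, the natural next step after rectification is matching and ranging. A minimal sketch using OpenCV's block matcher, assuming an already-rectified 8-bit grayscale pair and the Q returned by stereoRectify; the matcher parameters are illustrative, not tuned:

#include <opencv2/opencv.hpp>

// depth from an already-rectified pair, via the Q matrix from stereoRectify
cv::Mat DepthFromRectifiedPair(const cv::Mat &left, const cv::Mat &right, const cv::Mat &Q)
{
    // numDisparities must be a multiple of 16; blockSize must be odd
    cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(64, 21);
    cv::Mat disp16, disp32, xyz;
    bm->compute(left, right, disp16);               // CV_16S disparity, scaled by 16
    disp16.convertTo(disp32, CV_32F, 1.0 / 16.0);   // recover the sub-pixel disparity
    cv::reprojectImageTo3D(disp32, xyz, Q, true);   // per-pixel (X, Y, Z) in the units of T
    return xyz;                                     // xyz.at<cv::Vec3f>(y, x)[2] is the depth Z
}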
