stitching.cpp Fisheye Image Stitching and Blending: Source Code Analysis


When I ran the official OpenCV C++ samples earlier, I saw that the stitching.cpp sample stitches and blends images quite well. In MATLAB I had corrected three fisheye images using the longitude-latitude mapping method I wrote about before, but I did not know how to save the corrected results: saving the figure with Save or Save As keeps the white border of the figure window, which is useless for stitching, so I simply took screenshots and saved them as JPG files, which have no white border:

The corrected results: these three MATLAB outputs were then passed to OpenCV's stitching.cpp, and running it produced:

Now let's analyze the source code involved in the stitching and blending, starting with stitching.cpp:

#include <iostream>

#include <fstream>

#include "opencv2/highgui/highgui.hpp"

#include "opencv2/stitching/stitcher.hpp"

using namespace std;

using namespace cv;

bool try_use_gpu = false;

vector<Mat> imgs;

string result_name = "result3.jpg"; // file name used to save the panorama; the file appears in the project directory

void printUsage();

int parseCmdArgs(int argc, char** argv);

int main(int argc, char* argv[])

{

int retval = parseCmdArgs(argc, argv);

if (retval) return -1;

Mat pano;

Stitcher stitcher = Stitcher::createDefault(try_use_gpu); // by default this function does not use the GPU!

Stitcher::Status status = stitcher.stitch(imgs, pano); // estimates the rotations between the input images and generates the final panorama; this function is the real focus of the analysis!

if (status != Stitcher::OK) // make sure stitching succeeded

{

cout << "Can't stitch images, error code = " << int(status) << endl;

return -1;

}

namedWindow("stitching result");

imshow("stitching result", pano);

waitKey(0);

imwrite(result_name,pano);

return 0;

}

void printUsage() // GPU is not used by default

{

cout <<

"Rotation model images stitcher.\n\n"

"stitching img1 img2 [...imgN]\n\n"

"Flags:\n"

" --try_use_gpu (yes|no)\n"

" Try to use GPU. The default value is 'no'. All default values\n"

" are for CPU mode.\n"

" --output <result_img>\n"

" The default is 'result.jpg'.\n";

}

int parseCmdArgs(int argc, char** argv) // reads the images to be stitched and stores them in the imgs container

{

if (argc == 1)

{

printUsage();

return -1;

}

for (int i = 1; i < argc; ++i)

{

if (string(argv[i]) == "--help" || string(argv[i]) == "/?")

{

printUsage();

return -1;

}

else if (string(argv[i]) == "--try_use_gpu")

{

if (string(argv[i + 1]) == "no")

try_use_gpu = false;

else if (string(argv[i + 1]) == "yes")

try_use_gpu = true;

else

{

cout << "Bad --try_use_gpu flag value\n";

return -1;

}

i++;

}

else if (string(argv[i]) == "--output")

{

result_name = argv[i + 1];

i++;

}

else

{

Mat img = imread(argv[i]);

if (img.empty())

{

cout << "Can't read image '" << argv[i] << "'\n";

return -1;

}

imgs.push_back(img);

}

}

return 0;

}
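For example, after building the sample it could be invoked as stitching left.jpg middle.jpg right.jpg --output result3.jpg (hypothetical file names): the loop above then fills imgs with the three images and --output overrides result_name.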

The stitching.cpp above is easy to follow. Now let's look at the source of the function that does the actual stitching and blending, Stitcher::Status status = stitcher.stitch(imgs, pano);. Open the OpenCV sources (for example through the project generated with CMake) and look it up:

There you can see the definitions of the Stitcher class member functions. stitch() is overloaded, and the program uses the first form. Its definition is very simple, so the next step is to look at the definitions of Status status = estimateTransform(images, rois); (which estimates the relations between the input images, such as rotation, translation and scaling) and composePanorama(pano); (which blends the images into the panorama). estimateTransform is also short.
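For reference, here is a minimal sketch of what the first form of stitch() boils down to (paraphrased from the 2.4-era stitcher sources, not copied verbatim): it simply chains the two stages discussed next.

Stitcher::Status Stitcher::stitch(InputArray images, OutputArray pano)
{
    // registration stage: feature detection, pairwise matching, camera estimation
    Status status = estimateTransform(images);
    if (status != OK)
        return status;
    // compositing stage: warping, exposure compensation, seam finding, blending
    return composePanorama(pano);
}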

Stitcher::Status Stitcher::estimateTransform(InputArray images, const vector<vector<Rect> > &rois)

{

images.getMatVector(imgs_);

rois_ = rois;

Status status;

if ((status = matchImages()) != OK) // we need to look at the source of matchImages()

return status;

// next we also need to look at the source of estimateCameraParams(); (estimates the camera parameters)

estimateCameraParams();

return OK;

}

So next we find:

Stitcher::Status Stitcher::matchImages()

{

if ((int)imgs_.size() < 2)

{

LOGLN("Need more images");

return ERR_NEED_MORE_IMGS; // at least 2 input images are required

}

work_scale_ = 1;

seam_work_aspect_ = 1;

seam_scale_ = 1;

bool is_work_scale_set = false;

bool is_seam_scale_set = false;

Mat full_img, img;

features_.resize(imgs_.size()); // one ImageFeatures entry per input image

seam_est_imgs_.resize(imgs_.size());

full_img_sizes_.resize(imgs_.size());

LOGLN("Finding features...");

#if ENABLE_LOG

int64 t = getTickCount(); // start timing the feature detection

#endif

for (size_t i = 0; i < imgs_.size(); ++i)

{

full_img = imgs_[i]; // read each input image into full_img in turn

full_img_sizes_[i] = full_img.size(); // store the original size (m x n) of each input image

// registr_resol_ is the registration resolution in megapixels; the image is downscaled so that its area is at most registr_resol_ * 10^6 pixels

if (registr_resol_ < 0)

{

img = full_img;

work_scale_ = 1;

is_work_scale_set = true;

}

else

{

if (!is_work_scale_set)

{

// preprocessing: full_img.size().area() is the pixel area, i.e. the product m * n

// compute work_scale_ so that the resized image area is at most registr_resol_ * 10^6

work_scale_ = min(1.0, sqrt(registr_resol_ * 1e6 / full_img.size().area()));

is_work_scale_set = true;

}

// resize full_img by the computed work_scale_ and store the result in img

resize(full_img, img, Size(), work_scale_, work_scale_);

}

if (!is_seam_scale_set)

{

// seam_est_resol_ is the resolution (in megapixels) used for seam estimation; like registr_resol_ it has a default value

seam_scale_ = min(1.0, sqrt(seam_est_resol_ * 1e6 / full_img.size().area()));

seam_work_aspect_ = seam_scale_ / work_scale_; // ratio between the seam-estimation scale and the work scale

is_seam_scale_set = true;

}

if (rois_.empty())

// if rois_ is empty, detect features over the whole image and store them in features_[i] (we will look at this source later)

(*features_finder_)(img, features_[i]);

else

{

vector<Rect> rois(rois_[i].size());

for (size_t j = 0; j < rois_[i].size(); ++j)

{

Point tl(cvRound(rois_[i][j].x * work_scale_), cvRound(rois_[i][j].y * work_scale_));

Point br(cvRound(rois_[i][j].br().x * work_scale_), cvRound(rois_[i][j].br().y * work_scale_));

rois[j] = Rect(tl, br);

}

(*features_finder_)(img, features_[i], rois);

}

features_[i].img_idx = (int)i; // record which image these features belong to

LOGLN("Features in image #" << i+1 << ": " << features_[i].keypoints.size());

// resize the source image by seam_scale_ (area about seam_est_resol_ * 10^6) and store it in seam_est_imgs_[]

resize(full_img, img, Size(), seam_scale_, seam_scale_);

seam_est_imgs_[i] = img.clone();

}

// Do it to save memory

features_finder_->collectGarbage(); // worth reading the finder's source to see how the features are actually detected

full_img.release(); // free the temporary buffers

img.release();

// feature detection is finished; print the elapsed time

LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

LOG("Pairwise matching");

#if ENABLE_LOG

t = getTickCount(); // start timing the pairwise matching

#endif

(*features_matcher_)(features_, pairwise_matches_, matching_mask_);

features_matcher_->collectGarbage(); // worth reading the matcher's source as well!

LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec"); //匹配结束

// Leave only images we are sure are from the same panorama

// conf_thresh_ is the confidence threshold for two images belonging to the same panorama; it is used to decide which of the input images are part of the same panorama

indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);

vector<Mat> seam_est_imgs_subset;

vector<Mat> imgs_subset;

vector<Size> full_img_sizes_subset;

for (size_t i = 0; i < indices_.size(); ++i)

{

imgs_subset.push_back(imgs_[indices_[i]]);

seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);

full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);

}

seam_est_imgs_ = seam_est_imgs_subset;

imgs_ = imgs_subset;

full_img_sizes_ = full_img_sizes_subset;

// check that at least 2 images remain after the filtering above

if ((int)imgs_.size() < 2)

{

LOGLN("Need more images");

return ERR_NEED_MORE_IMGS;

}

return OK;

}
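To make the scale bookkeeping in matchImages() concrete, here is a small worked example with hypothetical numbers (and assuming the usual createDefault() settings, which as far as I recall are registr_resol_ = 0.6 Mpx and seam_est_resol_ = 0.1 Mpx):

Size full_size(4000, 3000);                                       // a 12 Mpx input image
double work_scale = min(1.0, sqrt(0.6 * 1e6 / full_size.area())); // about 0.224
double seam_scale = min(1.0, sqrt(0.1 * 1e6 / full_size.area())); // about 0.091
double seam_work_aspect = seam_scale / work_scale;                // about 0.41

So feature detection and matching run on roughly 0.6 Mpx copies of the inputs, seam estimation runs on even smaller 0.1 Mpx copies, and seam_work_aspect_ records the ratio needed to transfer results between the two scales.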

The above can be read together with the analyses in /hanshuning/article/details/41960401 and /content-10-8390-1.html.

With matchImages() analyzed, the next function is estimateCameraParams():

void Stitcher::estimateCameraParams()

{

detail::HomographyBasedEstimator estimator;

estimator(features_, pairwise_matches_, cameras_);

for (size_t i = 0; i < cameras_.size(); ++i)

{

Mat R;

cameras_[i].R.convertTo(R, CV_32F);

cameras_[i].R = R;

LOGLN("Initial intrinsic parameters #" << indices_[i] + 1 << ":\n " << cameras_[i].K());

}

bundle_adjuster_->setConfThresh(conf_thresh_);

(*bundle_adjuster_)(features_, pairwise_matches_, cameras_);

// Find median focal length and use it as final image scale

vector<double> focals;

for (size_t i = 0; i < cameras_.size(); ++i)

{

LOGLN("Camera #" << indices_[i] + 1 << ":\n" << cameras_[i].K());

focals.push_back(cameras_[i].focal);

}

std::sort(focals.begin(), focals.end());

if (focals.size() % 2 == 1)

warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);

else

warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

if (do_wave_correct_)

{

vector<Mat> rmats;

for (size_t i = 0; i < cameras_.size(); ++i)

rmats.push_back(cameras_[i].R);

detail::waveCorrect(rmats, wave_correct_kind_);

for (size_t i = 0; i < cameras_.size(); ++i)

cameras_[i].R = rmats[i];

}

}
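A quick hypothetical example of the median-focal step above: if bundle adjustment leaves three cameras with focal lengths of, say, 980, 1010 and 1045 px, the sorted vector has odd size and warped_image_scale_ becomes 1010; with four cameras the two middle values would be averaged.

// hypothetical focal lengths after bundle adjustment
vector<double> focals;
focals.push_back(980.0); focals.push_back(1010.0); focals.push_back(1045.0);
std::sort(focals.begin(), focals.end());
float warped_image_scale = static_cast<float>(focals[focals.size() / 2]); // 1010, the median

This median focal then acts as the scale (radius) of the warping surface when the images are projected in composePanorama().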

Next, the panorama is generated (image blending etc.) by composePanorama(pano); so let's look at this function's source:

Stitcher::Status Stitcher::composePanorama(OutputArray pano)

{

return composePanorama(vector<Mat>(), pano); // just this one line; keep tracing and look at the source of composePanorama(vector<Mat>(), pano);

}

It is shown below:

Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)

{

LOGLN("Warping images (auxiliary)... ");

vector<Mat> imgs;

images.getMatVector(imgs);

if (!imgs.empty())

{

CV_Assert(imgs.size() == imgs_.size());

Mat img;

seam_est_imgs_.resize(imgs.size());

for (size_t i = 0; i < imgs.size(); ++i)

{

imgs_[i] = imgs[i];

resize(imgs[i], img, Size(), seam_scale_, seam_scale_);

seam_est_imgs_[i] = img.clone();

}

vector<Mat> seam_est_imgs_subset;

vector<Mat> imgs_subset;

for (size_t i = 0; i < indices_.size(); ++i)

{

imgs_subset.push_back(imgs_[indices_[i]]);

seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);

}

seam_est_imgs_ = seam_est_imgs_subset;

imgs_ = imgs_subset;

}

Mat &pano_ = pano.getMatRef();

#if ENABLE_LOG

int64 t = getTickCount();

#endif

vector<Point> corners(imgs_.size());

vector<Mat> masks_warped(imgs_.size());

vector<Mat> images_warped(imgs_.size());

vector<Size> sizes(imgs_.size());

vector<Mat> masks(imgs_.size());

// Prepare image masks

for (size_t i = 0; i < imgs_.size(); ++i)

{

masks[i].create(seam_est_imgs_[i].size(), CV_8U);

masks[i].setTo(Scalar::all(255));

}

// Warp images and their masks

Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));

for (size_t i = 0; i < imgs_.size(); ++i)

{

Mat_<float> K;

cameras_[i].K().convertTo(K, CV_32F);

K(0,0) *= (float)seam_work_aspect_;

K(0,2) *= (float)seam_work_aspect_;

K(1,1) *= (float)seam_work_aspect_;

K(1,2) *= (float)seam_work_aspect_;

corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);

sizes[i] = images_warped[i].size();

w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);

}

vector<Mat> images_warped_f(imgs_.size());

for (size_t i = 0; i < imgs_.size(); ++i)

images_warped[i].convertTo(images_warped_f[i], CV_32F);

LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

// Find seams

exposure_comp_->feed(corners, images_warped, masks_warped);

seam_finder_->find(images_warped_f, corners, masks_warped);

// Release unused memory

seam_est_imgs_.clear();

images_warped.clear();

images_warped_f.clear();

masks.clear();

LOGLN("Compositing...");

#if ENABLE_LOG

t = getTickCount();

#endif

Mat img_warped, img_warped_s;

Mat dilated_mask, seam_mask, mask, mask_warped;

//double compose_seam_aspect = 1;

double compose_work_aspect = 1;

bool is_blender_prepared = false;

double compose_scale = 1;

bool is_compose_scale_set = false;

Mat full_img, img;

for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)

{

LOGLN("Compositing image #" << indices_[img_idx] + 1);

// Read image and resize it if necessary

full_img = imgs_[img_idx];

if (!is_compose_scale_set)

{

if (compose_resol_ > 0)

compose_scale = min(1.0, sqrt(compose_resol_ * 1e6 / full_img.size().area()));

is_compose_scale_set = true;

// Compute relative scales

//compose_seam_aspect = compose_scale / seam_scale_;

compose_work_aspect = compose_scale / work_scale_;

// Update warped image scale

warped_image_scale_ *= static_cast<float>(compose_work_aspect);

w = warper_->create((float)warped_image_scale_);

// Update corners and sizes

for (size_t i = 0; i < imgs_.size(); ++i)

{

// Update intrinsics

cameras_[i].focal *= compose_work_aspect;

cameras_[i].ppx *= compose_work_aspect;

cameras_[i].ppy *= compose_work_aspect;

// Update corner and size

Size sz = full_img_sizes_[i];

if (std::abs(compose_scale - 1) > 1e-1)

{

sz.width = cvRound(full_img_sizes_[i].width * compose_scale);

sz.height = cvRound(full_img_sizes_[i].height * compose_scale);

}

Mat K;

cameras_[i].K().convertTo(K, CV_32F);

Rect roi = w->warpRoi(sz, K, cameras_[i].R);

corners[i] = roi.tl();

sizes[i] = roi.size();

}

}

if (std::abs(compose_scale - 1) > 1e-1)

resize(full_img, img, Size(), compose_scale, compose_scale);

else

img = full_img;

full_img.release();

Size img_size = img.size();

Mat K;

cameras_[img_idx].K().convertTo(K, CV_32F);

// Warp the current image

w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

// Warp the current image mask

mask.create(img_size, CV_8U);

mask.setTo(Scalar::all(255));

w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

// Compensate exposure

exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);

img_warped.convertTo(img_warped_s, CV_16S);

img_warped.release();

img.release();

mask.release();

// Make sure seam mask has proper size

dilate(masks_warped[img_idx], dilated_mask, Mat());

resize(dilated_mask, seam_mask, mask_warped.size());

mask_warped = seam_mask & mask_warped;

if (!is_blender_prepared)

{

blender_->prepare(corners, sizes);

is_blender_prepared = true;

}

// Blend the current image

blender_->feed(img_warped_s, mask_warped, corners[img_idx]);

}

Mat result, result_mask;

blender_->blend(result, result_mask);

LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

// Preliminary result is in CV_16SC3 format, but all values are in [0,255] range,

// so convert it to avoid user confusing

result.convertTo(pano_, CV_8U);

return OK;

}
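To summarize the compositing loop, the blender follows a simple prepare -> feed -> blend protocol. Below is a minimal standalone sketch of that protocol, assuming the default detail::MultiBandBlender; corners, sizes and the warped CV_16SC3 images are hypothetical placeholders for what the warping step above produces.

detail::MultiBandBlender blender(false /*try_gpu*/, 5 /*num_bands*/);
blender.prepare(corners, sizes);                   // bounding ROI of the whole panorama
for (size_t i = 0; i < images_warped_s.size(); ++i)
    blender.feed(images_warped_s[i], masks_warped[i], corners[i]); // accumulate each image
Mat result, result_mask;
blender.blend(result, result_mask);                // result comes back as CV_16SC3
result.convertTo(result, CV_8U);                   // convert for display / imwrite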
