Common OpenCV Image Stitching Methods (3): Stitching Based on Feature Matching
The common OpenCV image stitching methods will be shared in four parts; this is the third one, and the remaining parts will follow.
Common OpenCV Image Stitching Methods (3): image stitching based on feature matching. This post presents stitching with SIFT feature matching, using OpenCV 4.4.0. Characteristics and applicable scope: the images share enough overlapping regions with common features, and there is no significant scale change or distortion between the images to be stitched.
Advantages: tolerates a certain amount of tilt/perspective change. Disadvantages: requires enough common feature regions for matching, is relatively slow, and tends to crash when stitching large images.
The two images to be stitched:
Feature matching result:
Stitching result:
Result after seam processing (the transition at the seam looks more natural):
Core code:
/******************** Direct image stitching function *************************/
bool ImageOverlap0(Mat &img1, Mat &img2)
{
Mat g1(img1, Rect(0, 0, img1.cols, img1.rows)); // ROI header over the whole of img1
Mat g2(img2, Rect(0, 0, img2.cols, img2.rows)); // ROI header over the whole of img2
cvtColor(g1, g1, COLOR_BGR2GRAY); // cvtColor reallocates g1/g2 (channel count changes), so img1/img2 keep their color data
cvtColor(g2, g2, COLOR_BGR2GRAY);
vector<cv::KeyPoint> keypoints_roi, keypoints_img; /* keypoints found using SIFT */
Mat descriptor_roi, descriptor_img; /* Descriptors for SIFT */
FlannBasedMatcher matcher; /* FLANN based matcher to match keypoints */
vector<cv::DMatch> matches, good_matches;
cv::Ptr<cv::SIFT> sift = cv::SIFT::create();
int i;
sift->detectAndCompute(g1, cv::Mat(), keypoints_roi, descriptor_roi); /* get keypoints of ROI image */
sift->detectAndCompute(g2, cv::Mat(), keypoints_img, descriptor_img); /* get keypoints of the image */
matcher.match(descriptor_roi, descriptor_img, matches); //match descriptors between the two images
double max_dist = 0; double min_dist = 5000;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptor_roi.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
// Keep only matches whose distance is less than 3 * min_dist
for (i = 0; i < descriptor_roi.rows; i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
printf("%ld no. of matched keypoints in right image\n", good_matches.size());
/* Draw matched keypoints */
Mat img_matches;
//draw the filtered matches
drawMatches(img1, keypoints_roi, img2, keypoints_img,
good_matches, img_matches, Scalar::all(-1),
Scalar::all(-1), vector<char>(),
DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("matches", img_matches);
vector<Point2f> keypoints1, keypoints2;
for (i = 0; i < good_matches.size(); i++)
{
keypoints1.push_back(keypoints_img[good_matches[i].trainIdx].pt);
keypoints2.push_back(keypoints_roi[good_matches[i].queryIdx].pt);
}
//Compute the homography matrices (perspective transforms, not affine):
//H maps points of img2 into img1's coordinate frame, H2 maps points of img1 into img2's frame
Mat H = findHomography(keypoints1, keypoints2, RANSAC);
Mat H2 = findHomography(keypoints2, keypoints1, RANSAC);
Mat stitchedImage; //canvas for the warped image, which is also the final stitching result
int mRows = img2.rows;
if (img1.rows > img2.rows)
{
mRows = img1.rows;
}
int count = 0;
for (int i = 0; i < keypoints2.size(); i++)
{
if (keypoints2[i].x >= img2.cols / 2)
count++;
}
//Decide from the positions of the matched points whether img2 should be stitched on the left or the right
if (count / float(keypoints2.size()) >= 0.5) //img2 to be stitched lies on the right
{
cout << "img1 should be left" << endl;
vector<Point2f>corners(4);
vector<Point2f>corners2(4);
corners[0] = Point(0, 0);
corners[1] = Point(0, img2.rows);
corners[2] = Point(img2.cols, img2.rows);
corners[3] = Point(img2.cols, 0);
stitchedImage = Mat::zeros(mRows, img1.cols + img2.cols, CV_8UC3); //rows first, then cols
warpPerspective(img2, stitchedImage, H, Size(img2.cols + img1.cols, mRows));
perspectiveTransform(corners, corners2, H);
/*
circle(stitchedImage, corners2[0], 5, Scalar(0, 255, 0), 2, 8);
circle(stitchedImage, corners2[1], 5, Scalar(0, 255, 255), 2, 8);
circle(stitchedImage, corners2[2], 5, Scalar(0, 255, 0), 2, 8);
circle(stitchedImage, corners2[3], 5, Scalar(0, 255, 0), 2, 8); */
cout << corners2[0].x << ", " << corners2[0].y << endl;
cout << corners2[1].x << ", " << corners2[1].y << endl;
imshow("temp", stitchedImage);
//imwrite("temp.jpg", stitchedImage);
Mat half(stitchedImage, Rect(0, 0, img1.cols, img1.rows));
img1.copyTo(half);
imshow("result", stitchedImage);
}
else //img2 to be stitched lies on the left
{
cout << "img2 should be left" << endl;
stitchedImage = Mat::zeros(mRows, img1.cols + img2.cols, CV_8UC3); //rows first, then cols
warpPerspective(img1, stitchedImage, H2, Size(img1.cols + img2.cols, mRows));
imshow("temp", stitchedImage);
//Compute the four corner points of img1 after the perspective warp
vector<Point2f>corners(4);
vector<Point2f>corners2(4);
corners[0] = Point(0, 0);
corners[1] = Point(0, img1.rows);
corners[2] = Point(img1.cols, img1.rows);
corners[3] = Point(img1.cols, 0);
perspectiveTransform(corners, corners2, H2); //map the corners through the perspective transform
/*
circle(stitchedImage, corners2[0], 5, Scalar(0, 255, 0), 2, 8);
circle(stitchedImage, corners2[1], 5, Scalar(0, 255, 255), 2, 8);
circle(stitchedImage, corners2[2], 5, Scalar(0, 255, 0), 2, 8);
circle(stitchedImage, corners2[3], 5, Scalar(0, 255, 0), 2, 8); */
cout << corners2[0].x << ", " << corners2[0].y << endl;
cout << corners2[1].x << ", " << corners2[1].y << endl;
Mat half(stitchedImage, Rect(0, 0, img2.cols, img2.rows));
img2.copyTo(half);
imshow("result", stitchedImage);
}
imwrite("result.bmp", stitchedImage);
return true;
}
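For reference, a minimal driver that calls the function above could look like the following sketch; the input file names left.jpg and right.jpg are placeholders of my own, not from the original post:

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

bool ImageOverlap0(Mat &img1, Mat &img2); // the stitching function shown above

int main()
{
    // Placeholder file names; substitute your own images to be stitched
    Mat img1 = imread("left.jpg");
    Mat img2 = imread("right.jpg");
    if (img1.empty() || img2.empty())
    {
        cout << "Failed to load input images" << endl;
        return -1;
    }
    ImageOverlap0(img1, img2);
    waitKey(0); // keep the imshow windows ("matches", "temp", "result") open
    return 0;
}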
The seam optimization code and the complete source code and sample material will be published in the Knowledge Planet (知识星球) topic.
WeChat official account: OpenCV与AI深度学习
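Until then, here is a rough sketch of one common way to soften the seam: linear feather blending over the overlap. This is my own illustration, not the author's published method; FeatherBlendSeam and blendStart are hypothetical names, and it assumes the blended columns of the canvas still hold the warped pixels (i.e. call it before, or instead of, the full-width img1.copyTo(half)):

#include <opencv2/opencv.hpp>
using namespace cv;

// Hypothetical helper: blend the original left image into the warped canvas over the
// column range [blendStart, img1.cols). The weight for img1 falls linearly from 1 at
// blendStart to 0 at img1.cols, so the transition at the seam becomes gradual.
void FeatherBlendSeam(const Mat &img1, Mat &stitchedImage, int blendStart)
{
    for (int y = 0; y < img1.rows; y++)
    {
        for (int x = blendStart; x < img1.cols; x++)
        {
            float w = (img1.cols - x) / float(img1.cols - blendStart); // weight for img1
            Vec3b p1 = img1.at<Vec3b>(y, x);           // pixel from the original left image
            Vec3b p2 = stitchedImage.at<Vec3b>(y, x);  // pixel from the warped right image
            for (int c = 0; c < 3; c++)
                stitchedImage.at<Vec3b>(y, x)[c] = saturate_cast<uchar>(w * p1[c] + (1.0f - w) * p2[c]);
        }
    }
}

In the branch where img1 stays on the left, blendStart could for instance be taken as the x coordinate of the warped left edge of img2 (roughly the smaller of corners2[0].x and corners2[1].x), clamped to the range [0, img1.cols).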