// 全部代码如下 (the complete code is as follows):
// Standard library
#include <cfloat>
#include <ctime>
#include <iostream>
#include <vector>
// OpenCV 2.x (SURF lives in the nonfree module)
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
using namespace cv;
using namespace std;
int main()
{
double start = clock();
//【1】加载源图像
Mat src1 = imread("26.png", 1);//小图
Mat src2 = imread("25.png", 1);//大图
if ((src1.cols == 0) || (src2.cols == 0))
{
cout << "can not find input picture!" << endl;
return 0;
}
int minHessian = 1000;//定义SURF中hessian阈值特征点检测算子
SurfFeatureDetector detector(minHessian);//构造surf特征检测器(类),定义一个SurfFeatureDetector特征检测类对象
vector<KeyPoint>keyPoint1, keyPoint2;//vector模板类是能够存放任意类型的动态数组,能够增加和压缩数据
//【2】调用detect函数检测出SURF特征点,保存在vector容器中
detector.detect(src1, keyPoint1);
detector.detect(src2, keyPoint2);
/*计算特征向量*/
SurfDescriptorExtractor extractor;//构造SURF描述子提取器
Mat descriptors1, descriptors2; //提取SURF描述子
extractor.compute(src1, keyPoint1, descriptors1);
extractor.compute(src2, keyPoint2, descriptors2);
//使用FLANNY匹配算子进行匹配
FlannBasedMatcher FLMatcher;//构造匹配器
vector<DMatch>matches;//匹配两幅图的描述子
FLMatcher.match(descriptors1, descriptors2, matches);
double max_dist = 0; double min_dist = 100;//设置的最大距离,最小距离
for (int i = 0; i < descriptors1.rows; i++)//计算出关键点之间的最大距离和最小距离
{
double dist = matches[i].distance;
if (dist < min_dist)
{
min_dist = dist;
}
if (dist > max_dist)
{
max_dist = dist;
}
}
cout << "最大距离阈值:" << max_dist << endl;
cout << "最小距离阈值:" << min_dist << endl;
cout << "所需距离阈值小于三倍的最小距离阈值有效!" << endl;
//存下匹配距离小于3*min_dist的点对
vector<DMatch>good_matches;
for (int i = 0; i < descriptors1.rows; i++)
{
if (matches[i].distance < 3 * (min_dist + DBL_MIN))
{
good_matches.push_back(matches[i]);
}
}
//绘制匹配到的关键点
Mat imgMatches;
drawMatches(src1, //小图片
keyPoint1,//小图片特征点,输出参数
src2, //大图片
keyPoint2,//大图片特征点,输出参数
good_matches, //匹配距离小于3*min_dist的点对
imgMatches, //输出图像
Scalar::all(-1), Scalar::all(-1),//匹配的输出颜色,即线和关键点的颜色,单一特征点的颜色,它也有表示随机生成颜色的默认值Scalar::all(-1)。
vector<char>(),
DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);//特征绘制的标识符,有默认值NOT_DRAW_SINGLE_POINTS(单点特征点不会被绘制)等。
vector<Point2f>obj;//定义两个局部变量
vector<Point2f>scene;
//从匹配成功的匹配对中获取特征点
for (unsigned int i = 0; i < good_matches.size(); i++)//把匹配对的点放入类型为Point2f类的vector容器里
{
obj.push_back(keyPoint1[good_matches[i].queryIdx].pt);
scene.push_back(keyPoint2[good_matches[i].trainIdx].pt);
}
//使用函数findHomography 寻找匹配上的特征点的变换
//使用函数perspectiveTransform 来映射点
Mat H = findHomography(obj, scene, CV_RANSAC);//计算透视变换 ,找到源图片和目标图片的透视变换H
//CV_RANSAC含义基于RANSAC的鲁棒性方法
//从待测图片中获取角点
vector<Point2f>obj_corners(4);
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint(src1.cols, 0);
obj_corners[2] = cvPoint(src1.cols, src1.rows);
obj_corners[3] = cvPoint(0, src1.rows);
vector<Point2f>scene_corners(4);
//进行透视变换
perspectiveTransform(obj_corners, //perspectiveTransform函数作用是进行向量透视矩阵变换
scene_corners,//与原图同样尺寸
H);//透视变换矩阵
//绘制出角点之间的直线
for (int i = 0; i < 3; i++)
{
line(imgMatches, scene_corners[i] + Point2f(static_cast<float>(src1.cols), 0),
scene_corners[i + 1] + Point2f(static_cast<float>(src1.cols), 0), Scalar(255, 0, 123), 4);
}
line(imgMatches, scene_corners[3] + Point2f(static_cast<float>(src1.cols), 0),
scene_corners[0] + Point2f(static_cast<float>(src1.cols), 0), Scalar(255, 0, 123), 4);
imshow("匹配:", imgMatches);
double end = clock();
cout << "整个过程所要时间:" << (end - start) << "ms" << endl;
waitKey(0);
return 1;
}