Feature Matching

1. ORB feature matching with BruteForce-Hamming

// Match two images with ORB features, measuring run time and applying a symmetry test and a ratio test

#include <iostream>
#include <ctime>
//#include <dirent.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>   // ORB lives in features2d; the nonfree module is not needed here

using namespace cv;
using namespace std;

/*
int main(int argc, const char *argv[])
{

if (argc != 4){
cout << "usage:match <image1> <image2> <ratio>\n";
exit(-1);
}

double ratio = (double)atof(argv[3]);
string image1_name = string(argv[1]), image2_name = string(argv[2]);

Mat image1 = imread(image1_name, 1);
Mat image2 = imread(image2_name, 1);
*/
int main()
{
    Mat image1 = imread("img_1.bmp", 1);
    Mat image2 = imread("img_2.bmp", 1);
    Ptr<FeatureDetector> detector;
    Ptr<DescriptorExtractor> extractor;
    float ratio = 0.8f;          // change this value to explore different results

    // ORB keypoint detector and descriptor extractor
    detector = FeatureDetector::create("ORB");
    extractor = DescriptorExtractor::create("ORB");

    cout << "ORB特征点、描述子、BruteForce-Hamming匹配" << endl;
    cout << "ratio = " << ratio << endl;
    clock_t begin = clock();
    // Keypoints
    vector<KeyPoint> keypoints1, keypoints2;
    detector->detect(image1, keypoints1);
    detector->detect(image2, keypoints2);

    cout << "# keypoints of image1 :" << keypoints1.size() << endl;
    cout << "# keypoints of image2 :" << keypoints2.size() << endl;
    // Compute descriptors
    Mat descriptors1, descriptors2;
    extractor->compute(image1, keypoints1, descriptors1);
    extractor->compute(image2, keypoints2, descriptors2);

    cout << "Descriptors size :" << descriptors1.cols << ":" << descriptors1.rows << endl;

    vector< vector<DMatch> > matches12, matches21;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    // knn: k-nearest-neighbour search; FLANN: fast approximate nearest-neighbour library
    matcher->knnMatch(descriptors1, descriptors2, matches12, 2);  // 2-NN matching
    matcher->knnMatch(descriptors2, descriptors1, matches21, 2);

    // BFMatcher bfmatcher(NORM_L2, true);
    // vector<DMatch> matches;
    // bfmatcher.match(descriptors1, descriptors2, matches);
    cout << "Matches1-2:" << matches12.size() << endl;
    cout << "Matches2-1:" << matches21.size() << endl;

    // Ratio test proposed in David Lowe's SIFT paper (ratio = 0.8)
    std::vector<DMatch> good_matches1, good_matches2;

    // The two loops below are intentionally duplicated; they could easily be factored into a helper.
    for (int i = 0; i < matches12.size(); i++){
        if (matches12[i][0].distance < ratio * matches12[i][1].distance)
            good_matches1.push_back(matches12[i][0]);
    }

    for (int i = 0; i < matches21.size(); i++){
        if (matches21[i][0].distance < ratio * matches21[i][1].distance)
            good_matches2.push_back(matches21[i][0]);
    }

    cout << "Good matches1:" << good_matches1.size() << endl;
    cout << "Good matches2:" << good_matches2.size() << endl;

    // Symmetry test: keep only matches that are mutual between the two directions
    std::vector<DMatch> better_matches;
    for (int i = 0; i<good_matches1.size(); i++){
        for (int j = 0; j<good_matches2.size(); j++){
            if (good_matches1[i].queryIdx == good_matches2[j].trainIdx && good_matches2[j].queryIdx == good_matches1[i].trainIdx){
                better_matches.push_back(DMatch(good_matches1[i].queryIdx, good_matches1[i].trainIdx, good_matches1[i].distance));
                break;
            }
        }
    }

    cout << "Better matches:" << better_matches.size() << endl;

    // Stop timing
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << "Time Costs : " << elapsed_secs << endl;

    // Draw and display the result
    Mat output;
    drawMatches(image1, keypoints1, image2, keypoints2, better_matches, output);
    imshow("Matches result", output);
    waitKey(0);

    return 0;
}
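
The hand-written symmetry test above can also be delegated to OpenCV itself: BFMatcher's second constructor argument enables cross-checking, which keeps a match only when the two descriptors are each other's best match. A minimal sketch, assuming the descriptors1/descriptors2 matrices from the listing (note that cross-checking applies to match(), not to the 2-NN knnMatch() that the ratio test needs):

// NORM_HAMMING is the appropriate distance for binary ORB descriptors
BFMatcher crossMatcher(NORM_HAMMING, true);   // true = keep only mutual best matches
vector<DMatch> symmetric_matches;
crossMatcher.match(descriptors1, descriptors2, symmetric_matches);
cout << "Cross-checked matches:" << symmetric_matches.size() << endl;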

2. SURF keypoints, descriptors, and FLANN descriptor matching

// SURF feature matching: SURF keypoints, descriptors, FLANN-based descriptor matching

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <iostream>
#include <ctime>
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    Mat img_1 = imread("img_1.bmp", 1);
    Mat img_2 = imread("img_2.bmp", 1);
    if (!img_1.data || !img_2.data) {
        printf("读取图片image错误! \n"); return false;
    }

    cout << "SURF特征点、描述子、FLANN描述子匹配" << endl;
    cout << "筛选条件:5倍最小距离" << endl;

    clock_t begin = clock();
    // Keypoints
    int minHessian = 300;   // Hessian threshold for the SURF detector
    SURF detector(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(img_1, keypoints_1);
    detector.detect(img_2, keypoints_2);
    cout << "# keypoints of image1 :" << keypoints_1.size() << endl;
    cout << "# keypoints of image2 :" << keypoints_2.size() << endl;

    // Descriptors / feature vectors
    SURF extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(img_1, keypoints_1, descriptors_1);
    extractor.compute(img_2, keypoints_2, descriptors_2);
    cout << "Descriptors size :" << descriptors_1.cols << ":" << descriptors_1.rows << endl;

    // Match descriptors
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);
    double max_dist = 0; double min_dist = 100;

    // Max and min distances over all matches
    for (int i = 0; i < descriptors_1.rows; i++) {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    // Report the distance range
    printf("> Max dist : %f \n", max_dist);
    printf("> Min dist : %f \n", min_dist);

    // Filter matches
    std::vector< DMatch > good_matches;
    for (int i = 0; i < descriptors_1.rows; i++) {
        if (matches[i].distance < 5 * min_dist) {
            good_matches.push_back(matches[i]);
        }
    }
    cout << "Good_matches:" << good_matches.size() << endl;
    // Stop timing
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << "Time Costs : " << elapsed_secs << endl;

    // Draw
    Mat img_matches;
    drawMatches(img_1, keypoints_1, img_2, keypoints_2,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    /*
    // Print every match that passed the filter
    for (int i = 0; i < good_matches.size(); i++)
    {
        printf("> good match [%d]  keypoint 1: %d  -- keypoint 2: %d\n", i, good_matches[i].queryIdx, good_matches[i].trainIdx);
    }
    */
    // Display
    imshow("Matches result", img_matches);

    // Press any key to exit
    waitKey(0);
    return 0;
}
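
One caveat about the 5 * min_dist rule above: when the best match is nearly perfect, min_dist approaches zero and the filter rejects almost everything. A hedged variant of the filtering loop, where the 0.02 floor is an assumption borrowed from the OpenCV feature-matching tutorial rather than part of the original listing:

// Same filter with a lower bound on the threshold (needs <algorithm> for std::max)
std::vector<DMatch> good_matches;
double dist_threshold = std::max(5 * min_dist, 0.02);
for (int i = 0; i < descriptors_1.rows; i++) {
    if (matches[i].distance < dist_threshold)
        good_matches.push_back(matches[i]);
}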

3. SIFT keypoints and descriptors + FLANN matching

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>

#include <iostream>
//#include <dirent.h>
#include <ctime>
using namespace cv;
using namespace std;

int main(int argc, const char *argv[]){
    /*
    if (argc != 3){
        cout << "usage:match <level> <image1> <image2>\n";
        exit(-1);
    }

    string arg2 = string(argv[2]);
    int level = atoi(arg2.c_str());
    string image1_name = string(argv[1]), image2_name = string(argv[2]);
    // getline(cin, image1_name);
    // getline(cin, image2_name);
    */
    Mat image1 = imread("img_1.bmp", 1);
    Mat image2 = imread("img_2.bmp", 1);

    Ptr<FeatureDetector> detector;
    Ptr<DescriptorExtractor> extractor;

    initModule_nonfree();
    // Factory names available here: "SIFT", "SURF", "ORB"
    detector = FeatureDetector::create("SIFT");
    extractor = DescriptorExtractor::create("SIFT");

    cout << "Sift特征点、描述子、FLANN匹配" << endl;
    // cout << "ratio = " << ratio << endl;
    clock_t begin = clock();

    vector<KeyPoint> keypoints1, keypoints2;
    detector->detect(image1, keypoints1);
    detector->detect(image2, keypoints2);

    cout << "# keypoints of image1 :" << keypoints1.size() << endl;
    cout << "# keypoints of image2 :" << keypoints2.size() << endl;

    Mat descriptors1, descriptors2;
    extractor->compute(image1, keypoints1, descriptors1);
    extractor->compute(image2, keypoints2, descriptors2);

    cout << "Descriptors size :" << descriptors1.cols << ":" << descriptors1.rows << endl;

    vector< vector<DMatch> > matches12, matches21;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    matcher->knnMatch(descriptors1, descriptors2, matches12, 2);
    matcher->knnMatch(descriptors2, descriptors1, matches21, 2);

    cout << "Matches1-2:" << matches12.size() << endl;
    cout << "Matches2-1:" << matches21.size() << endl;

    // Ratio test proposed in David Lowe's SIFT paper (ratio = 0.8)
    std::vector<DMatch> good_matches1, good_matches2;

    const float ratio = 0.8f;
    for (int i = 0; i < matches12.size(); i++){
        if (matches12[i][0].distance < ratio * matches12[i][1].distance)
            good_matches1.push_back(matches12[i][0]);
    }

    for (int i = 0; i < matches21.size(); i++){
        if (matches21[i][0].distance < ratio * matches21[i][1].distance)
            good_matches2.push_back(matches21[i][0]);
    }

    cout << "Good matches1:" << good_matches1.size() << endl;
    cout << "Good matches2:" << good_matches2.size() << endl;

    // Symmetry test
    std::vector<DMatch> better_matches;
    for (int i = 0; i<good_matches1.size(); i++){
        for (int j = 0; j<good_matches2.size(); j++){
            if (good_matches1[i].queryIdx == good_matches2[j].trainIdx && good_matches2[j].queryIdx == good_matches1[i].trainIdx){
                better_matches.push_back(DMatch(good_matches1[i].queryIdx, good_matches1[i].trainIdx, good_matches1[i].distance));
                break;
            }
        }
    }

    cout << "Better matches:" << better_matches.size() << endl;

    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << "Time Costs : " << elapsed_secs << endl;

    // show it on an image
    Mat output;
    drawMatches(image1, keypoints1, image2, keypoints2, better_matches, output);
    imshow("Matches result", output);
    waitKey(0);

    return 0;
}
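
FlannBasedMatcher picks a sensible default index for float descriptors such as SIFT (a KD-tree), but the parameters can also be set explicitly to trade speed against accuracy. A sketch under that assumption, using the cv::flann parameter classes that features2d already pulls in:

// Explicit FLANN configuration: 4 randomized KD-trees, 32 leaf checks per query
// (more checks = slower but more accurate search)
FlannBasedMatcher tunedMatcher(new flann::KDTreeIndexParams(4),
                               new flann::SearchParams(32));
vector< vector<DMatch> > knn12;
tunedMatcher.knnMatch(descriptors1, descriptors2, knn12, 2);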

4. SIFT keypoints and descriptors + brute-force matching

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>

#include <iostream>
//#include <dirent.h>
#include <ctime>
using namespace cv;
using namespace std;

int main(int argc, const char *argv[]){
    /*
    if (argc != 3){
    cout << "usage:match <level> <image1> <image2>\n";
    exit(-1);
    }

    string arg2 = string(argv[2]);
    int level = atoi(arg2.c_str());
    string image1_name = string(argv[1]), image2_name = string(argv[2]);
    // getline(cin, image1_name);
    // getline(cin, image2_name);
    */
    Mat image1 = imread("img_1.bmp", 1);
    Mat image2 = imread("img_2.bmp", 1);

    Ptr<FeatureDetector> detector;
    Ptr<DescriptorExtractor> extractor;

    initModule_nonfree();
    // Factory names available here: "SIFT", "SURF", "ORB"
    detector = FeatureDetector::create("SIFT");
    extractor = DescriptorExtractor::create("SIFT");

    cout << "sift特征点、描述子、暴力匹配" << endl;
    //cout << "筛选条件:5倍最小距离" << endl;
    clock_t begin = clock();

    vector<KeyPoint> keypoints1, keypoints2;
    detector->detect(image1, keypoints1);
    detector->detect(image2, keypoints2);

    cout << "# keypoints of image1 :" << keypoints1.size() << endl;
    cout << "# keypoints of image2 :" << keypoints2.size() << endl;

    Mat descriptors1, descriptors2;
    extractor->compute(image1, keypoints1, descriptors1);
    extractor->compute(image2, keypoints2, descriptors2);

    cout << "Descriptors size :" << descriptors1.cols << ":" << descriptors1.rows << endl;

    vector< vector<DMatch> > matches12, matches21;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    matcher->knnMatch(descriptors1, descriptors2, matches12, 2);
    matcher->knnMatch(descriptors2, descriptors1, matches21, 2);

    // BFMatcher bfmatcher(NORM_L2, true);
    // vector<DMatch> matches;
    // bfmatcher.match(descriptors1, descriptors2, matches);
    cout << "Matches1-2:" << matches12.size() << endl;
    cout << "Matches2-1:" << matches21.size() << endl;

    // Ratio test proposed in David Lowe's SIFT paper (ratio = 0.8)
    std::vector<DMatch> good_matches1, good_matches2;

    const float ratio = 0.8f;
    for (int i = 0; i < matches12.size(); i++){
        if (matches12[i][0].distance < ratio * matches12[i][1].distance)
            good_matches1.push_back(matches12[i][0]);
    }

    for (int i = 0; i < matches21.size(); i++){
        if (matches21[i][0].distance < ratio * matches21[i][1].distance)
            good_matches2.push_back(matches21[i][0]);
    }

    cout << "Good matches1:" << good_matches1.size() << endl;
    cout << "Good matches2:" << good_matches2.size() << endl;

    // Symmetry test
    std::vector<DMatch> better_matches;
    for (int i = 0; i<good_matches1.size(); i++){
        for (int j = 0; j<good_matches2.size(); j++){
            if (good_matches1[i].queryIdx == good_matches2[j].trainIdx && good_matches2[j].queryIdx == good_matches1[i].trainIdx){
                better_matches.push_back(DMatch(good_matches1[i].queryIdx, good_matches1[i].trainIdx, good_matches1[i].distance));
                break;
            }
        }
    }

    cout << "Better matches:" << better_matches.size() << endl;

    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << "Time Costs : " << elapsed_secs << endl;

    // show it on an image
    Mat output;
    drawMatches(image1, keypoints1, image2, keypoints2, better_matches, output);
    imshow("Matches result", output);
    waitKey(0);

    return 0;
}
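
After the ratio and symmetry tests, a geometric consistency check is a natural next step: estimate a homography with RANSAC and keep only the inlier matches. A hedged sketch, assuming the two views are roughly related by a homography, at least 4 matches survived, and <opencv2/calib3d/calib3d.hpp> is included for findHomography:

// Collect the coordinates of the matched keypoints
vector<Point2f> pts1, pts2;
for (size_t i = 0; i < better_matches.size(); i++) {
    pts1.push_back(keypoints1[better_matches[i].queryIdx].pt);
    pts2.push_back(keypoints2[better_matches[i].trainIdx].pt);
}
// RANSAC with a 3-pixel reprojection threshold; the mask flags inliers
vector<uchar> inlierMask;
Mat H = findHomography(pts1, pts2, CV_RANSAC, 3.0, inlierMask);
vector<DMatch> inlier_matches;
for (size_t i = 0; i < inlierMask.size(); i++)
    if (inlierMask[i]) inlier_matches.push_back(better_matches[i]);
cout << "RANSAC inliers:" << inlier_matches.size() << endl;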

5. FAST keypoints, SURF descriptors, brute-force matching

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>

#include <iostream>
//#include <dirent.h>
#include <ctime>
using namespace cv;
using namespace std;

int main(int argc, const char *argv[]){
    /*
    if (argc != 2){
        cout << "usage:match <method>\n";
        exit(-1);
    }
    string method = string(argv[1]);

    string image1_name, image2_name;

    getline(cin, image1_name);
    getline(cin, image2_name);
    */
    Mat image1 = imread("img_1.bmp", 1);
    Mat image2 = imread("img_2.bmp", 1);

    FastFeatureDetector fast(40);   // FAST detection threshold of 40
    SurfDescriptorExtractor extractor;

    // Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("SIFT");
    // The factory call above fails here because SIFT lives in the nonfree module:
    // without a prior initModule_nonfree() call, create("SIFT") returns an empty pointer.
    cout << "FAST keypoints, SURF descriptors, brute-force matching" << endl;
    clock_t begin = clock();

    vector<KeyPoint> keypoints1, keypoints2;
    fast.detect(image1, keypoints1);
    fast.detect(image2, keypoints2);

    cout << "# keypoints of image1 :" << keypoints1.size() << endl;
    cout << "# keypoints of image2 :" << keypoints2.size() << endl;

    Mat descriptors1, descriptors2;
    extractor.compute(image1, keypoints1, descriptors1);
    extractor.compute(image2, keypoints2, descriptors2);

    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << "Time Costs : " << elapsed_secs << endl;

    BFMatcher bfmatcher(NORM_L2, true);
    vector<DMatch> matches;
    bfmatcher.match(descriptors1, descriptors2, matches);
    cout << "# matches : " << matches.size() << endl;

    // show it on an image
    Mat output;
    drawMatches(image1, keypoints1, image2, keypoints2, matches, output);
    imshow("Matches result", output);
    waitKey(0);

    return 0;
}
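
Mixing detectors and extractors, as this listing does, makes the matcher's distance easy to get wrong: binary descriptors (ORB, BRIEF) should be compared with Hamming distance, float descriptors (SIFT, SURF) with L2. A small defensive sketch that derives the norm from the descriptor type:

// Binary descriptors are stored as CV_8U rows, float descriptors as CV_32F
int normType = (descriptors1.type() == CV_8U) ? NORM_HAMMING : NORM_L2;
BFMatcher typedMatcher(normType, true);   // cross-check enabled, as in the listing
vector<DMatch> typed_matches;
typedMatcher.match(descriptors1, descriptors2, typed_matches);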


6. FAST keypoints, ORB descriptors, BruteForce-Hamming matching

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>

#include <iostream>
//#include <dirent.h>
#include <ctime>
using namespace cv;
using namespace std;

int main(int argc, const char *argv[]){
    /*
    if (argc != 3){
        cout << "usage:match <image1> <image2>\n";
        exit(-1);
    }

    string image1_name = string(argv[1]), image2_name = string(argv[2]);
    // getline(cin, image1_name);
    // getline(cin, image2_name);
    */
    Mat image1 = imread("img_1.bmp", 1);
    Mat image2 = imread("img_2.bmp", 1);

    vector<KeyPoint> keypoints_1, keypoints_2;
    Mat descriptors_1, descriptors_2;

    cout << "fast特征点、orb描述子、BruteForce-Hamming匹配" << endl;
    //cout << "筛选条件:5倍最小距离" << endl;
    clock_t begin = clock();

    Ptr<FeatureDetector> detector;
    // Adapt the FAST threshold (starting at 10, with non-max suppression) until
    // 3000-6000 keypoints are found, in at most 8 iterations
    detector = new DynamicAdaptedFeatureDetector(new FastAdjuster(10, true), 3000, 6000, 8);
    detector->detect(image1, keypoints_1);
    detector->detect(image2, keypoints_2);
    cout << "# keypoints of image1 :" << keypoints_1.size() << endl;
    cout << "# keypoints of image2 :" << keypoints_2.size() << endl;

    initModule_nonfree(); // NB: needed here; without it the program crashes (core dump)
    Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");
    extractor->compute(image1, keypoints_1, descriptors_1);
    extractor->compute(image2, keypoints_2, descriptors_2);

    vector< vector<DMatch> > matches;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    matcher->knnMatch(descriptors_1, descriptors_2, matches, 500);  // up to 500 neighbours per query descriptor

    // Keep a match only if it lies within a defined area of the image:
    // at most 25% of the maximum possible distance (the image diagonal)
    double thresholdDist = 0.25 * sqrt(double(image1.size().height*image1.size().height + image1.size().width*image1.size().width));

    vector< DMatch > good_matches2;
    good_matches2.reserve(matches.size());
    for (size_t i = 0; i < matches.size(); ++i){
        for (int j = 0; j < matches[i].size(); j++)    {
            Point2f from = keypoints_1[matches[i][j].queryIdx].pt;
            Point2f to = keypoints_2[matches[i][j].trainIdx].pt;

            //calculate local distance for each possible match
            double dist = sqrt((from.x - to.x) * (from.x - to.x) + (from.y - to.y) * (from.y - to.y));

            // Keep the match if it is within the area and roughly on the same scanline
            if (dist < thresholdDist && abs(from.y - to.y) < 5)  {
                good_matches2.push_back(matches[i][j]);
                break;   // take only the first neighbour that qualifies
            }
            }
        }
    }

    cout << "Good matches :" << good_matches2.size() << endl;

    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << "Time Costs : " << elapsed_secs << endl;

    // show it on an image
    Mat output;
    drawMatches(image1, keypoints_1, image2, keypoints_2, good_matches2, output);
    imshow("Matches result", output);
    waitKey(0);

    return 0;
}
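
DynamicAdaptedFeatureDetector tunes one global FAST threshold until the keypoint count falls into the requested range. When an even spatial distribution matters more than an exact count, OpenCV 2.4's GridAdaptedFeatureDetector is an alternative; the parameters in this sketch are illustrative assumptions, not taken from the listing:

// Split the image into a 4x4 grid and run FAST per cell,
// capping the total at about 3000 keypoints spread across the cells
Ptr<FeatureDetector> gridDetector =
    new GridAdaptedFeatureDetector(new FastFeatureDetector(10), 3000, 4, 4);
vector<KeyPoint> grid_keypoints;
gridDetector->detect(image1, grid_keypoints);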

References

1. http://blog.csdn.net/vonzhoufz/article/details/46594369

2. "Introduction to OpenCV3 Programming" (《OpenCV3编程入门》)
