OpenCV source analysis: the boosting-related source code (cvBoostStartTraining, cvBoostNextWeakClassifier, cvBoostEndTraining)
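This post walks through the 2-class boosting machinery used by OpenCV's haartraining application (cvboost.cpp under apps/haartraining in the 2.4.x sources): a small CvBoostTrainer state object, one weight-update routine per boosting flavour (Discrete, Real and Gentle AdaBoost plus LogitBoost), and the three public entry points cvBoostStartTraining, cvBoostNextWeakClassifier and cvBoostEndTraining that dispatch to them. A usage sketch of the full training loop follows the listing.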

/****************************************************************************************\
*                                        Boosting                                        *
\****************************************************************************************/

typedef struct CvBoostTrainer
{
    CvBoostType type;
    int count;             /* (idx) ? number_of_indices : number_of_samples */
    int* idx;              /* optional list of active sample indices (copied from sampleIdx) */
    float* F;              /* cumulative per-sample score, used by LogitBoost only */
} CvBoostTrainer;

/*
 * cvBoostStartTraining, cvBoostNextWeakClassifier, cvBoostEndTraining
 *
 * These functions perform training of a 2-class boosting classifier
 * using ANY appropriate weak classifier
 */

static
CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,
                                       CvMat* weakTrainVals,
                                       CvMat* /*weights*/,
                                       CvMat* sampleIdx,
                                       CvBoostType type )
{
    uchar* ydata;
    int ystep;
    int m;
    uchar* traindata;
    int trainstep;
    int trainnum;
    int i;
    int idx;

    size_t datasize;
    CvBoostTrainer* ptr;

    int idxnum;
    int idxstep;
    uchar* idxdata;

    assert( trainClasses != NULL );
    assert( CV_MAT_TYPE( trainClasses->type ) == CV_32FC1 );
    assert( weakTrainVals != NULL );
    assert( CV_MAT_TYPE( weakTrainVals->type ) == CV_32FC1 );

    CV_MAT2VEC( *trainClasses, ydata, ystep, m );
    CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum );

    CV_Assert( m == trainnum );

    idxnum = 0;
    idxstep = 0;
    idxdata = NULL;
    if( sampleIdx )
    {
        CV_MAT2VEC( *sampleIdx, idxdata, idxstep, idxnum );
    }

    datasize = sizeof( *ptr ) + sizeof( *ptr->idx ) * idxnum;
    ptr = (CvBoostTrainer*) cvAlloc( datasize );
    memset( ptr, 0, datasize );
    ptr->F = NULL;
    ptr->idx = NULL;

    ptr->count = m;
    ptr->type = type;

    if( idxnum > 0 )
    {
        CvScalar s;

        ptr->idx = (int*) (ptr + 1);
        ptr->count = idxnum;
        for( i = 0; i < ptr->count; i++ )
        {
            cvRawDataToScalar( idxdata + i*idxstep, CV_MAT_TYPE( sampleIdx->type ), &s );
            ptr->idx[i] = (int) s.val[0];
        }
    }
    for( i = 0; i < ptr->count; i++ )
    {
        idx = (ptr->idx) ? ptr->idx[i] : i;

        *((float*) (traindata + idx * trainstep)) =
            2.0F * (*((float*) (ydata + idx * ystep))) - 1.0F;
    }

    return ptr;
}
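icvBoostStartTraining is the common initializer for Discrete, Real and Gentle AdaBoost. It allocates the CvBoostTrainer in a single block (the optional index array lives right after the struct), copies sampleIdx into idx when only a subset of samples is used, and fills weakTrainVals with the targets the weak learner should fit: y' = 2*y - 1, i.e. the {0, 1} class labels are mapped to {-1, +1}.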

/*
 *
 * Discrete AdaBoost functions
 *
 */
static
float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,
                                     CvMat* trainClasses,
                                     CvMat* /*weakTrainVals*/,
                                     CvMat* weights,
                                     CvBoostTrainer* trainer )
{
    uchar* evaldata;
    int evalstep;
    int m;
    uchar* ydata;
    int ystep;
    int ynum;
    uchar* wdata;
    int wstep;
    int wnum;

    float sumw;
    float err;
    int i;
    int idx;

    CV_Assert( weakEvalVals != NULL );
    CV_Assert( CV_MAT_TYPE( weakEvalVals->type ) == CV_32FC1 );
    CV_Assert( trainClasses != NULL );
    CV_Assert( CV_MAT_TYPE( trainClasses->type ) == CV_32FC1 );
    CV_Assert( weights != NULL );
    CV_Assert( CV_MAT_TYPE( weights ->type ) == CV_32FC1 );

    CV_MAT2VEC( *weakEvalVals, evaldata, evalstep, m );
    CV_MAT2VEC( *trainClasses, ydata, ystep, ynum );
    CV_MAT2VEC( *weights, wdata, wstep, wnum );

    CV_Assert( m == ynum );
    CV_Assert( m == wnum );

    sumw = 0.0F;
    err = 0.0F;
    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        sumw += *((float*) (wdata + idx*wstep));
        err += (*((float*) (wdata + idx*wstep))) *
            ( (*((float*) (evaldata + idx*evalstep))) !=
                2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F );
    }
    err /= sumw;
    err = -cvLogRatio( err );

    sumw = 0.0F;    /* re-accumulate the total weight while the update is applied */
    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        *((float*) (wdata + idx*wstep)) *= expf( err *
            ((*((float*) (evaldata + idx*evalstep))) !=
                2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F) );
        sumw += *((float*) (wdata + idx*wstep));
    }
    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        *((float*) (wdata + idx * wstep)) /= sumw;
    }

    return err;
}
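This is one standard Discrete AdaBoost round. The first loop computes the weighted misclassification rate err of the current weak classifier (its output in weakEvalVals is compared against 2*y - 1). cvLogRatio, defined elsewhere in the haartraining code, returns the clamped log-odds log(v / (1 - v)), so err = -cvLogRatio(err) turns the error rate into the classifier weight alpha = log((1 - err) / err). The second loop multiplies the weight of every misclassified sample by exp(alpha) while re-accumulating the total weight, and the third loop renormalizes the weights to sum to one. The returned value is alpha, which the caller can use as this weak classifier's vote in the strong classifier.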

/*
 *
 * Real AdaBoost functions
 *
 */
static
float icvBoostNextWeakClassifierRAB( CvMat* weakEvalVals,
                                     CvMat* trainClasses,
                                     CvMat* /*weakTrainVals*/,
                                     CvMat* weights,
                                     CvBoostTrainer* trainer )
{
    uchar* evaldata;
    int evalstep;
    int m;
    uchar* ydata;
    int ystep;
    int ynum;
    uchar* wdata;
    int wstep;
    int wnum;

    float sumw;
    int i, idx;

    CV_Assert( weakEvalVals != NULL );
    CV_Assert( CV_MAT_TYPE( weakEvalVals->type ) == CV_32FC1 );
    CV_Assert( trainClasses != NULL );
    CV_Assert( CV_MAT_TYPE( trainClasses->type ) == CV_32FC1 );
    CV_Assert( weights != NULL );
    CV_Assert( CV_MAT_TYPE( weights ->type ) == CV_32FC1 );

    CV_MAT2VEC( *weakEvalVals, evaldata, evalstep, m );
    CV_MAT2VEC( *trainClasses, ydata, ystep, ynum );
    CV_MAT2VEC( *weights, wdata, wstep, wnum );

    CV_Assert( m == ynum );
    CV_Assert( m == wnum );

    sumw = 0.0F;
    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        *((float*) (wdata + idx*wstep)) *= expf( (-(*((float*) (ydata + idx*ystep))) + 0.5F)
            * cvLogRatio( *((float*) (evaldata + idx*evalstep)) ) );
        sumw += *((float*) (wdata + idx*wstep));
    }
    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        *((float*) (wdata + idx*wstep)) /= sumw;
    }

    return 1.0F;
}
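For Real AdaBoost the weak classifier is expected to output a class-probability estimate p(x) in (0, 1) rather than a hard label. Writing its confidence-rated vote as f(x) = 0.5 * log(p / (1 - p)), the textbook update w *= exp(-y' * f(x)) with y' = 2*y - 1 is exactly what the first loop computes: for y = 1 the factor is exp(-0.5 * cvLogRatio(p)) and for y = 0 it is exp(+0.5 * cvLogRatio(p)). The second loop renormalizes the weights. The function returns 1.0 because the weak hypothesis already carries its own confidence; there is no separate alpha.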

/*
 *
 * LogitBoost functions
 *
 */
#define CV_LB_PROB_THRESH      0.01F
#define CV_LB_WEIGHT_THRESHOLD 0.0001F

static
void icvResponsesAndWeightsLB( int num, uchar* wdata, int wstep,
                               uchar* ydata, int ystep,
                               uchar* fdata, int fstep,
                               uchar* traindata, int trainstep,
                               int* indices )
{
    int i, idx;
    float p;

    for( i = 0; i < num; i++ )
    {
        idx = (indices) ? indices[i] : i;

        p = 1.0F / (1.0F + expf( -(*((float*) (fdata + idx*fstep)))) );
        *((float*) (wdata + idx*wstep)) = MAX( p * (1.0F - p), CV_LB_WEIGHT_THRESHOLD );
        if( *((float*) (ydata + idx*ystep)) == 1.0F )
        {
            *((float*) (traindata + idx*trainstep)) =
                1.0F / (MAX( p, CV_LB_PROB_THRESH ));
        }
        else
        {
            *((float*) (traindata + idx*trainstep)) =
                -1.0F / (MAX( 1.0F - p, CV_LB_PROB_THRESH ));
        }
    }
}
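icvResponsesAndWeightsLB does the per-sample bookkeeping of LogitBoost: from the current additive score F it computes p = 1 / (1 + exp(-F)), sets the sample weight to p * (1 - p) (clamped from below by CV_LB_WEIGHT_THRESHOLD), and writes the working response z = (y* - p) / (p * (1 - p)) into weakTrainVals. For y* = 1 that expression simplifies to 1/p and for y* = 0 to -1/(1 - p), which is what the two branches compute, with the denominators clamped by CV_LB_PROB_THRESH so that z stays bounded.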

static
CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,
                                         CvMat* weakTrainVals,
                                         CvMat* weights,
                                         CvMat* sampleIdx,
                                         CvBoostType type )
{
    size_t datasize;
    CvBoostTrainer* ptr;

    uchar* ydata;
    int ystep;
    int m;
    uchar* traindata;
    int trainstep;
    int trainnum;
    uchar* wdata;
    int wstep;
    int wnum;
    int i;

    int idxnum;
    int idxstep;
    uchar* idxdata;

    assert( trainClasses != NULL );
    assert( CV_MAT_TYPE( trainClasses->type ) == CV_32FC1 );
    assert( weakTrainVals != NULL );
    assert( CV_MAT_TYPE( weakTrainVals->type ) == CV_32FC1 );
    assert( weights != NULL );
    assert( CV_MAT_TYPE( weights->type ) == CV_32FC1 );

    CV_MAT2VEC( *trainClasses, ydata, ystep, m );
    CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum );
    CV_MAT2VEC( *weights, wdata, wstep, wnum );

    CV_Assert( m == trainnum );
    CV_Assert( m == wnum );

    idxnum = 0;
    idxstep = 0;
    idxdata = NULL;
    if( sampleIdx )
    {
        CV_MAT2VEC( *sampleIdx, idxdata, idxstep, idxnum );
    }

    datasize = sizeof( *ptr ) + sizeof( *ptr->F ) * m + sizeof( *ptr->idx ) * idxnum;
    ptr = (CvBoostTrainer*) cvAlloc( datasize );
    memset( ptr, 0, datasize );
    ptr->F = (float*) (ptr + 1);
    ptr->idx = NULL;

    ptr->count = m;
    ptr->type = type;

    if( idxnum > 0 )
    {
        CvScalar s;

        ptr->idx = (int*) (ptr->F + m);
        ptr->count = idxnum;
        for( i = 0; i < ptr->count; i++ )
        {
            cvRawDataToScalar( idxdata + i*idxstep, CV_MAT_TYPE( sampleIdx->type ), &s );
            ptr->idx[i] = (int) s.val[0];
        }
    }

    for( i = 0; i < m; i++ )
    {
        ptr->F[i] = 0.0F;
    }

    icvResponsesAndWeightsLB( ptr->count, wdata, wstep, ydata, ystep,
                              (uchar*) ptr->F, sizeof( *ptr->F ),
                              traindata, trainstep, ptr->idx );

    return ptr;
}
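The LogitBoost initializer differs from the generic one because the algorithm keeps a running additive score F for every sample. The trainer, the F array and the optional index array are carved out of a single allocation. F starts at zero, which corresponds to p = 0.5, so the first call to icvResponsesAndWeightsLB yields uniform weights of 0.25 and working responses of +2 or -2.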

static
float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals,
                                    CvMat* trainClasses,
                                    CvMat* weakTrainVals,
                                    CvMat* weights,
                                    CvBoostTrainer* trainer )
{
    uchar* evaldata;
    int evalstep;
    int m;
    uchar* ydata;
    int ystep;
    int ynum;
    uchar* traindata;
    int trainstep;
    int trainnum;
    uchar* wdata;
    int wstep;
    int wnum;
    int i, idx;

    assert( weakEvalVals != NULL );
    assert( CV_MAT_TYPE( weakEvalVals->type ) == CV_32FC1 );
    assert( trainClasses != NULL );
    assert( CV_MAT_TYPE( trainClasses->type ) == CV_32FC1 );
    assert( weakTrainVals != NULL );
    assert( CV_MAT_TYPE( weakTrainVals->type ) == CV_32FC1 );
    assert( weights != NULL );
    assert( CV_MAT_TYPE( weights ->type ) == CV_32FC1 );

    CV_MAT2VEC( *weakEvalVals, evaldata, evalstep, m );
    CV_MAT2VEC( *trainClasses, ydata, ystep, ynum );
    CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum );
    CV_MAT2VEC( *weights, wdata, wstep, wnum );

    CV_Assert( m == ynum );
    CV_Assert( m == wnum );
    CV_Assert( m == trainnum );
    //assert( m == trainer->count );

    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        trainer->F[idx] += *((float*) (evaldata + idx * evalstep));
    }

    icvResponsesAndWeightsLB( trainer->count, wdata, wstep, ydata, ystep,
                              (uchar*) trainer->F, sizeof( *trainer->F ),
                              traindata, trainstep, trainer->idx );

    return 1.0F;
}
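Each LogitBoost round simply adds the weak learner's regression output to the per-sample score F and then recomputes the probabilities, weights and working responses for the next round via icvResponsesAndWeightsLB. Unlike the AdaBoost variants, it rewrites weakTrainVals every round, which is why that argument is not ignored here. The return value 1.0 is a placeholder; the strong classifier is the sign of the accumulated sum of weak outputs.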

/*
 *
 * Gentle AdaBoost
 *
 */
static
float icvBoostNextWeakClassifierGAB( CvMat* weakEvalVals,
                                     CvMat* trainClasses,
                                     CvMat* /*weakTrainVals*/,
                                     CvMat* weights,
                                     CvBoostTrainer* trainer )
{
    uchar* evaldata;
    int evalstep;
    int m;
    uchar* ydata;
    int ystep;
    int ynum;
    uchar* wdata;
    int wstep;
    int wnum;

    int i, idx;
    float sumw;

    CV_Assert( weakEvalVals != NULL );
    CV_Assert( CV_MAT_TYPE( weakEvalVals->type ) == CV_32FC1 );
    CV_Assert( trainClasses != NULL );
    CV_Assert( CV_MAT_TYPE( trainClasses->type ) == CV_32FC1 );
    CV_Assert( weights != NULL );
    CV_Assert( CV_MAT_TYPE( weights->type ) == CV_32FC1 );

    CV_MAT2VEC( *weakEvalVals, evaldata, evalstep, m );
    CV_MAT2VEC( *trainClasses, ydata, ystep, ynum );
    CV_MAT2VEC( *weights, wdata, wstep, wnum );

    CV_Assert( m == ynum );
    CV_Assert( m == wnum );

    sumw = 0.0F;
    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        *((float*) (wdata + idx*wstep)) *=
            expf( -(*((float*) (evaldata + idx*evalstep)))
                  * ( 2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F ) );
        sumw += *((float*) (wdata + idx*wstep));
    }

    for( i = 0; i < trainer->count; i++ )
    {
        idx = (trainer->idx) ? trainer->idx[i] : i;

        *((float*) (wdata + idx*wstep)) /= sumw;
    }

    return 1.0F;
}
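Gentle AdaBoost uses a regression-style weak learner whose real-valued output f(x) is fitted to the +-1 targets prepared by icvBoostStartTraining. The re-weighting is the textbook w *= exp(-y' * f(x)) with y' = 2*y - 1, followed by normalization. As with Real AdaBoost and LogitBoost, the weak hypothesis enters the ensemble with unit weight, so the function just returns 1.0.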

typedef CvBoostTrainer* (*CvBoostStartTraining)( CvMat* trainClasses,
                                                 CvMat* weakTrainVals,
                                                 CvMat* weights,
                                                 CvMat* sampleIdx,
                                                 CvBoostType type );

typedef float (*CvBoostNextWeakClassifier)( CvMat* weakEvalVals,
                                            CvMat* trainClasses,
                                            CvMat* weakTrainVals,
                                            CvMat* weights,
                                            CvBoostTrainer* data );

CvBoostStartTraining startTraining[4] = {
        icvBoostStartTraining,
        icvBoostStartTraining,
        icvBoostStartTrainingLB,
        icvBoostStartTraining
    };

CvBoostNextWeakClassifier nextWeakClassifier[4] = {
        icvBoostNextWeakClassifierDAB,
        icvBoostNextWeakClassifierRAB,
        icvBoostNextWeakClassifierLB,
        icvBoostNextWeakClassifierGAB
    };
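Both tables are indexed by CvBoostType, so the enum order in the haartraining headers (Discrete AdaBoost, Real AdaBoost, LogitBoost, Gentle AdaBoost, presumably 0 through 3) must match the order of the entries here. Only LogitBoost needs its own initializer because of the F array; the other three flavours share icvBoostStartTraining, while each has its own weight-update routine.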

/*
 *
 * Dispatchers
 *
 */
CV_BOOST_IMPL
CvBoostTrainer* cvBoostStartTraining( CvMat* trainClasses,
                                      CvMat* weakTrainVals,
                                      CvMat* weights,
                                      CvMat* sampleIdx,
                                      CvBoostType type )
{
    return startTraining[type]( trainClasses, weakTrainVals, weights, sampleIdx, type );
}

CV_BOOST_IMPL
void cvBoostEndTraining( CvBoostTrainer** trainer )
{
    cvFree( trainer );
    *trainer = NULL;
}

CV_BOOST_IMPL
float cvBoostNextWeakClassifier( CvMat* weakEvalVals,
                                 CvMat* trainClasses,
                                 CvMat* weakTrainVals,
                                 CvMat* weights,
                                 CvBoostTrainer* trainer )
{
    return nextWeakClassifier[trainer->type]( weakEvalVals, trainClasses,
        weakTrainVals, weights, trainer );
}
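To make the call sequence concrete, here is a minimal driver sketch (not taken from OpenCV) showing how the three entry points are meant to be chained. It assumes the haartraining headers are included, that the CvBoostType constants follow the dispatch-table order above (CV_DABCLASS, CV_RABCLASS, CV_LBCLASS, CV_GABCLASS), and it uses two hypothetical hooks, train_weak_classifier and eval_weak_classifier, in place of a real weak learner:

/* Hypothetical weak-learner hooks -- placeholders, not OpenCV functions. */
void train_weak_classifier( CvMat* trainData, CvMat* weakTrainVals, CvMat* weights );
void eval_weak_classifier( CvMat* trainData, CvMat* weakEvalVals );

/* Minimal boosting loop built on the entry points above.
 * Assumes the haartraining headers for CvBoostTrainer, CvBoostType and the
 * cvBoost* prototypes; CV_GABCLASS is assumed to be the Gentle AdaBoost enum value. */
void boost_demo( CvMat* trainData, CvMat* trainClasses, int numRounds )
{
    /* trainClasses is a 1 x m CV_32FC1 vector of 0/1 labels */
    int m = trainClasses->rows * trainClasses->cols;
    CvMat* weakTrainVals = cvCreateMat( 1, m, CV_32FC1 );  /* targets for the weak learner */
    CvMat* weights       = cvCreateMat( 1, m, CV_32FC1 );  /* sample weights, kept normalized */
    CvMat* weakEvalVals  = cvCreateMat( 1, m, CV_32FC1 );  /* weak classifier output per sample */
    CvBoostTrainer* trainer;
    int i, t;

    /* uniform initial weights */
    for( i = 0; i < m; i++ )
        CV_MAT_ELEM( *weights, float, 0, i ) = 1.0F / m;

    /* fills weakTrainVals with the initial targets (+-1, or LogitBoost working responses) */
    trainer = cvBoostStartTraining( trainClasses, weakTrainVals, weights,
                                    NULL /* use all samples */, CV_GABCLASS );

    for( t = 0; t < numRounds; t++ )
    {
        /* fit the weak learner to weakTrainVals under the current weights,
           then store its response for every training sample in weakEvalVals */
        train_weak_classifier( trainData, weakTrainVals, weights );
        eval_weak_classifier( trainData, weakEvalVals );

        /* re-weight the samples; for Discrete AdaBoost the return value is alpha */
        cvBoostNextWeakClassifier( weakEvalVals, trainClasses,
                                   weakTrainVals, weights, trainer );
    }

    cvBoostEndTraining( &trainer );
    cvReleaseMat( &weakTrainVals );
    cvReleaseMat( &weights );
    cvReleaseMat( &weakEvalVals );
}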