I have been working on smoke and flame detection recently and need motion detection as a first step. I came across the method "A Hybrid Algorithm for Moving Object Detection" in the paper A System for Video Surveillance and Monitoring and implemented it with OpenCV (the code is pasted below), but the results it produces are very poor. I cannot tell where the code went wrong, so any pointers from the experts here would be appreciated, thanks!
The left image is the original frame and the right image is the foreground obtained with the method from that paper. (My own code is probably at fault somewhere; please point out the mistakes, thank you.)
The code is as follows:
#include <stdio.h>
#include <math.h>   /* for fabs */
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
int main(int argc, char** argv)
{
    CvCapture* pCapture = NULL;
    cvNamedWindow("source", 1);
    cvNamedWindow("move_detection", 1);
    cvMoveWindow("source", 30, 0);
    cvMoveWindow("move_detection", 690, 0);
    char *szFileName = "E:\\技术总结\\视频中的火花和烟雾的检测\\烟的测试视频\\Basketball_yard.avi";
    if (!(pCapture = cvCaptureFromFile(szFileName)))
    {
        fprintf(stderr, "Can not open video file %s\n", szFileName);
        return -2;
    }
    IplImage* pFrame = NULL;
    IplImage* pFrImg = NULL;
    IplImage* pBkImg = NULL;
    CvMat* pFrameMat = NULL;
    CvMat* pFrMat = NULL;
    CvMat* pBkMat = NULL;
    CvMat* pMatChannels[4] = {NULL, NULL, NULL, NULL};
    CvMat* pFrameMatPre1 = NULL;
    CvMat* pFrameMatPre2 = NULL;
    CvMat* pFrameMatCur = NULL;
    CvMat* framediff1 = NULL;
    CvMat* framediff2 = NULL;
    CvMat* threshold = NULL;
    int nFrmNum = 0;
    int frame_height;
    int frame_width;
    int frame_channel;
    int k;
    while ((pFrame = cvQueryFrame(pCapture)) != NULL)
    {
        nFrmNum++;
        frame_height = pFrame->height;
        frame_width = pFrame->width;
        frame_channel = pFrame->nChannels;
        if (nFrmNum == 1)
        {
            pBkImg = cvCreateImage(cvSize(frame_width, frame_height), IPL_DEPTH_8U, 1);
            pFrImg = cvCreateImage(cvSize(frame_width, frame_height), IPL_DEPTH_8U, 1);
            pBkMat = cvCreateMat(frame_height, frame_width, CV_32FC1);
            pFrMat = cvCreateMat(frame_height, frame_width, CV_32FC1);
            pFrameMat = cvCreateMat(frame_height, frame_width, CV_32FC3);
            pFrameMatPre1 = cvCreateMat(frame_height, frame_width, CV_32FC1);
            pFrameMatPre2 = cvCreateMat(frame_height, frame_width, CV_32FC1);
            pFrameMatCur = cvCreateMat(frame_height, frame_width, CV_32FC1);
            framediff1 = cvCreateMat(frame_height, frame_width, CV_32FC1);
            framediff2 = cvCreateMat(frame_height, frame_width, CV_32FC1);
            threshold = cvCreateMat(frame_height, frame_width, CV_32FC1);
            for (k = 0; k < frame_channel; k++)
            {
                pMatChannels[k] = cvCreateMat(frame_height, frame_width, CV_32FC1);
            }
            // convert the frame to a single-channel (grayscale) image before further processing
            cvConvertScale(pFrame, pFrameMat, 1, 0);
            cvSplit(pFrameMat, pMatChannels[0], pMatChannels[1], pMatChannels[2], pMatChannels[3]);
            cvAddWeighted(pMatChannels[0], 0.114, pMatChannels[1], 0.587, 0, pFrameMatPre1);
            cvAddWeighted(pMatChannels[2], 0.299, pFrameMatPre1, 1, 0, pFrameMatPre1); // grayscale from the B, G, R channels
            cvConvert(pFrameMatPre1, pBkMat);        // initialize the background with the first frame
            cvConvert(pFrameMatPre1, pFrameMatPre2); // both "previous" frames start out as the first frame
            cvZero(pFrameMatCur);
            cvSet(threshold, cvScalar(5), NULL); // initial threshold set to 5
        }
        else
        {
            double t = (double)cvGetTickCount(); // time the processing of each frame
            const double alpha = 0.02;
            int i, j;
            cvConvertScale(pFrame, pFrameMat, 1, 0);
            cvSplit(pFrameMat, pMatChannels[0], pMatChannels[1], pMatChannels[2], pMatChannels[3]);
            cvAddWeighted(pMatChannels[0], 0.114, pMatChannels[1], 0.587, 0, pFrameMatCur);
            cvAddWeighted(pMatChannels[2], 0.299, pFrameMatCur, 1, 0, pFrameMatCur); // convert to grayscale
            // three-frame differencing: current frame against the two previous frames
            cvAbsDiff(pFrameMatCur, pFrameMatPre1, framediff1);
            cvAbsDiff(pFrameMatCur, pFrameMatPre2, framediff2);
            //cvAbsDiff(pFrameMatCur, pBkMat, pFrMat);
            // binarize the foreground mask
            for (i = 0; i < frame_height; i++)
                for (j = 0; j < frame_width; j++)
                {
                    if ((cvmGet(framediff1, i, j) > cvmGet(threshold, i, j)) && (cvmGet(framediff2, i, j) > cvmGet(threshold, i, j)))
                        cvmSet(pFrMat, i, j, 255.0);
                    else
                        cvmSet(pFrMat, i, j, 0.0);
                    if (cvmGet(pFrMat, i, j))
                    {
                        double differ = fabs(cvmGet(pFrameMatCur, i, j) - cvmGet(pBkMat, i, j)); // background subtraction
                        if (differ > cvmGet(threshold, i, j))
                            cvmSet(pFrMat, i, j, 255); // foreground pixels are marked 255
                        else
                            cvmSet(pFrMat, i, j, 0);
                    }
                }
            // morphological filtering / connected-component analysis (disabled for now)
            // cvErode(pFrMat, pFrMat, 0, 1);
            // cvDilate(pFrMat, pFrMat, 0, 1);
            // cvMorphologyEx(pFrMat, pFrMat, 0, 0, CV_MOP_OPEN, 1);
            // cvMorphologyEx(pFrMat, pFrMat, 0, 0, CV_MOP_CLOSE, 2);
            //
            // cvConvert(pFrMat, pFrImg);
            // find_connected_components(pFrImg, 1, 4, NULL, NULL, NULL);
            // cvConvert(pFrImg, pFrMat);
            // shift the frame history: previous <- current
            cvConvert(pFrameMatPre1, pFrameMatPre2);
            cvConvert(pFrameMatCur, pFrameMatPre1);
            // update the background and the per-pixel threshold at non-moving pixels only
            for (i = 0; i < frame_height; i++)
                for (j = 0; j < frame_width; j++)
                {
                    if (cvGetReal2D(pFrMat, i, j) == 0)
                    {
                        double temp = alpha * cvmGet(pBkMat, i, j) + (1 - alpha) * cvmGet(pFrameMatCur, i, j);
                        cvmSet(pBkMat, i, j, temp);
                        double differ = alpha * cvmGet(threshold, i, j) + (1 - alpha) * 5 * fabs(cvmGet(pFrameMatCur, i, j) - cvmGet(pBkMat, i, j));
                        cvmSet(threshold, i, j, differ);
                    }
                }
            // convert back to image format for display
            cvConvert(pBkMat, pBkImg);
            cvConvert(pFrMat, pFrImg);
            // flip the image the right way up
            //pBkImg->origin = pFrame->origin;
            pFrImg->origin = pFrame->origin;
            // report the processing time
            t = (double)cvGetTickCount() - t;
            printf("calculation time = %gms\n", t / ((double)cvGetTickFrequency() * 1000.));
            // show the images
            //cvWriteFrame(writerin, pFrame);
            //cvWriteFrame(writerout, pResult);
            cvShowImage("source", pFrame);
            cvShowImage("move_detection", pFrImg);
            cvSaveImage("src.bmp", pFrame);
            cvSaveImage("fg.bmp", pFrImg);
            // break out of the loop on any key press;
            // the wait also gives cvShowImage time to finish drawing
            // and can be adjusted to the CPU speed
            if (cvWaitKey(10) >= 0)
                break;
        }
    }
    // destroy the windows
    cvDestroyWindow("source");
    cvDestroyWindow("move_detection");
    // release images and matrices
    cvReleaseImage(&pFrImg);
    cvReleaseImage(&pBkImg);
    cvReleaseMat(&pFrMat);
    cvReleaseMat(&pBkMat);
    cvReleaseMat(&pFrameMat);
    cvReleaseMat(&pFrameMatCur);
    cvReleaseMat(&pFrameMatPre1);
    cvReleaseMat(&pFrameMatPre2);
    cvReleaseMat(&framediff1);
    cvReleaseMat(&framediff2);
    cvReleaseMat(&threshold);
    for (k = 0; k < frame_channel; k++)
    {
        cvReleaseMat(&pMatChannels[k]);
    }
    //cvReleaseVideoWriter(&writerin);
    //cvReleaseVideoWriter(&writerout);
    cvReleaseCapture(&pCapture);
    return 0;
}
Any pointers would be much appreciated, thanks!
The corresponding project can be downloaded for free here: http://download.csdn.net/detail/lwjaiyjk3/7452145