计算两段yuv视频流中每一帧的ssim值

方法同上一篇,只不过这里在计算的时候用了opencv1的接口,出现了一些问题,最后总算解决了。

程序:

#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <cv.h>
#include <highgui.h>
#define NUM_FRAME 100 //只处理前100帧,根据视频帧数可修改
// Compute and print the per-frame MSE and PSNR between two video files.
// in1: path of the reference video; in2: path of the distorted video.
// Prints MSE then PSNR (dB) for each frame pair; stops early if either
// stream runs out of frames or the frame sizes disagree.
void CalcPsnr(const char *in1,const char *in2)
{
	cv::VideoCapture vc1;
	cv::VideoCapture vc2;
	bool flag1 = vc1.open(in1);
	bool flag2 = vc2.open(in2);
	if (!flag1||!flag2)
	{
		printf("avi file open error \n");
		system("pause");
		exit(-1);
	}

	int frmCount1 = vc1.get(CV_CAP_PROP_FRAME_COUNT);
	int frmCount2 = vc2.get(CV_CAP_PROP_FRAME_COUNT);
	printf("frmCount: %d \n", frmCount1);
	printf("frmCount: %d \n", frmCount2);
	// Only as many frame pairs exist as the shorter of the two streams.
	int frmCount = frmCount1 < frmCount2 ? frmCount1 : frmCount2;
	for (int i = 0; i < frmCount; i++)
	{
		printf("%d/%d \n", i + 1, frmCount);
		cv::Mat image_ref;
		vc1 >> image_ref;
		cv::Mat image_obj;
		vc2 >> image_obj;
		// Guard: decoding can fail before the reported frame count, and
		// at<Vec3b> would read out of bounds if the sizes differ.
		if (image_ref.empty() || image_obj.empty() ||
			image_ref.size() != image_obj.size())
		{
			printf("frame read error or size mismatch at frame %d \n", i + 1);
			break;
		}
		double mse = 0;
		int width = image_ref.cols;
		int height = image_ref.rows;
		for (int v = 0; v < height; v++)
		{
			for (int u = 0; u < width; u++)
			{
				// OpenCV stores pixels in BGR order: [0]=B, [1]=G, [2]=R.
				double div_b = image_ref.at<cv::Vec3b>(v, u)[0] - image_obj.at<cv::Vec3b>(v, u)[0];
				double div_g = image_ref.at<cv::Vec3b>(v, u)[1] - image_obj.at<cv::Vec3b>(v, u)[1];
				double div_r = image_ref.at<cv::Vec3b>(v, u)[2] - image_obj.at<cv::Vec3b>(v, u)[2];
				// Average the squared error over the three channels.
				mse += (div_r*div_r + div_g*div_g + div_b*div_b) / 3;
			}
		}
		mse = mse / (static_cast<double>(width) * height);
		// Identical frames give mse == 0; log10(255*255/0) is undefined,
		// so report infinite PSNR explicitly instead of dividing by zero.
		double psnr = (mse > 0) ? 10 * log10(255.0 * 255.0 / mse) : HUGE_VAL;
		printf("%lf\n", mse);
		printf("%lf\n", psnr);
	}
	return;
}
void  CalcSsim(const char *in1,const char *in2)
{
	CvCapture* capture1 = cvCreateFileCapture(in1);
	CvCapture* capture2 = cvCreateFileCapture(in2);
	int i = 0;
	// default settings
	double C1 = 6.5025, C2 = 58.5225;

	IplImage
		*img1 = NULL, *img2 = NULL, *img1_img2 = NULL,
		*img1_temp = NULL, *img2_temp = NULL,
		*img1_sq = NULL, *img2_sq = NULL,
		*mu1 = NULL, *mu2 = NULL,
		*mu1_sq = NULL, *mu2_sq = NULL, *mu1_mu2 = NULL,
		*sigma1_sq = NULL, *sigma2_sq = NULL, *sigma12 = NULL,
		*ssim_map = NULL, *temp1 = NULL, *temp2 = NULL, *temp3 = NULL;
	while (1)
	{
		printf("%d/%d \n", ++i, NUM_FRAME);

		/***************************** INITS **********************************/
		img1_temp = cvQueryFrame(capture1);
		img2_temp = cvQueryFrame(capture2);

		if (img1_temp == NULL || img2_temp == NULL)
			return;

		int x = img1_temp->width, y = img1_temp->height;
		int nChan = img1_temp->nChannels, d = IPL_DEPTH_32F;
		CvSize size = cvSize(x, y);

		img1 = cvCreateImage(size, d, nChan);
		img2 = cvCreateImage(size, d, nChan);

		cvConvert(img1_temp, img1);
		cvConvert(img2_temp, img2);
		/*cvReleaseImage(&img1_temp);
		cvReleaseImage(&img2_temp);*/

		img1_sq = cvCreateImage(size, d, nChan);
		img2_sq = cvCreateImage(size, d, nChan);
		img1_img2 = cvCreateImage(size, d, nChan);

		cvPow(img1, img1_sq, 2);
		cvPow(img2, img2_sq, 2);
		cvMul(img1, img2, img1_img2, 1);

		mu1 = cvCreateImage(size, d, nChan);
		mu2 = cvCreateImage(size, d, nChan);

		mu1_sq = cvCreateImage(size, d, nChan);
		mu2_sq = cvCreateImage(size, d, nChan);
		mu1_mu2 = cvCreateImage(size, d, nChan);

		sigma1_sq = cvCreateImage(size, d, nChan);
		sigma2_sq = cvCreateImage(size, d, nChan);
		sigma12 = cvCreateImage(size, d, nChan);

		temp1 = cvCreateImage(size, d, nChan);
		temp2 = cvCreateImage(size, d, nChan);
		temp3 = cvCreateImage(size, d, nChan);

		ssim_map = cvCreateImage(size, d, nChan);
		/*************************** END INITS **********************************/

		//////////////////////////////////////////////////////////////////////////
		// PRELIMINARY COMPUTING
		cvSmooth(img1, mu1, CV_GAUSSIAN, 11, 11, 1.5);
		cvSmooth(img2, mu2, CV_GAUSSIAN, 11, 11, 1.5);

		cvPow(mu1, mu1_sq, 2);
		cvPow(mu2, mu2_sq, 2);
		cvMul(mu1, mu2, mu1_mu2, 1);

		cvSmooth(img1_sq, sigma1_sq, CV_GAUSSIAN, 11, 11, 1.5);
		cvAddWeighted(sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq);

		cvSmooth(img2_sq, sigma2_sq, CV_GAUSSIAN, 11, 11, 1.5);
		cvAddWeighted(sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq);

		cvSmooth(img1_img2, sigma12, CV_GAUSSIAN, 11, 11, 1.5);
		cvAddWeighted(sigma12, 1, mu1_mu2, -1, 0, sigma12);

		//////////////////////////////////////////////////////////////////////////
		// FORMULA  

		// (2*mu1_mu2 + C1)
		cvScale(mu1_mu2, temp1, 2);
		cvAddS(temp1, cvScalarAll(C1), temp1);

		// (2*sigma12 + C2)
		cvScale(sigma12, temp2, 2);
		cvAddS(temp2, cvScalarAll(C2), temp2);

		// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
		cvMul(temp1, temp2, temp3, 1);

		// (mu1_sq + mu2_sq + C1)
		cvAdd(mu1_sq, mu2_sq, temp1);
		cvAddS(temp1, cvScalarAll(C1), temp1);

		// (sigma1_sq + sigma2_sq + C2)
		cvAdd(sigma1_sq, sigma2_sq, temp2);
		cvAddS(temp2, cvScalarAll(C2), temp2);

		// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
		cvMul(temp1, temp2, temp1, 1);

		// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
		cvDiv(temp3, temp1, ssim_map, 1);

		CvScalar index_scalar = cvAvg(ssim_map);

		// through observation, there is approximately
		// 1% error max with the original matlab program  

		/*cout << "(R, G & B SSIM index)" << std::endl;
		cout << index_scalar.val[2] << endl;
		cout << index_scalar.val[1] << endl;
		cout << index_scalar.val[0] << endl;*/

		cvReleaseImage(&img1_sq);
		cvReleaseImage(&img2_sq);
		cvReleaseImage(&img1_img2);
		cvReleaseImage(&mu1);
		cvReleaseImage(&mu2);
		cvReleaseImage(&mu1_sq);
		cvReleaseImage(&mu2_sq);
		cvReleaseImage(&mu1_mu2);
		cvReleaseImage(&sigma1_sq);
		cvReleaseImage(&sigma2_sq);
		cvReleaseImage(&sigma12);
		cvReleaseImage(&temp1);
		cvReleaseImage(&temp2);
		cvReleaseImage(&temp3);
		cvReleaseImage(&ssim_map);
		//double ssim=max(max(index_scalar.val[0], index_scalar.val[1]), index_scalar.val[2]);
		double ssim = (index_scalar.val[0] + index_scalar.val[1] + index_scalar.val[2]) / 3;
		std::cout << ssim << std::endl;

	}
	cvReleaseCapture(&capture1);
	cvReleaseCapture(&capture2);
	return;
}
// Read an I420 (YUV 4:2:0 planar) raw video file, convert each frame to
// BGR, display it, and save it as dir/pic<i>.jpg.
// dir: output directory (must end with '/'); in: raw YUV file path;
// _w, _h: frame width and height in pixels.
// Processes at most NUM_FRAME frames, stopping early at end of file.
void DisplayYUV2RGB(const char *dir,const char *in,int _w,int _h)
{
	int w = _w;
	int h = _h;
	printf("yuv file w: %d, h: %d \n", w, h);
	// Read-only access is sufficient ("rb+" write access was never used).
	FILE* pFileIn = fopen(in, "rb");
	if (pFileIn == NULL)
	{
		// Previously unchecked: a missing file crashed on the first fread.
		printf("yuv file open error: %s \n", in);
		return;
	}
	// One I420 frame = full-size Y plane + quarter-size U and V planes.
	int bufLen = w*h * 3 / 2;
	unsigned char* pYuvBuf = new unsigned char[bufLen];
	int iCount = 0;

	for (int i = 0; i<NUM_FRAME; i++)
	{
		// Stop at EOF / short read instead of reprocessing a stale buffer.
		if (fread(pYuvBuf, sizeof(unsigned char), bufLen, pFileIn) != (size_t)bufLen)
		{
			printf("end of yuv file after %d frames \n", i);
			break;
		}

		// Stack the three planes as a single-channel (h*3/2) x w image,
		// the layout cvtColor expects for CV_YUV2BGR_I420.
		cv::Mat yuvImg;
		yuvImg.create(h * 3 / 2, w, CV_8UC1);
		memcpy(yuvImg.data, pYuvBuf, bufLen*sizeof(unsigned char));
		cv::Mat rgbImg;
		cv::cvtColor(yuvImg, rgbImg, CV_YUV2BGR_I420);

		cv::imshow("img", rgbImg);
		char s[100];
		// snprintf prevents overflow for long directory paths.
		snprintf(s, sizeof(s), "%spic%d%s", dir, i, ".jpg");
		cv::imwrite(s, rgbImg);
		cv::waitKey(1);

		printf("%d \n", iCount++);
	}
	delete[] pYuvBuf;
	fclose(pFileIn);
}
// Assemble a numbered JPEG sequence into a DIVX-encoded AVI.
// in: printf-style path pattern with one %d (e.g. ".../pic%d.jpg"),
//     numbering starts at 1 and continues until a load fails;
// out: output AVI path. Every image is resized to 1024x768 before writing.
void Image_to_video(const char* in, const char* out)
{
	int num = 1;
	CvSize size = cvSize(1024, 768);  // output frame size
	double fps = 30;                  // output frame rate
	CvVideoWriter *writer = cvCreateVideoWriter(out, CV_FOURCC('D', 'I', 'V', 'X'), fps, size);
	char cname[100];
	// snprintf prevents overflow for long path patterns.
	snprintf(cname, sizeof(cname), in, num);
	IplImage *src = cvLoadImage(cname);
	if (!src)
	{
		// Previously this early return leaked the writer.
		cvReleaseVideoWriter(&writer);
		return;
	}
	// Scratch image at the video frame size, reused for every frame.
	IplImage *src_resize = cvCreateImage(size, 8, 3);
	cvNamedWindow("avi");
	while (src)
	{
		// Resize FIRST: the original showed src_resize before filling it,
		// so the first displayed frame was uninitialized memory and every
		// displayed frame lagged one behind the one being written.
		cvResize(src, src_resize);
		cvShowImage("avi", src_resize);
		cvWaitKey(1);
		cvWriteFrame(writer, src_resize);
		cvReleaseImage(&src);
		num++;
		snprintf(cname, sizeof(cname), in, num);
		src = cvLoadImage(cname); // load the next numbered image
	}
	cvReleaseVideoWriter(&writer);
	cvReleaseImage(&src_resize);
}
// Driver: convert two raw YUV streams to AVI via JPEG sequences, then
// compare them frame by frame with PSNR and SSIM.
int main(int argc, char *argv[])
{
	// Shared intermediate locations (both streams reuse the same picture
	// directory and filename pattern).
	const char *picDir = "C:/Users/jiang/Desktop/output/tupian1/";
	const char *picPattern = "C:/Users/jiang/Desktop/output/tupian1/pic%d.jpg";
	const char *avi1 = "C:/Users/jiang/Desktop/output/3outfile.avi";
	const char *avi2 = "C:/Users/jiang/Desktop/output/4outfile.avi";

	// Stage 1: first YUV stream -> JPEG frames -> AVI #1.
	const char *yuvPath = "C:/Users/jiang/Desktop/output/book_virtual08.yuv";
	DisplayYUV2RGB(picDir, yuvPath, 1024, 768);
	Image_to_video(picPattern, avi1);

	// Stage 2: second YUV stream through the same pipeline -> AVI #2.
	yuvPath = "C:/Users/jiang/Desktop/bookarrival/bookarrival_c_8.yuv";
	DisplayYUV2RGB(picDir, yuvPath, 1024, 768);
	Image_to_video(picPattern, avi2);

	// Stage 3: per-frame quality metrics between the two AVIs.
	CalcPsnr(avi1, avi2);
	CalcSsim(avi1, avi2);

	getchar(); // keep the console window open
	return 0;
}

版权声明:本文为博主原创文章,未经博主允许不得转载。

时间: 2024-10-20 23:22:37

计算两段yuv视频流中每一帧的ssim值的相关文章

计算两段yuv格式视频流中每一帧的psnr值

现在虚拟视点生成了yuv格式的视频流,如何计算每一帧的psnr值,进行了如下步骤: 1.对yuv视频流进行转换为jpg图片: 2.把jpg图片转化为avi视频流,便于批处理: 3.对avi视频中的每一帧计算psnr值. 程序: #include <stdlib.h> #include <stdio.h> #include <math.h> #include <cv.h> #include <highgui.h> #define NUM_FRAME

ffmpeg 从视频流中抓取图片

从视频中不断抓取图片的基本流程:打开视频流地址->获取视频流packt->解码成图片帧->输出图片 一.初始化Ffmpeg void ffmpegInit(){ av_register_all(); avformat_network_init(); av_log_set_level(AV_LOG_ERROR); } 如果你不想输出log,设置log级别为AV_LOG_PANIC. 二.打开视频. int Open(char* url) { context = avformat_alloc

调用opencv相关函数,从视频流中提取出图片序列

/************************ @HJ 2017/3/30 参考http://blog.sina.com.cn/s/blog_4b0020f301010qcz.html修改的代码 @主要调用opencv相关函数,从视频流中提取出图片序列 @需要注意的问题: cvReleaseImage(&pImg)释放内存出错的两种情况:具体可以参考http://www.cnblogs.com/grandyang/p/4615036.html 1)从摄像头获取的图片不能被修改和释放,所以如果

计算1到100000中出现93的次数

一个同学在群上要求出计算1到100000中出现93的次数,然后,我就写脚本了. cat count.sh  #!/bin/bash sum=0 for num in {1..100000} do echo $num | grep 93 [ $? -eq 0 ] && ((sum=sum+1)) done echo "sum=$sum" 然后有同学给出答案了,秒杀我一万里 seq 1 100000 | grep 93 |wc -l 我想静静  /(ㄒoㄒ)/~~   让我

【译】 AWK教程指南 3计算并打印文件中指定的字段数据

awk 处理数据时,它会自动从数据文件中一次读取一条记录,并会将该记录切分成一个个的字段:程序中可使用 $1, $2,... 直接取得各个字段的内容.这个特色让使用者易于用 awk 编写 reformatter 来改变数据格式. 范例:以数据文件 emp.dat 为例,计算每人应发工资并打印报表. 分析:awk 会自行一次读入一条记录,故程序中仅需告诉 awk 如何处理所读入的数据行. 执行如下命令:($ 表UNIX命令行上的提示符)  $ awk '{ print $2, $3 * $4 }'

使用linq计算元素在列表中出现的次数c#代码

c#使用linq计算元素在列表中出现的次数,调用方法非常简单,和sql语句很像 //codego.net计算的次数一个项目出现在这个列表 public static int CountTimes<T>(this List<T> inputList, T searchItem){return ((from t in inputList where t.Equals(searchItem) select t).Count());}

python学习之——计算给出代码中注释、代码、空行的行数

题目:计算给出代码中注释.代码.空行的行数 来源:网络 思路:注释行以 ‘#’开头,空行以 ‘\n’ 开头,以此作为判断 def count_linenum(fname): fobj = open(fname,"rU") #print fobj.readlines() count_blankline = 0 count_notes = 0 count_code = 0 for eachLine in fobj: if eachLine[0] == '\n': count_blankli

计算任意无序字符串中的最大有序串

private void compare() { //定义个无序字符串 String str = "sdifsdafsdabfwqicweedio"; //置于hashset去重 HashSet<Character> set = new HashSet<Character>(); for (int i = 0; i < str.length(); i++) { set.add(str.charAt(i)); } System.out.println(&qu

边缘计算在智慧城市中的应用【摘录】

在中国,智慧城市是城市化发展的高级阶段,智慧城市建设是推进符合中国特色的城市信息化样本,兼具战略和现实意义.智慧城市构想是创造“宜居.舒适.安全”的城市生活环境,要改善城市综合管理.经济建设.民生服务等方面,实现城市“感知.互联和智慧”.要实现这一目标,离不开先进和创新的技术支撑. 从铺设网络.装置传感器.搭建系统平台到实现数据全采集,边缘计算在智慧城市中有着丰富的应用场景.在道路两侧路灯杆上安装传感器,便于收集城市路面信息,检测空气质量.光照强度.噪音水平等环境数据.当路灯发生故障时能够即时反