The main results of the last few days are:
1. Because the raw depth images themselves are defective, they are preprocessed first: a morphological closing with a 3x3 square structuring element, followed by a 5x5 median filter.
// 3x3 square structuring element for the closing operation
cv::Mat element3(3, 3, CV_8U, cv::Scalar(1));
cv::morphologyEx(imageDepth, imageDepth, cv::MORPH_CLOSE, element3);
cv::morphologyEx(imageDepth2, imageDepth2, cv::MORPH_CLOSE, element3);
// 5x5 median filter to suppress the remaining noise
cv::medianBlur(imageDepth, imageDepth, 5);
cv::medianBlur(imageDepth2, imageDepth2, 5);
2. Forward warping produces crack artifacts, so inverse (backward) mapping is used instead: the depth image of the virtual view is first generated by forward warping, and the corresponding texture image is then obtained by mapping each virtual-view pixel back into the reference view. Since the generated depth image also contains cracks, the necessary median filtering is applied to it before the inverse pass.
void wrapingImage_inverse(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut)
{
    for (int v = 0; v < imageColor.rows; v++)
        for (int u = 0; u < imageColor.cols; u++)
        {
            // depth of the virtual-view pixel produced by the forward pass
            double d = imageDepthOut.at<cv::Vec3b>(v, u)[0];
            if (d == 0)
                continue;
            // skip pixels that already received a color during forward warping
            if (imageColorOut.at<cv::Vec3b>(v, u)[0] != 0 && imageColorOut.at<cv::Vec3b>(v, u)[1] != 0 && imageColorOut.at<cv::Vec3b>(v, u)[2] != 0)
                continue;
            // project the virtual-view pixel back into all views
            pointProject_from_ref_to_otherView(pts, proj, u, v, d);
            int u1 = (int)pts[ref][0];
            int v1 = (int)pts[ref][1];
            int k1 = (int)pts[ref][2];
            if (u1 < 0 || u1 >= imageColor.cols - 1 || v1 < 0 || v1 >= imageColor.rows - 1)
                continue;
            // fetch the color from the reference image
            imageColorOut.at<cv::Vec3b>(v, u) = imageColor.at<cv::Vec3b>(v1, u1);
        }
}
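The median filtering and closing of the generated depth maps mentioned above happen between the forward and the inverse pass; the relevant (non-contiguous) lines, excerpted from the dipslay function in the full source of section 6, are:
// clean the cracks in the forward-warped depth maps before the inverse pass
cv::medianBlur(imageDepthOut, imageDepthOut, 5);
cv::medianBlur(imageDepthOut2, imageDepthOut2, 5);
cv::Mat element4(5, 5, CV_8U, cv::Scalar(1));
cv::morphologyEx(imageDepthOut, imageDepthOut, cv::MORPH_CLOSE, element4);
cv::Mat element5(5, 5, CV_8U, cv::Scalar(1));
cv::morphologyEx(imageDepthOut2, imageDepthOut2, cv::MORPH_CLOSE, element5);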
3. To obtain a reasonably complete texture image, the texture image generated from the left reference image is used first, and its remaining holes are then filled with the parts generated from the right reference image.
// forward warping from each reference view into the virtual view
wrapingImage(ref, proj, imageColor, imageDepth, imageColorOut, imageDepthOut);
wrapingImage(ref2, proj, imageColor2, imageDepth2, imageColorOut2, imageDepthOut2);
// inverse warping: the first call fills imageColorOut from the left reference;
// the second call writes into the same imageColorOut, so it only fills the pixels
// that are still empty, using the right reference and its warped depth map
wrapingImage_inverse(ref, proj, imageColor, imageDepth, imageColorOut, imageDepthOut);
wrapingImage_inverse(ref2, proj, imageColor2, imageDepth2, imageColorOut, imageDepthOut2);
4. Remaining problem: because the illumination of the left and right reference images is not the same, one of the generated images is darker and the other brighter, and after they are blended the ghosting artifacts are severe. How can this be solved? One possible direction is sketched below.
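A minimal sketch of one possible fix, not part of the current code: estimate a global gain between the two warped color images over the pixels where both are non-empty, and rescale one of them before they are blended (e.g. before the cv::addWeighted call in dipslay). The helper compensateBrightness below is hypothetical; per-channel histogram matching would be a more thorough alternative.
// Hypothetical helper (assumption, not in the original code): match the global
// brightness of 'right' to 'left' over the pixels where both warped views are
// filled, so that blending the two does not produce dark/bright ghosting.
static void compensateBrightness(const cv::Mat &left, cv::Mat &right)
{
    cv::Mat grayL, grayR;
    cv::cvtColor(left, grayL, CV_BGR2GRAY);
    cv::cvtColor(right, grayR, CV_BGR2GRAY);

    // valid pixels are those actually written by the warping (non-zero)
    cv::Mat mask = (grayL > 0) & (grayR > 0);

    double meanL = cv::mean(grayL, mask)[0];
    double meanR = cv::mean(grayR, mask)[0];
    if (meanR > 0)
    {
        double gain = meanL / meanR;            // single global gain factor
        right.convertTo(right, right.type(), gain);
    }
}
It could be called on the two warped color images, for example compensateBrightness(imageColorOut, imageColorOut2);, just before blending them.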
5. Results.
(1) Generated image
(2) Original image
6. Source code:
#ifndef _wrapingOf3D1
#define _wrapingOf3D1

#include <iostream>
#include <opencv2\opencv.hpp>
#include <math.h>
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

using std::cout;
using std::endl;

double max(double x, double y)
{
    return ((x > y) ? x : y);
}

double ssim(char *ref_image, char *obj_image)
{
    // default settings
    double C1 = 6.5025, C2 = 58.5225;
    IplImage *img1 = NULL, *img2 = NULL, *img1_img2 = NULL,
        *img1_temp = NULL, *img2_temp = NULL, *img1_sq = NULL, *img2_sq = NULL,
        *mu1 = NULL, *mu2 = NULL, *mu1_sq = NULL, *mu2_sq = NULL, *mu1_mu2 = NULL,
        *sigma1_sq = NULL, *sigma2_sq = NULL, *sigma12 = NULL,
        *ssim_map = NULL, *temp1 = NULL, *temp2 = NULL, *temp3 = NULL;

    /***************************** INITS **********************************/
    img1_temp = cvLoadImage(ref_image);
    img2_temp = cvLoadImage(obj_image);
    if (img1_temp == NULL || img2_temp == NULL)
        return -1;

    int x = img1_temp->width, y = img1_temp->height;
    int nChan = img1_temp->nChannels, d = IPL_DEPTH_32F;
    CvSize size = cvSize(x, y);

    img1 = cvCreateImage(size, d, nChan);
    img2 = cvCreateImage(size, d, nChan);
    cvConvert(img1_temp, img1);
    cvConvert(img2_temp, img2);
    cvReleaseImage(&img1_temp);
    cvReleaseImage(&img2_temp);

    img1_sq = cvCreateImage(size, d, nChan);
    img2_sq = cvCreateImage(size, d, nChan);
    img1_img2 = cvCreateImage(size, d, nChan);
    cvPow(img1, img1_sq, 2);
    cvPow(img2, img2_sq, 2);
    cvMul(img1, img2, img1_img2, 1);

    mu1 = cvCreateImage(size, d, nChan);
    mu2 = cvCreateImage(size, d, nChan);
    mu1_sq = cvCreateImage(size, d, nChan);
    mu2_sq = cvCreateImage(size, d, nChan);
    mu1_mu2 = cvCreateImage(size, d, nChan);
    sigma1_sq = cvCreateImage(size, d, nChan);
    sigma2_sq = cvCreateImage(size, d, nChan);
    sigma12 = cvCreateImage(size, d, nChan);
    temp1 = cvCreateImage(size, d, nChan);
    temp2 = cvCreateImage(size, d, nChan);
    temp3 = cvCreateImage(size, d, nChan);
    ssim_map = cvCreateImage(size, d, nChan);
    /*************************** END INITS **********************************/

    //////////////////////////////////////////////////////////////////////////
    // PRELIMINARY COMPUTING
    cvSmooth(img1, mu1, CV_GAUSSIAN, 11, 11, 1.5);
    cvSmooth(img2, mu2, CV_GAUSSIAN, 11, 11, 1.5);
    cvPow(mu1, mu1_sq, 2);
    cvPow(mu2, mu2_sq, 2);
    cvMul(mu1, mu2, mu1_mu2, 1);
    cvSmooth(img1_sq, sigma1_sq, CV_GAUSSIAN, 11, 11, 1.5);
    cvAddWeighted(sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq);
    cvSmooth(img2_sq, sigma2_sq, CV_GAUSSIAN, 11, 11, 1.5);
    cvAddWeighted(sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq);
    cvSmooth(img1_img2, sigma12, CV_GAUSSIAN, 11, 11, 1.5);
    cvAddWeighted(sigma12, 1, mu1_mu2, -1, 0, sigma12);

    //////////////////////////////////////////////////////////////////////////
    // FORMULA
    // (2*mu1_mu2 + C1)
    cvScale(mu1_mu2, temp1, 2);
    cvAddS(temp1, cvScalarAll(C1), temp1);
    // (2*sigma12 + C2)
    cvScale(sigma12, temp2, 2);
    cvAddS(temp2, cvScalarAll(C2), temp2);
    // ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
    cvMul(temp1, temp2, temp3, 1);
    // (mu1_sq + mu2_sq + C1)
    cvAdd(mu1_sq, mu2_sq, temp1);
    cvAddS(temp1, cvScalarAll(C1), temp1);
    // (sigma1_sq + sigma2_sq + C2)
    cvAdd(sigma1_sq, sigma2_sq, temp2);
    cvAddS(temp2, cvScalarAll(C2), temp2);
    // ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
    cvMul(temp1, temp2, temp1, 1);
    // ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
    cvDiv(temp3, temp1, ssim_map, 1);

    CvScalar index_scalar = cvAvg(ssim_map);

    // through observation, there is approximately
    // 1% error max with the original matlab program
    cout << "(R, G & B SSIM index)" << std::endl;
    cout << index_scalar.val[2] << endl;
    cout << index_scalar.val[1] << endl;
    cout << index_scalar.val[0] << endl;

    cvReleaseImage(&img1_sq);
    cvReleaseImage(&img2_sq);
    cvReleaseImage(&img1_img2);
    cvReleaseImage(&mu1);
    cvReleaseImage(&mu2);
    cvReleaseImage(&mu1_sq);
    cvReleaseImage(&mu2_sq);
    cvReleaseImage(&mu1_mu2);
    cvReleaseImage(&sigma1_sq);
    cvReleaseImage(&sigma2_sq);
    cvReleaseImage(&sigma12);
    cvReleaseImage(&temp1);
    cvReleaseImage(&temp2);
    cvReleaseImage(&temp3);
    cvReleaseImage(&ssim_map);

    //double ssim=max(max(index_scalar.val[0], index_scalar.val[1]), index_scalar.val[2]);
    double ssim = (index_scalar.val[0] + index_scalar.val[1] + index_scalar.val[2]) / 3;
    return ssim;
}

double psnr(char *ref_image, char *obj_image)
{
    cv::Mat image_ref = cv::imread(ref_image);
    cv::Mat image_obj = cv::imread(obj_image);
    double mse = 0;
    double div_r = 0;
    double div_g = 0;
    double div_b = 0;
    int width = image_ref.cols;
    int height = image_ref.rows;
    double psnr = 0;
    for (int v = 0; v < height; v++)
    {
        for (int u = 0; u < width; u++)
        {
            div_r = image_ref.at<cv::Vec3b>(v, u)[0] - image_obj.at<cv::Vec3b>(v, u)[0];
            div_g = image_ref.at<cv::Vec3b>(v, u)[1] - image_obj.at<cv::Vec3b>(v, u)[1];
            div_b = image_ref.at<cv::Vec3b>(v, u)[2] - image_obj.at<cv::Vec3b>(v, u)[2];
            mse += ((div_r*div_r + div_b*div_b + div_g*div_g) / 3);
        }
    }
    mse = mse / (width*height);
    psnr = 10 * log10(255 * 255 / mse);
    printf("%lf\n", mse);
    printf("%lf\n", psnr);
    return psnr;
}

class Histogram1D {
private:
    int histSize[1];
    float hranges[2];
    const float *ranges[1];
    int channels[1];
public:
    Histogram1D(){
        histSize[0] = 256;
        hranges[0] = 0.0;
        hranges[1] = 255.0;
        ranges[0] = hranges;
        channels[0] = 0;
    }
    cv::MatND getHistogram(const cv::Mat &image){
        cv::MatND hist;
        cv::calcHist(&image, 1, channels, cv::Mat(), hist, 1, histSize, ranges);
        return hist;
    }
    cv::Mat getHistogramImage(const cv::Mat &image){
        cv::MatND hist = getHistogram(image);
        double maxVal = 0;
        double minVal = 0;
        cv::minMaxLoc(hist, &minVal, &maxVal, 0, 0);
        cv::Mat histImg(histSize[0], histSize[0], CV_8U, cv::Scalar(255));
        int hpt = static_cast<int>(0.9*histSize[0]);
        for (int h = 0; h < histSize[0]; h++){
            float binVal = hist.at<float>(h);
            int intensity = static_cast<int>(binVal*hpt / maxVal);
            cv::line(histImg, cv::Point(h, histSize[0]), cv::Point(h, histSize[0] - intensity), cv::Scalar::all(100), 1);
        }
        return histImg;
    }
};

/*
** struct holding the intrinsic and extrinsic camera parameters
*/
typedef struct {
    double m_K[3][3];          // 3x3 intrinsic matrix
    double m_RotMatrix[3][3];  // rotation matrix
    double m_Trans[3];         // translation vector
    double m_ProjMatrix[4][4]; // projection matrix
} CalibStruct;

/*
** global variables
*/
CalibStruct m_CalibParams[8];
int m_NumCameras = 8;
int m_Width = 1024, m_Height = 768; // camera resolution is 1024x768
double pts[8][3];

/*
** function declarations
*/
void InitializeFromFile(char *fileName);
double DepthLevelToZ(unsigned char d);
unsigned char ZToDepthLever(double z);
double projXYZtoUV(double projMatrix[4][4], double x, double y, double z, double *u, double *v);
void projUVZtoXY(double projMatrix[4][4], double u, double v, double z, double *x, double *y);
void wrapingImage(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut);
void pointProject_from_ref_to_otherView(double pts[8][3], int ref, int u, int v, unsigned char d);

/*
** function definitions
*/

/*
** read the calibration text file and write its values into the global structs
*/
void readCalibrationFile(char *fileName)
{
    int i, j, k;
    FILE *pIn;
    double dIn; // dummy variable
    int camIdx;

    if (pIn = fopen(fileName, "r"))
    {
        for (k = 0; k < m_NumCameras; k++)
        {
            // camera index
            fscanf(pIn, "%d", &camIdx);
            //std::cout << camIdx << std::endl;

            // camera intrinsics
            for (i = 0; i < 3; i++){
                fscanf(pIn, "%lf\t%lf\t%lf", &(m_CalibParams[camIdx].m_K[i][0]), &(m_CalibParams[camIdx].m_K[i][1]), &(m_CalibParams[camIdx].m_K[i][2]));
                //std::cout << (m_CalibParams[camIdx].m_K[i][0])<<"\t"<<(m_CalibParams[camIdx].m_K[i][1]) <<"\t"<< (m_CalibParams[camIdx].m_K[i][2]) << std::endl;
            }

            // read barrel distortion params (assume 0)
            fscanf(pIn, "%lf", &dIn);
            fscanf(pIn, "%lf", &dIn);

            // read extrinsics
            for (i = 0; i < 3; i++)
            {
                for (j = 0; j < 3; j++)
                {
                    fscanf(pIn, "%lf", &dIn);
                    m_CalibParams[camIdx].m_RotMatrix[i][j] = dIn;
                    //std::cout << m_CalibParams[camIdx].m_RotMatrix[i][j] << std::endl;
                }
                fscanf(pIn, "%lf", &dIn);
                m_CalibParams[camIdx].m_Trans[i] = dIn;
            }
        }
        fclose(pIn);
    }
} // readCalibrationFile

/*
** compute all projection matrices from the struct variables
*/
void computeProjectionMatrices()
{
    int i, j, k, camIdx;
    double(*inMat)[3];
    double exMat[3][4];

    for (camIdx = 0; camIdx < m_NumCameras; camIdx++)
    {
        // The intrinsic matrix
        inMat = m_CalibParams[camIdx].m_K;

        // The extrinsic matrix
        for (i = 0; i < 3; i++)
        {
            for (j = 0; j < 3; j++)
            {
                exMat[i][j] = m_CalibParams[camIdx].m_RotMatrix[i][j];
            }
        }
        for (i = 0; i < 3; i++)
        {
            exMat[i][3] = m_CalibParams[camIdx].m_Trans[i];
        }

        // Multiply the intrinsic matrix by the extrinsic matrix to find our projection matrix
        for (i = 0; i < 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                m_CalibParams[camIdx].m_ProjMatrix[i][j] = 0.0;
                for (k = 0; k < 3; k++)
                {
                    m_CalibParams[camIdx].m_ProjMatrix[i][j] += inMat[i][k] * exMat[k][j];
                }
            }
        }

        m_CalibParams[camIdx].m_ProjMatrix[3][0] = 0.0;
        m_CalibParams[camIdx].m_ProjMatrix[3][1] = 0.0;
        m_CalibParams[camIdx].m_ProjMatrix[3][2] = 0.0;
        m_CalibParams[camIdx].m_ProjMatrix[3][3] = 1.0;
    }
}

/*
** initialize the projection matrices from file
*/
void InitializeFromFile(char *fileName)
{
    readCalibrationFile(fileName);
    computeProjectionMatrices();
}

/*
** compute z from the depth level d of the depth image
*/
double DepthLevelToZ(unsigned char d)
{
    double z;
    double MinZ = 44.0, MaxZ = 120.0;
    z = 1.0 / ((d / 255.0)*(1.0 / MinZ - 1.0 / MaxZ) + 1.0 / MaxZ);
    return z;
}

/*
** compute the depth level d of the depth image from z
*/
unsigned char ZToDepthLever(double z)
{
    double MinZ = 44.0, MaxZ = 120.0;
    unsigned char d;
    d = (unsigned char)(255.0*(1 / (double)z - 1 / MaxZ) / (1 / MinZ - 1 / MaxZ));
    return d;
}

/*
** compute x, y from u, v, z and a projection matrix,
** e.g. m_CalibParams[i].m_ProjMatrix for camera i
*/
void projUVZtoXY(double projMatrix[4][4], double u, double v, double z, double *x, double *y)
{
    double c0, c1, c2;

    // image (0,0) is bottom lefthand corner
    v = (double)m_Height - v - 1.0;

    c0 = z*projMatrix[0][2] + projMatrix[0][3];
    c1 = z*projMatrix[1][2] + projMatrix[1][3];
    c2 = z*projMatrix[2][2] + projMatrix[2][3];

    *y = u*(c1*projMatrix[2][0] - projMatrix[1][0] * c2) + v*(c2*projMatrix[0][0] - projMatrix[2][0] * c0) + projMatrix[1][0] * c0 - c1*projMatrix[0][0];
    *y /= v*(projMatrix[2][0] * projMatrix[0][1] - projMatrix[2][1] * projMatrix[0][0]) + u*(projMatrix[1][0] * projMatrix[2][1] - projMatrix[1][1] * projMatrix[2][0]) + projMatrix[0][0] * projMatrix[1][1] - projMatrix[1][0] * projMatrix[0][1];

    *x = (*y)*(projMatrix[0][1] - projMatrix[2][1] * u) + c0 - c2*u;
    *x /= projMatrix[2][0] * u - projMatrix[0][0];
} // projUVZtoXY

/*
** compute u, v from x, y, z and a projection matrix;
** the return value is the depth after projection (z is the depth before projection)
*/
double projXYZtoUV(double projMatrix[4][4], double x, double y, double z, double *u, double *v)
{
    double w;

    *u = projMatrix[0][0] * x + projMatrix[0][1] * y + projMatrix[0][2] * z + projMatrix[0][3];
    *v = projMatrix[1][0] * x + projMatrix[1][1] * y + projMatrix[1][2] * z + projMatrix[1][3];
    w = projMatrix[2][0] * x + projMatrix[2][1] * y + projMatrix[2][2] * z + projMatrix[2][3];

    *u /= w;
    *v /= w;

    // image (0,0) is bottom lefthand corner
    *v = (double)m_Height - *v - 1.0;

    return w;
} // projXYZtoUV

/*
** project pixel (u, v) with depth level d of the reference camera into every other view
*/
void pointProject_from_ref_to_otherView(double pts[8][3], int ref, int u, int v, unsigned char d)
{
    double x, y, z = DepthLevelToZ(d);

    //printf("Testing projection of pt (%d,%d) in camera 0 with d = %d (z = %f) to other cameras\n", u, v, d, z);
    projUVZtoXY(m_CalibParams[ref].m_ProjMatrix, (double)u, (double)v, z, &x, &y);
    //printf("3D pt = (%f, %f, %f) [coordinates wrt reference camera]\n", x, y, z);

    for (int cam = 0; cam < 8; cam++)
    {
        double *pt = pts[cam];
        pt[0] = 0;
        pt[1] = 0;
        pt[2] = 0;
    }

    for (int cam = 0; cam < 8; cam++)
    {
        double *pt = pts[cam];
        pt[2] = projXYZtoUV(m_CalibParams[cam].m_ProjMatrix, x, y, z, &pt[0], &pt[1]);
        //printf("Camera %d: (%f, %f)\n", cam, pt[0], pt[1]);
        pt[2] = ZToDepthLever(pt[2]);
    }
}

/*
** warp the image; ref is the reference camera, proj is the projection (virtual) camera.
** This is the core of the forward warping.
*/
void wrapingImage(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut)
{
    for (int v = 0; v < imageColor.rows; v++)
        for (int u = 0; u < imageColor.cols; u++)
        {
            double d = imageDepth.at<cv::Vec3b>(v, u)[0];
            pointProject_from_ref_to_otherView(pts, ref, u, v, d);
            int u1 = (int)pts[proj][0];
            int v1 = (int)pts[proj][1];
            int k1 = (int)pts[proj][2];
            if (u1 < 0 || u1 >= imageColor.cols - 1 || v1 < 0 || v1 >= imageColor.rows - 1)
                continue;
            if (k1 < imageDepthOut.at<cv::Vec3b>(v1, u1)[0])
                continue;
            imageColorOut.at<cv::Vec3b>(v1, u1) = imageColor.at<cv::Vec3b>(v, u);
            imageDepthOut.at<cv::Vec3b>(v1, u1)[0] = k1;
            imageDepthOut.at<cv::Vec3b>(v1, u1)[1] = k1;
            imageDepthOut.at<cv::Vec3b>(v1, u1)[2] = k1;
        }
}

/*
** inverse (backward) warping: for every empty virtual-view pixel with a valid
** warped depth, fetch its color from the reference image
*/
void wrapingImage_inverse(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut)
{
    for (int v = 0; v < imageColor.rows; v++)
        for (int u = 0; u < imageColor.cols; u++)
        {
            double d = imageDepthOut.at<cv::Vec3b>(v, u)[0];
            if (d == 0)
                continue;
            if (imageColorOut.at<cv::Vec3b>(v, u)[0] != 0 && imageColorOut.at<cv::Vec3b>(v, u)[1] != 0 && imageColorOut.at<cv::Vec3b>(v, u)[2] != 0)
                continue;
            pointProject_from_ref_to_otherView(pts, proj, u, v, d);
            int u1 = (int)pts[ref][0];
            int v1 = (int)pts[ref][1];
            int k1 = (int)pts[ref][2];
            if (u1 < 0 || u1 >= imageColor.cols - 1 || v1 < 0 || v1 >= imageColor.rows - 1)
                continue;
            imageColorOut.at<cv::Vec3b>(v, u) = imageColor.at<cv::Vec3b>(v1, u1);
        }
}

void dipslay(char *calibParam, char *refColor, char *refDepth, char *refColor2, char *refDepth2, char *actColor)
{
    // initialize the projection matrices
    InitializeFromFile(calibParam);

    // display the projection matrices
    for (int i = 0; i < m_NumCameras; i++){
        for (int j = 0; j < 3; j++){
            for (int k = 0; k < 3; k++)
                std::cout << m_CalibParams[i].m_K[j][k] << "\t";
            std::cout << std::endl;
        }
        for (int j = 0; j < 3; j++){
            for (int k = 0; k < 3; k++)
                std::cout << m_CalibParams[i].m_RotMatrix[j][k] << "\t";
            std::cout << std::endl;
        }
        for (int k = 0; k < 3; k++)
            std::cout << m_CalibParams[i].m_Trans[k] << "\t";
        std::cout << std::endl;
        std::cout << std::endl;
        std::cout << std::endl;
        std::cout << std::endl;
    }

    // suspend until the user enters a digit
    int aa;
    //std::cin >> aa;

    // read the color and depth images of the reference views
    cv::Mat imageColor = cv::imread(refColor);
    cv::Mat imageDepth = cv::imread(refDepth);
    cv::Mat imageColor2 = cv::imread(refColor2);
    cv::Mat imageDepth2 = cv::imread(refDepth2);
    // read the ground-truth image used for comparison
    cv::Mat imageColor_actual = cv::imread(actColor);

    // set the reference cameras
    int ref = 5;
    int ref2 = 7;
    // set the projection (virtual) camera
    int proj = 6;

    // test code
    pointProject_from_ref_to_otherView(pts, ref, 700, 700, imageDepth.at<cv::Vec3b>(700, 700)[0]);
    for (int i = 0; i < 8; i++)
    {
        std::cout << pts[i][0] << "\t" << pts[i][1] << std::endl;
    }
    //std::cin >> aa;

    // define the output images
    cv::Mat imageColorOut;
    cv::Mat imageColorOut2;
    cv::Mat imageDepthOut;
    cv::Mat imageDepthOut2;
    imageColorOut.create(imageColor.rows, imageColor.cols, imageColor.type());
    imageColorOut2.create(imageColor.rows, imageColor.cols, imageColor.type());
    imageDepthOut.create(imageColor.rows, imageColor.cols, imageColor.type());
    imageDepthOut2.create(imageColor.rows, imageColor.cols, imageColor.type());
    for (int v = 0; v < imageColor.rows; v++)
    {
        for (int u = 0; u < imageColor.cols; u++)
        {
            imageColorOut.at<cv::Vec3b>(v, u)[0] = 0;
            imageColorOut.at<cv::Vec3b>(v, u)[1] = 0;
            imageColorOut.at<cv::Vec3b>(v, u)[2] = 0;
            imageColorOut2.at<cv::Vec3b>(v, u)[0] = 0;
            imageColorOut2.at<cv::Vec3b>(v, u)[1] = 0;
            imageColorOut2.at<cv::Vec3b>(v, u)[2] = 0;
            imageDepthOut.at<cv::Vec3b>(v, u)[0] = 0;
            imageDepthOut.at<cv::Vec3b>(v, u)[1] = 0;
            imageDepthOut.at<cv::Vec3b>(v, u)[2] = 0;
            imageDepthOut2.at<cv::Vec3b>(v, u)[0] = 0;
            imageDepthOut2.at<cv::Vec3b>(v, u)[1] = 0;
            imageDepthOut2.at<cv::Vec3b>(v, u)[2] = 0;
        }
    }

    // output directory
    char *save_dir = "C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\";

    // warp from the reference views to the virtual view
    //cv::medianBlur(imageDepth, imageDepth, 7);
    //cv::medianBlur(imageDepth2, imageDepth2, 7);
    cv::Mat element3(3, 3, CV_8U, cv::Scalar(1));
    cv::morphologyEx(imageDepth, imageDepth, cv::MORPH_CLOSE, element3);
    cv::morphologyEx(imageDepth2, imageDepth2, cv::MORPH_CLOSE, element3);
    cv::medianBlur(imageDepth, imageDepth, 5);
    cv::medianBlur(imageDepth2, imageDepth2, 5);
    //cv::GaussianBlur(imageDepth, imageDepth, cv::Size(5, 5), 1.5);   // Gaussian smoothing (linear low-pass filter)
    //cv::GaussianBlur(imageDepth2, imageDepth2, cv::Size(5, 5), 1.5); // Gaussian smoothing (linear low-pass filter)
    //cv::blur(imageDepth, imageDepth, cv::Size(5, 5));
    //cv::blur(imageDepth2, imageDepth2, cv::Size(5, 5));

    //wrapingImage(ref2, proj, imageColor2, imageDepth2, imageColorOut2, imageDepthOut2);
    wrapingImage(ref, proj, imageColor, imageDepth, imageColorOut, imageDepthOut);
    wrapingImage(ref2, proj, imageColor2, imageDepth2, imageColorOut2, imageDepthOut2);

    cv::medianBlur(imageDepthOut, imageDepthOut, 5);
    cv::medianBlur(imageDepthOut2, imageDepthOut2, 5);
    cv::imshow("virtruel_Depth_image", imageDepthOut);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Depth_image01.jpg", imageDepthOut);
    cv::imshow("virtruel_Depth_image2", imageDepthOut2);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Depth_image21.jpg", imageDepthOut2);

    for (int v = 0; v < imageColor.rows; v++)
    {
        for (int u = 0; u < imageColor.cols; u++)
        {
            imageColorOut.at<cv::Vec3b>(v, u)[0] = 0;
            imageColorOut.at<cv::Vec3b>(v, u)[1] = 0;
            imageColorOut.at<cv::Vec3b>(v, u)[2] = 0;
            imageColorOut2.at<cv::Vec3b>(v, u)[0] = 0;
            imageColorOut2.at<cv::Vec3b>(v, u)[1] = 0;
            imageColorOut2.at<cv::Vec3b>(v, u)[2] = 0;
        }
    }

    cv::Mat element4(5, 5, CV_8U, cv::Scalar(1));
    cv::morphologyEx(imageDepthOut, imageDepthOut, cv::MORPH_CLOSE, element4);
    cv::Mat element5(5, 5, CV_8U, cv::Scalar(1));
    cv::morphologyEx(imageDepthOut2, imageDepthOut2, cv::MORPH_CLOSE, element5);

    //wrapingImage_inverse(ref2, proj, imageColor2, imageDepth2, imageColorOut2, imageDepthOut2);
    wrapingImage_inverse(ref, proj, imageColor, imageDepth, imageColorOut, imageDepthOut);
    wrapingImage_inverse(ref2, proj, imageColor2, imageDepth2, imageColorOut, imageDepthOut2);

    // display the reference image
    /*cv::imshow("reference_image", imageColor);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\reference_image.jpg", imageColor);*/

    // display the virtual color images
    //cv::medianBlur(imageColorOut, imageColorOut, 3);
    //cv::cvtColor(imageColorOut, imageColorOut, CV_BGR2GRAY);
    /*cv::equalizeHist(imageDepthOut, imageDepthOut);
    cv::equalizeHist(imageDepthOut2, imageDepthOut2);*/
    cv::imshow("virtruel_Color_image01", imageColorOut);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image01.jpg", imageColorOut);
    cv::imshow("virtruel_Color_image21", imageColorOut2);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image21.jpg", imageColorOut2);

    //// display the virtual image
    //cv::imshow("virtruel_Color_image21", imageColorOut2);
    //cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image21.jpg", imageColorOut2);

    // display the virtual depth images
    //cv::medianBlur(imageDepthOut, imageDepthOut, 3);
    /*cv::medianBlur(imageDepthOut, imageDepthOut, 5);
    cv::medianBlur(imageDepthOut2, imageDepthOut2, 5);*/
    /*cv::imshow("virtruel_Depth_image", imageDepthOut);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Depth_image01.jpg", imageDepthOut);
    cv::imshow("virtruel_Depth_image2", imageDepthOut2);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Depth_image21.jpg", imageDepthOut2);*/

    // display the actual (ground-truth) image
    //cv::cvtColor(imageColor_actual, imageColor_actual, CV_BGR2GRAY);
    cv::imshow("actruel_image", imageColor_actual);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\actruel_image.jpg", imageColor_actual);

    //// blend the two warped views and check the changes
    cv::Mat imageColorRefVSProj;
    cv::addWeighted(imageColorOut, 0.5, imageColorOut2, 0.5, 0.0, imageColorRefVSProj);
    cv::imshow("imageColorRefVSProj", imageColorRefVSProj);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\imageColorRefVSProj.jpg", imageColorRefVSProj);

    //// blend the actual and the warped image and check the changes
    /*cv::Mat imageColorActVSProj;
    cv::addWeighted(imageColor_actual, 0.5, imageColorOut2, 0.5, 0.0, imageColorActVSProj);
    cv::imshow("imageColorActVSProj", imageColorActVSProj);
    cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\imageColorActVSProj.jpg", imageColorActVSProj);*/

    cv::waitKey();
}

int main()
{
    /*char *refColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam2\\color-cam2-f060.jpg";
    char *refDepth = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam2\\depth-cam2-f060.png";
    char *refColor2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam4\\color-cam4-f060.jpg";
    char *refDepth2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam4\\depth-cam4-f060.png";
    char *calibParam = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\calibParams-breakdancers.txt";
    char *actColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam3\\color-cam3-f060.jpg";*/

    char *refColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Ballet\\cam5\\color-cam5-f000.jpg";
    char *refDepth = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Ballet\\cam5\\depth-cam5-f000.png";
    char *refColor2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Ballet\\cam7\\color-cam7-f000.jpg";
    char *refDepth2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Ballet\\cam7\\depth-cam7-f000.png";
    char *calibParam = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Ballet\\calibParams-ballet.txt";
    char *actColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Ballet\\cam6\\color-cam6-f000.jpg";

    char *ref_img = "C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image01.jpg";
    char *act_img = "C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\actruel_image.jpg";

    dipslay(calibParam, refColor, refDepth, refColor2, refDepth2, actColor);

    double value_psnr = psnr(ref_img, act_img);
    printf("psnr=%lf\n", value_psnr);
    double value_ssim = ssim(ref_img, act_img);
    printf("ssim=%lf\n", value_ssim);

    getchar();
    int aa;
    std::cin >> aa;
    return 0;
}

#endif