An image transform converts an image into another representation of its data, for example taking its Fourier transform or differentiating it in the X and Y directions. These transforms let us approach certain image-processing problems from a different angle, which is why they are among the most commonly used tools in image processing.
1. The Sobel operator
The Sobel operator is a discrete differentiation operator used for edge detection. It combines Gaussian smoothing with differentiation to compute an approximation of the gradient of the image intensity function; applied at any point in the image, it produces the corresponding gradient vector or its normal vector. Put simply, Sobel measures how sharply pixel values change between neighbors, and since edges are where that change is steepest, it can be used for edge detection.
The Sobel operator is directional: you specify the derivative order separately for the x and y directions.
API: void Sobel(source, destination, int output image depth, int x-direction derivative order, int y-direction derivative order, int Sobel kernel size, double optional scale factor, double optional delta value, int border mode)
Note: the source and destination have the same size and type. The output depth is chosen relative to the source depth (the default of -1 means the same depth as the source), but to avoid overflow the destination data should be wider than the source, e.g. CV_16S for a CV_8U source. The kernel size defaults to 3 and can only be 1, 3, 5, or 7; the scale factor defaults to 1 (no scaling), and delta defaults to 0.
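Before the full trackbar demo below, a minimal sketch of a single call might look like this (assuming a grayscale CV_8U Mat named src has already been loaded; the window name is just a placeholder):

    // Minimal Sobel sketch: x-derivative with the default 3x3 kernel.
    Mat gradX, gradXAbs;
    Sobel(src, gradX, CV_16S, 1, 0, 3);   // dx = 1, dy = 0, ksize = 3
    convertScaleAbs(gradX, gradXAbs);     // back to CV_8U for display
    imshow("grad x", gradXAbs);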
The full demo code, driven by trackbars, is as follows:
    #include <opencv2/opencv.hpp>
    using namespace cv;

    Mat srcImage, sobelxImage, sobelxAbsImage, sobelyImage, sobelyAbsImage, dstImage;
    const int g_sobelCoreMax = 2;   // kernel size slider: 0..2 -> ksize 3, 5, 7
    int g_sobelCoreValue;
    const int g_deltaxMax = 9;
    int g_deltaxValue;
    const int g_deltayMax = 9;
    int g_deltayValue;

    // All three trackbars trigger the same recomputation.
    void updateSobel();
    void onTrackBarSobelCore(int pos, void* userData)   { updateSobel(); }
    void onTrackBarSobelDeltax(int pos, void* userData) { updateSobel(); }
    void onTrackBarSobelDeltay(int pos, void* userData) { updateSobel(); }

    int main(int argc, char* argv[])
    {
        srcImage = imread("F:\\opencv\\OpenCVImage\\sobel.jpg");
        namedWindow("src image");
        namedWindow("sobelx image");
        namedWindow("sobely image");
        namedWindow("sobelxy image");
        g_sobelCoreValue = 0;
        g_deltaxValue = 0;
        g_deltayValue = 0;
        createTrackbar("core size", "src image", &g_sobelCoreValue, g_sobelCoreMax, onTrackBarSobelCore, 0);
        createTrackbar("deltax value", "src image", &g_deltaxValue, g_deltaxMax, onTrackBarSobelDeltax, 0);
        createTrackbar("deltay value", "src image", &g_deltayValue, g_deltayMax, onTrackBarSobelDeltay, 0);
        onTrackBarSobelCore(g_sobelCoreValue, 0);
        imshow("src image", srcImage);
        moveWindow("src image", 0, 0);
        moveWindow("sobelx image", srcImage.cols, 0);
        moveWindow("sobely image", 0, srcImage.rows);
        moveWindow("sobelxy image", srcImage.cols, srcImage.rows);
        waitKey(0);
        return 0;
    }

    void updateSobel()
    {
        int coreSize = g_sobelCoreValue * 2 + 3;   // 3, 5 or 7
        // Note: despite the trackbar names, these control the derivative order,
        // not Sobel's delta parameter.
        int deltax = g_deltaxValue + 1;
        int deltay = g_deltayValue + 1;
        // CV_16S output avoids overflowing the negative/large gradient values.
        Sobel(srcImage, sobelxImage, CV_16S, deltax, 0, coreSize);
        Sobel(srcImage, sobelyImage, CV_16S, 0, deltay, coreSize);
        convertScaleAbs(sobelxImage, sobelxAbsImage);   // back to CV_8U for display
        convertScaleAbs(sobelyImage, sobelyAbsImage);
        // Rough combination of the two directions.
        addWeighted(sobelxAbsImage, 0.5, sobelyAbsImage, 0.5, 0.0, dstImage);
        imshow("sobelx image", sobelxAbsImage);
        imshow("sobely image", sobelyAbsImage);
        imshow("sobelxy image", dstImage);
    }
When ksize is 1, only a 3×1 or 1×3 kernel is used, and no Gaussian smoothing is performed.
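The addWeighted(..., 0.5, ..., 0.5, ...) blend in the demo above is only a rough approximation of the combined gradient. If the exact magnitude sqrt(Gx^2 + Gy^2) is wanted, one option is to compute the derivatives in floating point and use cv::magnitude; a sketch, assuming a single-channel Mat named src:

    // Sketch: exact gradient magnitude instead of the 0.5/0.5 addWeighted blend.
    Mat gx, gy, mag, mag8u;
    Sobel(src, gx, CV_32F, 1, 0, 3);
    Sobel(src, gy, CV_32F, 0, 1, 3);
    magnitude(gx, gy, mag);        // element-wise sqrt(gx^2 + gy^2)
    convertScaleAbs(mag, mag8u);   // CV_8U for display
    imshow("gradient magnitude", mag8u);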
2. The Scharr operator
When the Sobel kernel size is 3, the computed derivatives are only approximations and the error can be noticeable. To address this, OpenCV provides the Scharr function: it is just as fast as Sobel but gives more accurate results for a 3×3 kernel.
Also, because the destination depth for Sobel is usually deeper than the source depth, the result cannot be displayed directly; convertScaleAbs() can be used to scale the result, take absolute values, and convert it to an 8-bit image that is easy to display and save.
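convertScaleAbs computes dst = saturate_cast<uchar>(|src * alpha + beta|). A minimal sketch, assuming a CV_16S gradient image named grad:

    // Sketch: map a CV_16S gradient image back to CV_8U for display.
    Mat grad8u;
    convertScaleAbs(grad, grad8u);   // alpha defaults to 1.0, beta to 0.0
    imshow("gradient", grad8u);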
API: void Scharr(source, destination, int output image depth, int x-direction derivative order, int y-direction derivative order, double optional scale factor, double optional delta value, int border mode)
Note: compared with Sobel there is no kernel-size parameter, because the Scharr kernel is always 3×3.
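As a side note, the same Scharr kernel can also be requested through Sobel by passing -1 (the CV_SCHARR / FILTER_SCHARR constant) as the kernel size; a sketch, assuming a grayscale Mat named src:

    // Sketch: Sobel with ksize = -1 (CV_SCHARR) applies the 3x3 Scharr kernel,
    // which is equivalent to calling Scharr() directly.
    Mat gx1, gx2;
    Sobel(src, gx1, CV_16S, 1, 0, -1);
    Scharr(src, gx2, CV_16S, 1, 0);   // same result as the line above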
The example code is as follows:
    #include <opencv2/opencv.hpp>
    using namespace cv;

    Mat srcImage, scharrxImage, scharrxAbsImage, scharryImage, scharryAbsImage, dstImage;
    const int g_deltaxMax = 0;
    int g_deltaxValue;
    const int g_deltayMax = 0;
    int g_deltayValue;

    // Both trackbars trigger the same recomputation.
    void updateScharr();
    void onTrackBarScharrDeltax(int pos, void* userData) { updateScharr(); }
    void onTrackBarScharrDeltay(int pos, void* userData) { updateScharr(); }

    int main(int argc, char* argv[])
    {
        srcImage = imread("F:\\opencv\\OpenCVImage\\scharr.jpg");
        namedWindow("src image");
        namedWindow("scharrx image");
        namedWindow("scharry image");
        namedWindow("scharrxy image");
        g_deltaxValue = 0;
        g_deltayValue = 0;
        createTrackbar("deltax value", "src image", &g_deltaxValue, g_deltaxMax, onTrackBarScharrDeltax, 0);
        createTrackbar("deltay value", "src image", &g_deltayValue, g_deltayMax, onTrackBarScharrDeltay, 0);
        onTrackBarScharrDeltax(g_deltaxValue, 0);
        imshow("src image", srcImage);
        moveWindow("src image", 0, 0);
        moveWindow("scharrx image", srcImage.cols, 0);
        moveWindow("scharry image", 0, srcImage.rows);
        moveWindow("scharrxy image", srcImage.cols, srcImage.rows);
        waitKey(0);
        return 0;
    }

    void updateScharr()
    {
        // Scharr only supports first-order derivatives, so dx/dy stay at 1 here.
        int deltax = g_deltaxValue + 1;
        int deltay = g_deltayValue + 1;
        Scharr(srcImage, scharrxImage, CV_16S, deltax, 0);
        Scharr(srcImage, scharryImage, CV_16S, 0, deltay);
        convertScaleAbs(scharrxImage, scharrxAbsImage);   // back to CV_8U for display
        convertScaleAbs(scharryImage, scharryAbsImage);
        addWeighted(scharrxAbsImage, 0.5, scharryAbsImage, 0.5, 0.0, dstImage);
        imshow("scharrx image", scharrxAbsImage);
        imshow("scharry image", scharryAbsImage);
        imshow("scharrxy image", dstImage);
    }
3. The Laplacian operator
Sometimes we want to differentiate in both the x and y directions at the same time and look at the combined result. That is what the Laplacian operator does: it is the second-order differential operator in N-dimensional Euclidean space.
Subtracting the Laplacian of an image from the image itself makes its contrast stronger (a common sharpening trick); a sketch of this is given after the notes below.
API: void Laplacian(source, destination, int output image depth, int aperture size, double optional scale factor, double optional delta value, int border mode);
Note: the depth rules are the same as for Sobel — a CV_8U source pairs with CV_16S output. ksize defaults to 1 and must be a positive odd number.
In fact, for ksize > 1 the Laplacian is computed as the sum of the second-order Sobel derivative in x and the second-order Sobel derivative in y. Before applying it, it is best to filter the image first, so that noise does not introduce small spurious responses.
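As mentioned above, subtracting the Laplacian from the source sharpens the image. A minimal sketch, assuming an 8-bit Mat named src has already been loaded (the window name is just a placeholder):

    // Sketch: sharpen by subtracting the Laplacian from the source image.
    // Both terms are kept in CV_16S so the subtraction does not overflow.
    Mat blurred, lap16, src16, sharp16, sharp8u;
    GaussianBlur(src, blurred, Size(3, 3), 0);   // smooth first to suppress noise
    Laplacian(blurred, lap16, CV_16S, 3);
    blurred.convertTo(src16, CV_16S);
    subtract(src16, lap16, sharp16);             // src - Laplacian(src)
    sharp16.convertTo(sharp8u, CV_8U);           // saturate back to 8-bit
    imshow("sharpened", sharp8u);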
The demo code:
    // Laplacian operator demo
    #include <opencv2/opencv.hpp>
    using namespace cv;

    Mat srcImage, srcImageGassianBlur, srcImageGray, laplacianImage, laplacianAbs;
    const int g_coreSizeMax = 5;
    int g_coreSizeValue;
    void onTrackBarCoreSize(int pos, void* userData);

    int main(int argc, char* argv[])
    {
        srcImage = imread("F:\\opencv\\OpenCVImage\\laplacian.jpg");
        // Smooth and convert to grayscale before taking the Laplacian.
        GaussianBlur(srcImage, srcImageGassianBlur, Size(3, 3), 0);
        cvtColor(srcImageGassianBlur, srcImageGray, CV_RGB2GRAY);
        namedWindow("src image");
        namedWindow("dst image");
        g_coreSizeValue = 0;
        createTrackbar("core size", "dst image", &g_coreSizeValue, g_coreSizeMax, onTrackBarCoreSize, 0);
        onTrackBarCoreSize(g_coreSizeValue, 0);
        imshow("src image", srcImage);
        moveWindow("src image", 0, 0);
        moveWindow("dst image", srcImage.cols, 0);
        waitKey(0);
        return 0;
    }

    void onTrackBarCoreSize(int pos, void* userData)
    {
        int coreSize = g_coreSizeValue * 2 + 1;   // 1, 3, 5, ... (positive odd)
        Laplacian(srcImageGray, laplacianImage, CV_16S, coreSize);
        convertScaleAbs(laplacianImage, laplacianAbs);   // back to CV_8U for display
        imshow("dst image", laplacianAbs);
    }
4. Canny edge detection
Edge detection has a great many engineering applications: edges are used to determine the shape of objects and to inspect product quality. Canny is a very good edge-detection algorithm provided by OpenCV, and it works in the following steps.
First, filtering: the image is denoised by convolution with a Gaussian smoothing kernel. Second, the gradient magnitude and direction are computed, much as with Sobel or Laplacian. Third, non-maximum suppression discards pixels that are not on an edge. Finally, hysteresis thresholding is applied with two thresholds: pixels between the low and high thresholds are kept only if they connect to strong edges (the recommended high-to-low threshold ratio is between 2:1 and 3:1).
API: void Canny(input image, output edge map, double low threshold, double high threshold, int Sobel aperture size, bool gradient-magnitude flag);
Note: the Sobel aperture defaults to 3 and the gradient-magnitude flag (L2gradient) defaults to false. The low threshold controls how edges are linked, while the high threshold controls where the strong initial edge segments start.
Also, it is best to denoise the image before running Canny; a minimal sketch of this pipeline follows.
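A minimal sketch of the recommended pipeline, assuming a BGR color Mat named src has already been loaded (the threshold value here is purely illustrative, with high = 3 × low):

    // Sketch: convert to gray, smooth, then Canny with a 3:1 threshold ratio.
    Mat gray, edges;
    cvtColor(src, gray, CV_BGR2GRAY);          // work on a single channel
    GaussianBlur(gray, gray, Size(3, 3), 0);   // denoise before edge detection
    double lowThresh = 40;                     // illustrative value
    Canny(gray, edges, lowThresh, lowThresh * 3, 3);
    imshow("edges", edges);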
The example code is as follows:
    // low : high threshold ratio fixed at 1:3
    // Sobel aperture can only be 3, 5 or 7
    // smoothing kernel size: 3, 5, 7 or 9
    #include <opencv2/opencv.hpp>
    using namespace cv;

    Mat srcImage, grayImage, grayBlurImage, cannyImage, dstImage;
    const int g_blurSizeMax = 3;          // smoothing kernel slider
    int g_blurValue;
    const int g_sobelSizeMax = 2;         // Sobel aperture slider
    int g_sobelValue;
    const int g_lowThresholdMax = 80;     // low threshold slider
    int g_lowThresholdValue;
    int g_upThresholdValue;

    // All three trackbars trigger the same recomputation.
    void updateCanny();
    void onTrackBarBlurSize(int pos, void* userData)         { updateCanny(); }
    void onTrackBarSobelSize(int pos, void* userData)        { updateCanny(); }
    void onTrackBarLowThresholdSize(int pos, void* userData) { updateCanny(); }

    int main(int argc, char* argv[])
    {
        srcImage = imread("F:\\opencv\\OpenCVImage\\canny2.jpg");
        if (srcImage.channels() != 1)
        {
            cvtColor(srcImage, grayImage, CV_BGR2GRAY);
        }
        else
        {
            grayImage = srcImage.clone();
        }
        namedWindow("src image");
        namedWindow("dst image");
        // Pre-allocate the output so the masked copy below starts from a black image.
        dstImage.create(srcImage.size(), srcImage.type());
        g_blurValue = 1;
        g_sobelValue = 1;
        g_lowThresholdValue = 3;
        g_upThresholdValue = 9;
        createTrackbar("blur size", "dst image", &g_blurValue, g_blurSizeMax, onTrackBarBlurSize, 0);
        createTrackbar("sobel size", "dst image", &g_sobelValue, g_sobelSizeMax, onTrackBarSobelSize, 0);
        createTrackbar("low threshold", "dst image", &g_lowThresholdValue, g_lowThresholdMax, onTrackBarLowThresholdSize, 0);
        onTrackBarBlurSize(g_blurValue, 0);
        imshow("src image", srcImage);
        moveWindow("src image", 0, 0);
        moveWindow("dst image", srcImage.cols, 0);
        waitKey(0);
        return 0;
    }

    void updateCanny()
    {
        int blurValue = g_blurValue * 2 + 3;    // 3, 5, 7 or 9
        int sobelValue = g_sobelValue * 2 + 3;  // 3, 5 or 7
        if (g_lowThresholdValue == 0)
        {
            g_lowThresholdValue = 1;
        }
        int lowThresholdValue = g_lowThresholdValue;
        int upThresholdValue = lowThresholdValue * 3;   // keep the 1:3 ratio
        // smoothing filter, applied to the grayscale image
        blur(grayImage, grayBlurImage, Size(blurValue, blurValue));
        // run Canny
        Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue, sobelValue);
        // Use the Canny edge map as a mask when copying src into dstImage: only
        // pixels on detected edges are copied, so the edges show up in their
        // original colors on a black background.
        dstImage = Scalar::all(0);
        srcImage.copyTo(dstImage, cannyImage);
        imshow("dst image", dstImage);
    }