Lane detection is an important research topic in driver assistance and autonomous driving, and the related algorithms have been studied for more than twenty years. This series of articles aims to help beginners quickly build their own projects while reading the literature. OpenCV makes writing machine-vision applications much easier, and to see further right from the start we should stand on the shoulders of the OpenCV giant.
Let's start with a fairly simple program (GitHub address).
Framework
- class LaneDetect: encapsulates the core algorithm
- void makeFromVid(string path): opens the video at the given file path and processes each image frame (a stripped-down sketch of this loop follows)
- main function: calls makeFromVid
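Before diving into the class itself, here is what the surrounding frame loop boils down to. This is a minimal, self-contained sketch assuming an OpenCV 2.4-style API; the video path "road.avi" is a placeholder and the lane detection itself is left out:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    VideoCapture cap("road.avi");            // placeholder path
    if (!cap.isOpened())
    {
        cout << "Cannot open the video file" << endl;
        return -1;
    }
    Mat frame;
    while (cap.read(frame))                  // grab frames until the video ends
    {
        cvtColor(frame, frame, CV_BGR2GRAY); // the detector works on grayscale images
        imshow("currframe", frame);
        if (waitKey(10) == 27) break;        // Esc to quit
    }
    return 0;
}

In the full listing, makeFromVid does essentially this, but additionally constructs a LaneDetect object from the first frame and hands every subsequent grayscale frame to nextFrame.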
Core algorithm
The heart of the whole algorithm is the blobRemoval function:
- findContours extracts the contours
- the extracted contours are then filtered:
  - contourArea: the contour area must be large enough (above the threshold minSize)
  - minAreaRect: the fitted rectangle must have a sufficiently long side (above the threshold longLane)
  - further filtering based on the rectangle's angle and position
- contours that pass all the checks are drawn
The algorithm is very simple and its results are correspondingly limited; it relies only on the basic geometric features and image positions of lane markings. A condensed sketch of the filtering step follows.
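The essence of that filtering can be condensed into a short self-contained sketch. It is illustrative only: the thresholds are passed in as parameters here, whereas the full program derives minSize and longLane from the image size, and the angle, position and aspect-ratio checks are omitted:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

// Keep only blobs that are large enough and have a sufficiently long side,
// and paint the accepted contours into an output mask.
void filterLaneBlobs(const Mat &binary, Mat &lanes, double minSize, double longLane)
{
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    Mat work = binary.clone();               // findContours modifies its input
    findContours(work, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    lanes = Mat::zeros(binary.size(), CV_8UC1);
    for (size_t i = 0; i < contours.size(); ++i)
    {
        if (contourArea(contours[i]) < minSize)                        // too small: noise
            continue;
        RotatedRect box = minAreaRect(contours[i]);
        if (box.size.width < longLane && box.size.height < longLane)  // no long side: skip
            continue;
        drawContours(lanes, contours, (int)i, Scalar(255), CV_FILLED, 8);
    }
}

In the full listing below, blobs that are not long enough still get a second chance through the angle, position and length-to-width-ratio checks.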
Full source code
/*TODO
* improve edge linking
* remove blobs whose axis direction doesn't point towards the vanishing point
* parallelisation
* lane prediction
*/
#include <opencv2/core/core.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
#include <math.h>
#include <time.h>
using namespace std;
using namespace cv;
clock_t start, stop;
class LaneDetect
{
public:
Mat currFrame; //stores the frame currently being processed
Mat temp; //stores intermediate results
Mat temp2; //stores the final lane segments
int diff, diffL, diffR;
int laneWidth;
int diffThreshTop;
int diffThreshLow;
int ROIrows;
int vertical_left;
int vertical_right;
int vertical_top;
int smallLaneArea;
int longLane;
int vanishingPt;
float maxLaneWidth;
//to store various blob properties
Mat binary_image; //used for blob removal
int minSize;
int ratio;
float contour_area;
float blob_angle_deg;
float bounding_width;
float bounding_length;
Size2f sz;
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
RotatedRect rotated_rect;
LaneDetect(Mat startFrame)
{
//currFrame = startFrame; //if image has to be processed at original size
currFrame = Mat(320,480,CV_8UC1,0.0); //initialise the working image to 320 rows x 480 cols
resize(startFrame, currFrame, currFrame.size()); // resize the input to required size
temp = Mat(currFrame.rows, currFrame.cols, CV_8UC1,0.0);//stores possible lane markings
temp2 = Mat(currFrame.rows, currFrame.cols, CV_8UC1,0.0);//stores finally selected lane marks
vanishingPt = currFrame.rows/2; //for simplicity right now
ROIrows = currFrame.rows - vanishingPt; //rows in region of interest
minSize = 0.00015 * (currFrame.cols*currFrame.rows); //min size of any region to be selected as lane
maxLaneWidth = 0.025 * currFrame.cols; //approximate max lane width based on image size
smallLaneArea = 7 * minSize;
longLane = 0.3 * currFrame.rows;
ratio = 4;
//these mark the possible ROI for vertical lane segments and to filter vehicle glare
vertical_left = 2*currFrame.cols/5;
vertical_right = 3*currFrame.cols/5;
vertical_top = 2*currFrame.rows/3;
namedWindow("lane",2);
namedWindow("midstep", 2);
namedWindow("currframe", 2);
namedWindow("laneBlobs",2);
getLane();
}
void updateSensitivity()
{
int total=0, average =0;
for(int i= vanishingPt; i<currFrame.rows; i++)
for(int j= 0 ; j<currFrame.cols; j++)
total += currFrame.at<uchar>(i,j);
average = total/(ROIrows*currFrame.cols);
cout<<"average : "<<average<<endl;
}
void getLane()
{
//medianBlur(currFrame, currFrame,5 );
// updateSensitivity();
//ROI = bottom half
for(int i=vanishingPt; i<currFrame.rows; i++)
for(int j=0; j<currFrame.cols; j++)
{
temp.at<uchar>(i,j) = 0;
temp2.at<uchar>(i,j) = 0;
}
imshow("currframe", currFrame);
blobRemoval();
}
void markLane()
{
for(int i=vanishingPt; i<currFrame.rows; i++)
{
//if a colour image were given, an additional check could be made:
//lane-marking RGB values are nearly equal to each other (i.e. almost no hue)
//the minimum lane width is taken to be 5 pixels
laneWidth =5+ maxLaneWidth*(i-vanishingPt)/ROIrows;
for(int j=laneWidth; j<currFrame.cols- laneWidth; j++)
{
diffL = currFrame.at<uchar>(i,j) - currFrame.at<uchar>(i,j-laneWidth);
diffR = currFrame.at<uchar>(i,j) - currFrame.at<uchar>(i,j+laneWidth);
diff = diffL + diffR - abs(diffL-diffR);
//one right bit-shift halves the pixel value
diffThreshLow = currFrame.at<uchar>(i,j)>>1;
//diffThreshTop = 1.2*currFrame.at<uchar>(i,j);
//both left and right differences can be made to contribute
//at least by certain threshold (which is >0 right now)
//the total diff must be greater than 5 to avoid noise
if (diffL>0 && diffR >0 && diff>5)
if(diff>=diffThreshLow /*&& diff<= diffThreshTop*/ )
temp.at<uchar>(i,j)=255;
}
}
}
void blobRemoval()
{
markLane();
// find all contours in the binary image
temp.copyTo(binary_image);
findContours(binary_image, contours,
hierarchy, CV_RETR_CCOMP,
CV_CHAIN_APPROX_SIMPLE);
// for removing invalid blobs
if (!contours.empty())
{
for (size_t i=0; i<contours.size(); ++i)
{
//====conditions for removing contours====//
contour_area = contourArea(contours[i]) ;
//blob size should not be less than lower threshold
if(contour_area > minSize)
{
rotated_rect = minAreaRect(contours[i]);
sz = rotated_rect.size;
bounding_width = sz.width;
bounding_length = sz.height;
//openCV selects length and width based on their orientation
//so angle needs to be adjusted accordingly
blob_angle_deg = rotated_rect.angle;
if (bounding_width < bounding_length)
blob_angle_deg = 90 + blob_angle_deg;
//if a blob with such a long side has been detected, it must be a lane marking (straight or curved)
if(bounding_length>longLane || bounding_width >longLane)
{
drawContours(currFrame, contours,i, Scalar(255), CV_FILLED, 8);
drawContours(temp2, contours,i, Scalar(255), CV_FILLED, 8);
}
//angle of orientation of blob should not be near horizontal or vertical
//vertical blobs are allowed only near center-bottom region, where centre lane mark is present
//length:width >= ratio for valid line segments
//if the area is very small, the ratio limits are relaxed
else if ((blob_angle_deg < -10 || blob_angle_deg > 10) &&
((blob_angle_deg > -70 && blob_angle_deg < 70 ) ||
(rotated_rect.center.y > vertical_top &&
rotated_rect.center.x > vertical_left && rotated_rect.center.x < vertical_right)))
{
if ((bounding_length/bounding_width)>=ratio || (bounding_width/bounding_length)>=ratio
||(contour_area< smallLaneArea && ((contour_area/(bounding_width*bounding_length)) > .75) &&
((bounding_length/bounding_width)>=2 || (bounding_width/bounding_length)>=2)))
{
drawContours(currFrame, contours,i, Scalar(255), CV_FILLED, 8);
drawContours(temp2, contours,i, Scalar(255), CV_FILLED, 8);
}
}
}
}
}
imshow("midstep", temp);
imshow("laneBlobs", temp2);
imshow("lane",currFrame);
}
void nextFrame(Mat &nxt)
{
//currFrame = nxt; //if processing is to be done at original size
resize(nxt ,currFrame, currFrame.size()); //resizing the input image for faster processing
getLane();
}
Mat getResult()
{
return temp2;
}
};//end of class LaneDetect
void makeFromVid(string path)
{
Mat frame;
VideoCapture cap(path); // open the video file for reading
if ( !cap.isOpened() ) // if the video cannot be opened, report and return
{
cout << "Cannot open the video file" << endl;
return;
}
//cap.set(CV_CAP_PROP_POS_MSEC, 300); //start the video at 300ms
double fps = cap.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
cout << "Input video‘s Frame per seconds : " << fps << endl;
cap.read(frame);
LaneDetect detect(frame);
while(1)
{
bool bSuccess = cap.read(frame); // read a new frame from video
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read the frame from video file" << endl;
break;
}
cvtColor(frame, frame, CV_BGR2GRAY);
//start = clock();
detect.nextFrame(frame);
//stop =clock();
// cout<<"fps : "<<1.0/(((double)(stop-start))/ CLOCKS_PER_SEC)<<endl;
if(waitKey(10) == 27) //wait 10 ms for the 'Esc' key; if it is pressed, break the loop
{
cout<<"video paused! press q to quit, any other key to continue"<<endl;
if(waitKey(0) == 'q')
{
cout << "terminated by user" << endl;
break;
}
}
}
}
int main()
{
makeFromVid("/home/yash/opencv-2.4.10/programs/output.avi");
// makeFromVid("/home/yash/opencv-2.4.10/programs/road.m4v");
waitKey(0);
destroyAllWindows();
}
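With OpenCV 2.4 installed, the program should build with something along the lines of g++ main.cpp -o lanedetect `pkg-config --cflags --libs opencv` (the source file name here is arbitrary); the path passed to makeFromVid must of course point to an existing video file.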