Stitching an Arbitrary Number of Images (Works Well): Computer Vision Course Project 1, Final Version
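The program below stitches an arbitrary number of input images (here 1.jpg through 10.jpg) into one panorama using the OpenCV 2.x detail stitching API. The pipeline is: SURF feature detection, pairwise matching, keeping the largest consistent image set, homography-based camera estimation, bundle adjustment, wave correction, spherical warping, exposure compensation, graph-cut seam finding, and multi-band blending. A simplified sketch built on the high-level cv::Stitcher wrapper follows the full listing.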

#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <time.h>
using namespace std;
using namespace cv;
using namespace cv::detail;
// Parameters
vector<string> img_names;
bool try_gpu = false;
double work_megapix = 0.6;   // registration resolution: images are scaled so their area is at most work_megapix * 1e6 pixels
double seam_megapix = 0.1;   // resolution used for seam estimation, in megapixels
double compose_megapix = 0.6;// compositing resolution, in megapixels
float conf_thresh = 1.f;     // confidence threshold for deciding that two images belong to the same panorama
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ; // wave correction, horizontal
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;    // exposure compensation method, default gain_blocks
float match_conf = 0.3f;     // match confidence: ratio of nearest to second-nearest match distance (0.65 is typical for SURF)
int blend_type = Blender::MULTI_BAND; // blending method, default multi-band
float blend_strength = 5;    // blending strength, 0-100, default 5
string result_name = "result.jpg";    // output file name

int main()
{
	clock_t start, finish;
	double totaltime;
	start = clock();
	int argc = 10;
	const char* argv[] = {"1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg", "7.jpg", "8.jpg", "9.jpg",
		"10.jpg"
	};

	for (int i = 0; i < argc; ++i)
		img_names.push_back(argv[i]);
	int num_images = static_cast<int>(img_names.size());
	double work_scale = 1, seam_scale = 1, compose_scale = 1;
	// Detect features: preprocess (resize) each image, then compute its keypoints and descriptors
	cout<<"Finding features..."<<endl;

	Ptr<FeaturesFinder> finder;
	finder = new SurfFeaturesFinder();  // use SURF for feature detection

	Mat full_img1,full_img, img;
	vector<ImageFeatures> features(num_images);
	vector<Mat> images(num_images);
	vector<Size> full_img_sizes(num_images);
	double seam_work_aspect = 1;

	for (int i = 0; i < num_images; ++i)
	{
		full_img1 = imread(img_names[i]);
		resize(full_img1, full_img, Size(400,300));  // force every input to a fixed 400x300 size (aspect ratio is not preserved)
		full_img_sizes[i] = full_img.size();

		// compute work_scale so the image area is at most work_megapix * 1e6
		work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));

		resize(full_img, img, Size(), work_scale, work_scale);
		// compute seam_scale so the image area is at most seam_megapix * 1e6
		seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
		seam_work_aspect = seam_scale / work_scale;
		// detect keypoints, compute descriptors, and record the image index
		(*finder)(img, features[i]);
		features[i].img_idx = i;
		cout<<"Features in image #" << i+1 << ": " << features[i].keypoints.size()<<endl;
		// resize the source image to seam resolution and store it in images[]
		resize(full_img, img, Size(), seam_scale, seam_scale);
		images[i] = img.clone();
	}

	finder->collectGarbage();
	full_img.release();
	img.release();

	// Pairwise matching
	cout<<"Pairwise matching"<<endl;

	// Match features between every pair of images using the nearest / second-nearest neighbor ratio test
	vector<MatchesInfo> pairwise_matches;
	BestOf2NearestMatcher matcher(try_gpu, match_conf);
	matcher(features, pairwise_matches);
	matcher.collectGarbage();
	// Keep only the matches whose confidence exceeds the threshold,
	// i.e. only the images that are believed to come from the same panorama

	vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
	vector<Mat> img_subset;
	vector<string> img_names_subset;
	vector<Size> full_img_sizes_subset;
	for (size_t i = 0; i < indices.size(); ++i)
	{
		img_names_subset.push_back(img_names[indices[i]]);
		img_subset.push_back(images[indices[i]]);
		full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
	}

	images = img_subset;
	img_names = img_names_subset;
	full_img_sizes = full_img_sizes_subset;

	// Check that enough images remain
	num_images = static_cast<int>(img_names.size());
	if (num_images < 2)
	{
		cout<<"Need more images"<<endl;
		return -1;
	}

	HomographyBasedEstimator estimator; // homography-based camera estimator
	vector<CameraParams> cameras;       // estimated camera parameters
	estimator(features, pairwise_matches, cameras);

	for (size_t i = 0; i < cameras.size(); ++i)
	{
		Mat R;
		cameras[i].R.convertTo(R, CV_32F);
		cameras[i].R = R;
		cout<<"Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K()<<endl;
	}

	Ptr<detail::BundleAdjusterBase> adjuster;   // bundle adjuster
	adjuster = new detail::BundleAdjusterRay(); // refine all camera parameters jointly by bundle adjustment (ray cost)

	adjuster->setConfThresh(conf_thresh);       // confidence threshold used by the adjuster
	Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
	refine_mask(0,0) = 1;   // refine fx
	refine_mask(0,1) = 1;   // refine skew
	refine_mask(0,2) = 1;   // refine ppx
	refine_mask(1,1) = 1;   // refine aspect ratio
	refine_mask(1,2) = 1;   // refine ppy
	adjuster->setRefinementMask(refine_mask);
	(*adjuster)(features, pairwise_matches, cameras); // run the refinement

	// Use the median of the estimated focal lengths as the scale of the warped images
	vector<double> focals;
	for (size_t i = 0; i < cameras.size(); ++i)
	{
		cout<<"Camera #" << indices[i]+1 << ":\n" << cameras[i].K()<<endl;
		focals.push_back(cameras[i].focal);
	}

	sort(focals.begin(), focals.end());
	float warped_image_scale;
	if (focals.size() % 2 == 1)
		warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
	else
		warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

	// Wave correction (straighten the panorama horizontally)
	vector<Mat> rmats;
	for (size_t i = 0; i < cameras.size(); ++i)
		rmats.push_back(cameras[i].R);
	waveCorrect(rmats, wave_correct);
	for (size_t i = 0; i < cameras.size(); ++i)
		cameras[i].R = rmats[i];

	cout<<"Warping images ... "<<endl;

	vector<Point> corners(num_images);    // top-left corners of the warped images in the common coordinate frame
	vector<Mat> masks_warped(num_images);
	vector<Mat> images_warped(num_images);
	vector<Size> sizes(num_images);
	vector<Mat> masks(num_images);        // blending masks

	// Prepare the image masks
	for (int i = 0; i < num_images; ++i)
	{
		masks[i].create(images[i].size(), CV_8U);
		masks[i].setTo(Scalar::all(255));
	}

	// Warp the images and their masks (spherical projection)

	Ptr<WarperCreator> warper_creator;
	warper_creator = new cv::SphericalWarper();

	Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));

	for (int i = 0; i < num_images; ++i)
	{
		Mat_<float> K;
		cameras[i].K().convertTo(K, CV_32F);
		float swa = (float)seam_work_aspect;
		K(0,0) *= swa; K(0,2) *= swa;
		K(1,1) *= swa; K(1,2) *= swa;

		corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]); // warp the image and record its top-left corner
		sizes[i] = images_warped[i].size();

		warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]); // warp the corresponding mask
	}

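	// The graph-cut seam finder works on CV_32F images, so convert the warped images to float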
	vector<Mat> images_warped_f(num_images);
	for (int i = 0; i < num_images; ++i)
		images_warped[i].convertTo(images_warped_f[i], CV_32F);

	Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type); // exposure compensation, gain_blocks by default
	compensator->feed(corners, images_warped, masks_warped);

	// Find seam lines between the warped images
	Ptr<SeamFinder> seam_finder;
	seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
	seam_finder->find(images_warped_f, corners, masks_warped);

	// Release memory that is no longer needed
	images.clear();
	images_warped.clear();
	images_warped_f.clear();
	masks.clear();

	// Compositing: warp at compose resolution, compensate exposure, and blend
	cout<<"Compositing..."<<endl;

	Mat img_warped, img_warped_s;
	Mat dilated_mask, seam_mask, mask, mask_warped;
	Ptr<Blender> blender;

	double compose_work_aspect = 1;

	for (int img_idx = 0; img_idx < num_images; ++img_idx)
	{
		cout<<"Compositing image #" << indices[img_idx]+1<<endl;
		// Everything so far was computed at work_scale, so the intrinsics,
		// corners and masks must be recomputed at the compositing scale.

		// Read the image and resize it the same way as before
		full_img1 = imread(img_names[img_idx]);
		resize(full_img1, full_img, Size(400,300));
		compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
		compose_work_aspect = compose_scale / work_scale;
		// Update the warp scale. With compose_megapix == work_megapix this aspect is 1, so
		// repeating the update every iteration is a no-op; otherwise it should run only once.
		warped_image_scale *= static_cast<float>(compose_work_aspect);
		warper = warper_creator->create(warped_image_scale);

		// Update corners and sizes
		for (int i = 0; i < num_images; ++i)
		{
			// Rescale the camera intrinsics (again a no-op when compose_work_aspect == 1)
			cameras[i].focal *= compose_work_aspect;
			cameras[i].ppx *= compose_work_aspect;
			cameras[i].ppy *= compose_work_aspect;

			// Recompute the warped ROI for this image
			Size sz = full_img_sizes[i];
			if (std::abs(compose_scale - 1) > 1e-1)
			{
				sz.width = cvRound(full_img_sizes[i].width * compose_scale);
				sz.height = cvRound(full_img_sizes[i].height * compose_scale);
			}

			Mat K;
			cameras[i].K().convertTo(K, CV_32F);
			Rect roi = warper->warpRoi(sz, K, cameras[i].R);
			corners[i] = roi.tl();
			sizes[i] = roi.size();
		}

		if (abs(compose_scale - 1) > 1e-1)
			resize(full_img, img, Size(), compose_scale, compose_scale);
		else
			img = full_img;
		full_img.release();
		Size img_size = img.size();

		Mat K;
		cameras[img_idx].K().convertTo(K, CV_32F);
		// Warp the current image
		warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
		// Warp its mask
		mask.create(img_size, CV_8U);
		mask.setTo(Scalar::all(255));
		warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

		// Apply exposure compensation
		compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

		img_warped.convertTo(img_warped_s, CV_16S);
		img_warped.release();
		img.release();
		mask.release();

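		// Dilate the seam mask (computed at seam resolution), resize it to compose resolution,
		// and intersect it with the warped mask before blending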
		dilate(masks_warped[img_idx], dilated_mask, Mat());
		resize(dilated_mask, seam_mask, mask_warped.size());
		mask_warped = seam_mask & mask_warped;
		// Initialize the blender on the first iteration
		if (blender.empty())
		{
			blender = Blender::createDefault(blend_type, try_gpu);
			Size dst_sz = resultRoi(corners, sizes).size();
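			// blend width is blend_strength percent of sqrt(panorama area)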
			float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
			if (blend_width < 1.f)
				blender = Blender::createDefault(Blender::NO, try_gpu);
			else
			{
				MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
				mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
				cout<<"Multi-band blender, number of bands: " << mb->numBands()<<endl;
			}
			// Determine the size of the final panorama from the corners and sizes
			blender->prepare(corners, sizes);
		}

		// Feed the current image into the blender
		blender->feed(img_warped_s, mask_warped, corners[img_idx]);
	}

	Mat result, result_mask;
	blender->blend(result, result_mask);

	imwrite(result_name, result);

	finish = clock();
	totaltime = (double)(finish - start) / CLOCKS_PER_SEC;
	cout<<"\nTotal running time: "<<totaltime<<" seconds"<<endl;

	return 0;
}
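
For comparison, OpenCV 2.x also wraps this whole pipeline in the high-level cv::Stitcher class. The following is only a minimal sketch, not a tuned replacement for the detailed version above: it assumes the same 1.jpg ... 10.jpg inputs and leaves every stitching parameter at its default.

#include <cstdio>
#include <iostream>
#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"
using namespace std;
using namespace cv;

int main()
{
	// Load the same 1.jpg ... 10.jpg inputs, skipping any that fail to read
	vector<Mat> imgs;
	for (int i = 1; i <= 10; ++i)
	{
		char name[16];
		sprintf(name, "%d.jpg", i);
		Mat img = imread(name);
		if (!img.empty())
			imgs.push_back(img);
	}

	// Let the high-level wrapper run the full pipeline with default settings
	Mat pano;
	Stitcher stitcher = Stitcher::createDefault(false);   // false = do not try the GPU
	Stitcher::Status status = stitcher.stitch(imgs, pano);
	if (status != Stitcher::OK)
	{
		cout << "Stitching failed, error code: " << int(status) << endl;
		return -1;
	}
	imwrite("result.jpg", pano);
	return 0;
}

The wrapper gives up fine-grained control over the warper, seam finder, exposure compensator and blender in exchange for a two-line core, which is usually enough when the defaults suit the scene.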
