OpenCV2 stitching of n images: detailed stitcher code 2 (simplified version), Computer Vision Assignment 2

#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"

using namespace std;
using namespace cv;
using namespace cv::detail;

// Default command line args
vector<string> img_names;
bool preview = false;
bool try_gpu = false;
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = 0.6;
float conf_thresh = 1.f;
string features_type = "surf";//or"orb"
string ba_cost_func = "ray";
string ba_refine_mask = "xxxxx";
bool do_wave_correct = true;
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
bool save_graph = false;
std::string save_graph_to;
string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.3f;
string seam_find_type = "gc_color";
int blend_type = Blender::MULTI_BAND;
float blend_strength = 5;
string result_name = "result.jpg";
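// Notes on the key parameters above (semantics follow OpenCV's stitching_detailed sample):
//   work_megapix    - resolution (in megapixels) used for feature detection and registration
//   seam_megapix    - resolution used for seam estimation
//   compose_megapix - resolution used for the final compositing step (negative = original size)
//   conf_thresh     - confidence threshold for deciding two images belong to the same panorama
//   match_conf      - confidence threshold used inside the feature matcher
//   blend_strength  - blending strength in the [0, 100] range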

int main(/*int argc, char* argv[]*/)
{

<span style="white-space:pre">	</span>int argc = 10;
<span style="white-space:pre">	</span>char* argv[] = {"1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg", "7.jpg", "8.jpg", "9.jpg", 
<span style="white-space:pre">		</span>"10.jpg"//, "--preview",
<span style="white-space:pre">		</span>//<span style="white-space:pre">	</span>"--try_gpu","no",
<span style="white-space:pre">		</span>//<span style="white-space:pre">	</span>"--work_megapix","0.6",
<span style="white-space:pre">		</span>//<span style="white-space:pre">	</span>//"--features","orb",
<span style="white-space:pre">		</span>//<span style="white-space:pre">	</span>"--wave_correct","no",
<span style="white-space:pre">		</span>//<span style="white-space:pre">	</span>"--wave_correct"
<span style="white-space:pre">		</span>//"--warp_type",//<span style="white-space:pre">	</span> "plane" "cylindrical""spherical""plane""cylindrical""spherical""fisheye""stereographic"
<span style="white-space:pre">		</span>//  "plane",         //"compressedPlaneA2B1" "compressedPlaneA1.5B1""compressedPlanePortraitA2B1"
<span style="white-space:pre">		</span>//<span style="white-space:pre">		</span>//   "compressedPlanePortraitA1.5B1" "paniniA2B1""paniniA1.5B1" "paniniPortraitA2B1""paniniPortraitA1.5B1"
<span style="white-space:pre">		</span>//<span style="white-space:pre">		</span> //  "mercator" "transverseMercator"
<span style="white-space:pre">		</span>//<span style="white-space:pre">		</span> "--expos_comp","no",
<span style="white-space:pre">		</span>//<span style="white-space:pre">		</span> "--seam","no","--blend","no"
<span style="white-space:pre">	</span>};

<span style="white-space:pre">	</span>for (int i = 0; i < argc; ++i)
<span style="white-space:pre">		</span>img_names.push_back(argv[i]);

<span style="white-space:pre">	</span>// Check if have enough images
<span style="white-space:pre">	</span>int num_images = static_cast<int>(img_names.size());

<span style="white-space:pre">	</span>double work_scale = 1, seam_scale = 1, compose_scale = 1;
<span style="white-space:pre">	</span>bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;

<span style="white-space:pre">	</span>cout<<"Finding features..."<<endl;

<span style="white-space:pre">	</span>Ptr<FeaturesFinder> finder;

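	// SURF lives in the nonfree module in OpenCV 2.4; the GPU variant is used only when
	// the library was built with CUDA support and try_gpu is enabled.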
#if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU) && !defined(ANDROID)
<span style="white-space:pre">	</span>if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
<span style="white-space:pre">		</span>finder = new SurfFeaturesFinderGpu();
<span style="white-space:pre">	</span>else
#endif
<span style="white-space:pre">		</span>finder = new SurfFeaturesFinder();

<span style="white-space:pre">	</span>Mat full_img, img;
<span style="white-space:pre">	</span>vector<ImageFeatures> features(num_images);
<span style="white-space:pre">	</span>vector<Mat> images(num_images);
<span style="white-space:pre">	</span>vector<Size> full_img_sizes(num_images);
<span style="white-space:pre">	</span>double seam_work_aspect = 1;

<span style="white-space:pre">	</span>for (int i = 0; i < num_images; ++i)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>full_img = imread(img_names[i]);
<span style="white-space:pre">		</span>full_img_sizes[i] = full_img.size();

<span style="white-space:pre">		</span>if (full_img.empty())
<span style="white-space:pre">		</span>{
<span style="white-space:pre">			</span>cout<<"Can't open image " << img_names[i]<<endl;
<span style="white-space:pre">			</span>return -1;
<span style="white-space:pre">		</span>}
<span style="white-space:pre">		</span>if (work_megapix < 0)
<span style="white-space:pre">		</span>{
<span style="white-space:pre">			</span>img = full_img;
<span style="white-space:pre">			</span>work_scale = 1;
<span style="white-space:pre">			</span>is_work_scale_set = true;
<span style="white-space:pre">		</span>}
<span style="white-space:pre">		</span>else
<span style="white-space:pre">		</span>{
<span style="white-space:pre">			</span>if (!is_work_scale_set)
<span style="white-space:pre">			</span>{
<span style="white-space:pre">				</span>work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
<span style="white-space:pre">				</span>is_work_scale_set = true;
<span style="white-space:pre">			</span>}
<span style="white-space:pre">			</span>resize(full_img, img, Size(), work_scale, work_scale);
<span style="white-space:pre">		</span>}
<span style="white-space:pre">		</span>if (!is_seam_scale_set)
<span style="white-space:pre">		</span>{
<span style="white-space:pre">			</span>seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
<span style="white-space:pre">			</span>seam_work_aspect = seam_scale / work_scale;
<span style="white-space:pre">			</span>is_seam_scale_set = true;
<span style="white-space:pre">		</span>}

<span style="white-space:pre">		</span>(*finder)(img, features[i]);
<span style="white-space:pre">		</span>features[i].img_idx = i;
<span style="white-space:pre">		</span>cout<<"Features in image #" << i+1 << ": " << features[i].keypoints.size()<<endl;

<span style="white-space:pre">		</span>resize(full_img, img, Size(), seam_scale, seam_scale);
<span style="white-space:pre">		</span>images[i] = img.clone();
<span style="white-space:pre">	</span>}

<span style="white-space:pre">	</span>finder->collectGarbage();
<span style="white-space:pre">	</span>full_img.release();
<span style="white-space:pre">	</span>img.release();

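	// Match features between every image pair; BestOf2NearestMatcher keeps 2-NN matches that
	// pass the ratio test and estimates a homography per pair to compute a match confidence.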
<span style="white-space:pre">	</span>cout<<"Pairwise matching"<<endl;

<span style="white-space:pre">	</span>vector<MatchesInfo> pairwise_matches;
<span style="white-space:pre">	</span>BestOf2NearestMatcher matcher(try_gpu, match_conf);
<span style="white-space:pre">	</span>matcher(features, pairwise_matches);
<span style="white-space:pre">	</span>matcher.collectGarbage();

<span style="white-space:pre">	</span>// Leave only images we are sure are from the same panorama
<span style="white-space:pre">	</span>vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
<span style="white-space:pre">	</span>vector<Mat> img_subset;
<span style="white-space:pre">	</span>vector<string> img_names_subset;
<span style="white-space:pre">	</span>vector<Size> full_img_sizes_subset;
<span style="white-space:pre">	</span>for (size_t i = 0; i < indices.size(); ++i)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>img_names_subset.push_back(img_names[indices[i]]);
<span style="white-space:pre">		</span>img_subset.push_back(images[indices[i]]);
<span style="white-space:pre">		</span>full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
<span style="white-space:pre">	</span>}

<span style="white-space:pre">	</span>images = img_subset;
<span style="white-space:pre">	</span>img_names = img_names_subset;
<span style="white-space:pre">	</span>full_img_sizes = full_img_sizes_subset;

<span style="white-space:pre">	</span>// Check if we still have enough images
<span style="white-space:pre">	</span>num_images = static_cast<int>(img_names.size());
<span style="white-space:pre">	</span>if (num_images < 2)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>cout<<"Need more images"<<endl;
<span style="white-space:pre">		</span>return -1;
<span style="white-space:pre">	</span>}

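	// Estimate initial camera parameters from the pairwise homographies, then refine them with
	// bundle adjustment minimizing the ray reprojection error (BundleAdjusterRay). The 3x3
	// refinement mask selects which intrinsics are optimized: (0,0)=fx, (0,1)=skew, (0,2)=ppx,
	// (1,1)=aspect, (1,2)=ppy, i.e. the "xxxxx" setting above refines all five.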
<span style="white-space:pre">	</span>HomographyBasedEstimator estimator;
<span style="white-space:pre">	</span>vector<CameraParams> cameras;
<span style="white-space:pre">	</span>estimator(features, pairwise_matches, cameras);

<span style="white-space:pre">	</span>for (size_t i = 0; i < cameras.size(); ++i)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>Mat R;
<span style="white-space:pre">		</span>cameras[i].R.convertTo(R, CV_32F);
<span style="white-space:pre">		</span>cameras[i].R = R;
<span style="white-space:pre">		</span>cout<<"Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K()<<endl;
<span style="white-space:pre">	</span>}

<span style="white-space:pre">	</span>Ptr<detail::BundleAdjusterBase> adjuster;
<span style="white-space:pre">	</span>adjuster = new detail::BundleAdjusterRay();

<span style="white-space:pre">	</span>adjuster->setConfThresh(conf_thresh);
<span style="white-space:pre">	</span>Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
<span style="white-space:pre">	</span>refine_mask(0,0) = 1;
<span style="white-space:pre">	</span>refine_mask(0,1) = 1;
<span style="white-space:pre">	</span>refine_mask(0,2) = 1;
<span style="white-space:pre">	</span>refine_mask(1,1) = 1;
<span style="white-space:pre">	</span>refine_mask(1,2) = 1;
<span style="white-space:pre">	</span>adjuster->setRefinementMask(refine_mask);
<span style="white-space:pre">	</span>(*adjuster)(features, pairwise_matches, cameras);

<span style="white-space:pre">	</span>// Find median focal length

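	// The median focal length is used as the scale (radius) of the warping surface, which keeps
	// the warped images roughly at the resolution of the inputs.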
<span style="white-space:pre">	</span>vector<double> focals;
<span style="white-space:pre">	</span>for (size_t i = 0; i < cameras.size(); ++i)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>cout<<"Camera #" << indices[i]+1 << ":\n" << cameras[i].K()<<endl;
<span style="white-space:pre">		</span>focals.push_back(cameras[i].focal);
<span style="white-space:pre">	</span>}

<span style="white-space:pre">	</span>sort(focals.begin(), focals.end());
<span style="white-space:pre">	</span>float warped_image_scale;
<span style="white-space:pre">	</span>if (focals.size() % 2 == 1)
<span style="white-space:pre">		</span>warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
<span style="white-space:pre">	</span>else
<span style="white-space:pre">		</span>warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

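	// Wave correction: straighten the estimated rotations so the panorama does not show the
	// characteristic "wavy" horizon caused by accumulated rotation errors.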
<span style="white-space:pre">	</span>if (do_wave_correct)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>vector<Mat> rmats;
<span style="white-space:pre">		</span>for (size_t i = 0; i < cameras.size(); ++i)
<span style="white-space:pre">			</span>rmats.push_back(cameras[i].R);
<span style="white-space:pre">		</span>waveCorrect(rmats, wave_correct);
<span style="white-space:pre">		</span>for (size_t i = 0; i < cameras.size(); ++i)
<span style="white-space:pre">			</span>cameras[i].R = rmats[i];
<span style="white-space:pre">	</span>}

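	// Warp the low-resolution (seam-scale) images first; these auxiliary warps are only used for
	// exposure compensation and seam finding, while the full-resolution warp happens at compositing time.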
<span style="white-space:pre">	</span>cout<<"Warping images (auxiliary)... "<<endl;

<span style="white-space:pre">	</span>vector<Point> corners(num_images);
<span style="white-space:pre">	</span>vector<Mat> masks_warped(num_images);
<span style="white-space:pre">	</span>vector<Mat> images_warped(num_images);
<span style="white-space:pre">	</span>vector<Size> sizes(num_images);
<span style="white-space:pre">	</span>vector<Mat> masks(num_images);

<span style="white-space:pre">	</span>// Preapre images masks
<span style="white-space:pre">	</span>for (int i = 0; i < num_images; ++i)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>masks[i].create(images[i].size(), CV_8U);
<span style="white-space:pre">		</span>masks[i].setTo(Scalar::all(255));
<span style="white-space:pre">	</span>}

<span style="white-space:pre">	</span>// Warp images and their masks

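	// warp_type is "spherical", so the images are projected onto a sphere whose radius is the
	// median focal length rescaled to the seam resolution (warped_image_scale * seam_work_aspect).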
<span style="white-space:pre">	</span>Ptr<WarperCreator> warper_creator;
<span style="white-space:pre">	</span>warper_creator = new cv::SphericalWarper();

<span style="white-space:pre">	</span>Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));

<span style="white-space:pre">	</span>for (int i = 0; i < num_images; ++i)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>Mat_<float> K;
<span style="white-space:pre">		</span>cameras[i].K().convertTo(K, CV_32F);
<span style="white-space:pre">		</span>float swa = (float)seam_work_aspect;
<span style="white-space:pre">		</span>K(0,0) *= swa; K(0,2) *= swa;
<span style="white-space:pre">		</span>K(1,1) *= swa; K(1,2) *= swa;

<span style="white-space:pre">		</span>corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
<span style="white-space:pre">		</span>sizes[i] = images_warped[i].size();

<span style="white-space:pre">		</span>warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
<span style="white-space:pre">	</span>}

<span style="white-space:pre">	</span>vector<Mat> images_warped_f(num_images);
<span style="white-space:pre">	</span>for (int i = 0; i < num_images; ++i)
<span style="white-space:pre">		</span>images_warped[i].convertTo(images_warped_f[i], CV_32F);

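	// GAIN_BLOCKS exposure compensation: estimate per-block gain corrections from the overlap
	// regions so neighbouring images end up with similar brightness.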
<span style="white-space:pre">	</span>Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
<span style="white-space:pre">	</span>compensator->feed(corners, images_warped, masks_warped);

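	// Graph-cut seam finder (color cost): find seams in the overlap areas that minimize visible
	// transitions; the resulting masks decide which pixels each image contributes.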
<span style="white-space:pre">	</span>Ptr<SeamFinder> seam_finder;

<span style="white-space:pre">	</span>seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);

<span style="white-space:pre">	</span>seam_finder->find(images_warped_f, corners, masks_warped);

<span style="white-space:pre">	</span>// Release unused memory
<span style="white-space:pre">	</span>images.clear();
<span style="white-space:pre">	</span>images_warped.clear();
<span style="white-space:pre">	</span>images_warped_f.clear();
<span style="white-space:pre">	</span>masks.clear();

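	// Compositing: re-read each image at compose resolution, warp it with the (rescaled) camera
	// parameters, compensate exposure, restrict it to its seam mask, and feed it to the blender.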
<span style="white-space:pre">	</span>cout<<"Compositing..."<<endl;

<span style="white-space:pre">	</span>Mat img_warped, img_warped_s;
<span style="white-space:pre">	</span>Mat dilated_mask, seam_mask, mask, mask_warped;
<span style="white-space:pre">	</span>Ptr<Blender> blender;
<span style="white-space:pre">	</span>//double compose_seam_aspect = 1;
<span style="white-space:pre">	</span>double compose_work_aspect = 1;

<span style="white-space:pre">	</span>for (int img_idx = 0; img_idx < num_images; ++img_idx)
<span style="white-space:pre">	</span>{
<span style="white-space:pre">		</span>cout<<"Compositing image #" << indices[img_idx]+1<<endl;

<span style="white-space:pre">		</span>// Read image and resize it if necessary
<span style="white-space:pre">		</span>full_img = imread(img_names[img_idx]);
<span style="white-space:pre">		</span>if (!is_compose_scale_set)
<span style="white-space:pre">		</span>{
<span style="white-space:pre">			</span>if (compose_megapix > 0)
<span style="white-space:pre">				</span>compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
<span style="white-space:pre">			</span>is_compose_scale_set = true;

<span style="white-space:pre">			</span>// Compute relative scales
<span style="white-space:pre">			</span>//compose_seam_aspect = compose_scale / seam_scale;
<span style="white-space:pre">			</span>compose_work_aspect = compose_scale / work_scale;

<span style="white-space:pre">			</span>// Update warped image scale
<span style="white-space:pre">			</span>warped_image_scale *= static_cast<float>(compose_work_aspect);
<span style="white-space:pre">			</span>warper = warper_creator->create(warped_image_scale);

<span style="white-space:pre">			</span>// Update corners and sizes
<span style="white-space:pre">			</span>for (int i = 0; i < num_images; ++i)
<span style="white-space:pre">			</span>{
<span style="white-space:pre">				</span>// Update intrinsics
<span style="white-space:pre">				</span>cameras[i].focal *= compose_work_aspect;
<span style="white-space:pre">				</span>cameras[i].ppx *= compose_work_aspect;
<span style="white-space:pre">				</span>cameras[i].ppy *= compose_work_aspect;

<span style="white-space:pre">				</span>// Update corner and size
<span style="white-space:pre">				</span>Size sz = full_img_sizes[i];
<span style="white-space:pre">				</span>if (std::abs(compose_scale - 1) > 1e-1)
<span style="white-space:pre">				</span>{
<span style="white-space:pre">					</span>sz.width = cvRound(full_img_sizes[i].width * compose_scale);
<span style="white-space:pre">					</span>sz.height = cvRound(full_img_sizes[i].height * compose_scale);
<span style="white-space:pre">				</span>}

<span style="white-space:pre">				</span>Mat K;
<span style="white-space:pre">				</span>cameras[i].K().convertTo(K, CV_32F);
<span style="white-space:pre">				</span>Rect roi = warper->warpRoi(sz, K, cameras[i].R);
<span style="white-space:pre">				</span>corners[i] = roi.tl();
<span style="white-space:pre">				</span>sizes[i] = roi.size();
<span style="white-space:pre">			</span>}
<span style="white-space:pre">		</span>}
<span style="white-space:pre">		</span>if (abs(compose_scale - 1) > 1e-1)
<span style="white-space:pre">			</span>resize(full_img, img, Size(), compose_scale, compose_scale);
<span style="white-space:pre">		</span>else
<span style="white-space:pre">			</span>img = full_img;
<span style="white-space:pre">		</span>full_img.release();
<span style="white-space:pre">		</span>Size img_size = img.size();

<span style="white-space:pre">		</span>Mat K;
<span style="white-space:pre">		</span>cameras[img_idx].K().convertTo(K, CV_32F);

<span style="white-space:pre">		</span>// Warp the current image
<span style="white-space:pre">		</span>warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

<span style="white-space:pre">		</span>// Warp the current image mask
<span style="white-space:pre">		</span>mask.create(img_size, CV_8U);
<span style="white-space:pre">		</span>mask.setTo(Scalar::all(255));
<span style="white-space:pre">		</span>warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

<span style="white-space:pre">		</span>// Compensate exposure
<span style="white-space:pre">		</span>compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

<span style="white-space:pre">		</span>img_warped.convertTo(img_warped_s, CV_16S);
<span style="white-space:pre">		</span>img_warped.release();
<span style="white-space:pre">		</span>img.release();
<span style="white-space:pre">		</span>mask.release();

<span style="white-space:pre">		</span>dilate(masks_warped[img_idx], dilated_mask, Mat());
<span style="white-space:pre">		</span>resize(dilated_mask, seam_mask, mask_warped.size());
<span style="white-space:pre">		</span>mask_warped = seam_mask & mask_warped;

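		// Lazily create the blender on the first image. For multi-band blending the number of
		// pyramid bands is derived from the blend width (blend_strength percent of the panorama
		// size); a blend width below one pixel falls back to plain copying (Blender::NO).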
<span style="white-space:pre">		</span>if (blender.empty())
<span style="white-space:pre">		</span>{
<span style="white-space:pre">			</span>blender = Blender::createDefault(blend_type, try_gpu);
<span style="white-space:pre">			</span>Size dst_sz = resultRoi(corners, sizes).size();
<span style="white-space:pre">			</span>float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
<span style="white-space:pre">			</span>if (blend_width < 1.f)
<span style="white-space:pre">				</span>blender = Blender::createDefault(Blender::NO, try_gpu);
<span style="white-space:pre">			</span>else 
<span style="white-space:pre">			</span>{
<span style="white-space:pre">				</span>MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
<span style="white-space:pre">				</span>mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
<span style="white-space:pre">				</span>cout<<"Multi-band blender, number of bands: " << mb->numBands()<<endl;
<span style="white-space:pre">			</span>}

<span style="white-space:pre">			</span>blender->prepare(corners, sizes);
<span style="white-space:pre">		</span>}

<span style="white-space:pre">		</span>// Blend the current image
<span style="white-space:pre">		</span>blender->feed(img_warped_s, mask_warped, corners[img_idx]);
<span style="white-space:pre">	</span>}

<span style="white-space:pre">	</span>Mat result, result_mask;
<span style="white-space:pre">	</span>blender->blend(result, result_mask);

<span style="white-space:pre">	</span>imwrite(result_name, result);

<span style="white-space:pre">	</span>return 0;
}
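
For comparison, the whole pipeline above (feature finding, matching, bundle adjustment, warping, exposure compensation, seam finding, blending) is roughly what OpenCV's high-level cv::Stitcher wrapper runs internally with default settings. A minimal sketch using that wrapper in OpenCV 2.4 might look like the following; the input names 1.jpg ... 10.jpg match the listing above, and the output name result_stitcher.jpg is just an example:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"

using namespace std;
using namespace cv;

int main()
{
	// Load the same input images as in the detailed pipeline above.
	vector<Mat> imgs;
	for (int i = 1; i <= 10; ++i)
	{
		ostringstream name;
		name << i << ".jpg";
		Mat img = imread(name.str());
		if (img.empty())
		{
			cout << "Can't open image " << name.str() << endl;
			return -1;
		}
		imgs.push_back(img);
	}

	// Let the wrapper pick its defaults (feature finder, spherical warp, multi-band blending, ...).
	Mat pano;
	Stitcher stitcher = Stitcher::createDefault(false /*try_use_gpu*/);
	Stitcher::Status status = stitcher.stitch(imgs, pano);
	if (status != Stitcher::OK)
	{
		cout << "Stitching failed, error code = " << int(status) << endl;
		return -1;
	}

	imwrite("result_stitcher.jpg", pano);
	return 0;
}

The detailed version in this post is longer, but it exposes every stage (work/seam/compose scales, bundle adjustment options, warp type, seam finder, blender), which is exactly what the wrapper hides.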