Today I started learning OpenCV, porting the results I had previously simulated in MATLAB to C++. This way I can learn C++ and also pick up the coding conventions of an open-source library like OpenCV.
My main focus is image registration. My eventual goal is video-based panoramic image stitching, but I am taking it step by step and only doing image registration for now.
Today's code is a meaningful milestone, so I am recording it here.
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/features2d.hpp>  // SurfFeatureDetector / SurfDescriptorExtractor (OpenCV 2.x nonfree module)
#include <stdio.h>

using namespace std;
using namespace cv;

int main(int argc, char* argv[])
{
    Mat img1 = imread("bird1.jpg");
    Mat img2 = imread("bird2.jpg");
    if (img1.empty() || img2.empty())
    {
        printf("Could not load bird1.jpg / bird2.jpg\n");
        return -1;
    }
    //-- imread loads BGR images, so convert with CV_BGR2GRAY (not CV_RGB2GRAY)
    cvtColor(img1, img1, CV_BGR2GRAY);
    cvtColor(img2, img2, CV_BGR2GRAY);

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(img1, keypoints_1);
    detector.detect(img2, keypoints_2);

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(img1, keypoints_1, descriptors_1);
    extractor.compute(img2, keypoints_2, descriptors_2);

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    // BruteForceMatcher< L2<float> > matcher;  // exhaustive alternative
    vector<DMatch> matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    //-- Quick calculation of max and min distances between keypoints
    double max_dist = 0;
    double min_dist = 100;
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //-- Keep only "good" matches (i.e. whose distance is less than 2*min_dist)
    //-- PS. radiusMatch can also be used here.
    vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (matches[i].distance < 2 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    //-- Draw only the "good" matches
    Mat img_matches;
    drawMatches(img1, keypoints_1, img2, keypoints_2,
                good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Localize the object: collect the matched point pairs
    vector<Point2f> obj;
    vector<Point2f> scene;
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        //-- Get the keypoints from the good matches
        obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
    }

    //-- Estimate the homography from img1 to img2 with RANSAC
    Mat H = findHomography(obj, scene, CV_RANSAC);

    //-- Get the corners of image_1 (the object to be "detected") and map them into image_2
    vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0, 0);
    obj_corners[1] = Point2f((float)img1.cols, 0);
    obj_corners[2] = Point2f((float)img1.cols, (float)img1.rows);
    obj_corners[3] = Point2f(0, (float)img1.rows);
    vector<Point2f> scene_corners(4);
    perspectiveTransform(obj_corners, scene_corners, H);

    //-- Register the image using the perspective transform.
    //-- The output size comes from the transformed corners:
    //-- width = max x of the right-hand corners, height = max y of the bottom corners.
    int warpCols = cvRound(max(scene_corners[1].x, scene_corners[2].x));
    int warpRows = cvRound(max(scene_corners[2].y, scene_corners[3].y));
    Mat imageturn;
    warpPerspective(img1, imageturn, H, Size(warpCols, warpRows));

    //-- Register the image using an affine transform estimated from the first
    //-- three good matches (a homography needs 4 point pairs, an affinity only 3)
    vector<Point2f> img1corners(3);
    img1corners[0] = obj[0];
    img1corners[1] = obj[1];
    img1corners[2] = obj[2];
    vector<Point2f> img2corners(3);
    img2corners[0] = scene[0];
    img2corners[1] = scene[1];
    img2corners[2] = scene[2];
    Mat affineMat = getAffineTransform(img1corners, img2corners);
    Mat imageAffine;
    warpAffine(img1, imageAffine, affineMat, Size(warpCols, warpRows));

    //-- Draw lines between the mapped corners of image_1 inside image_2
    line(img_matches, scene_corners[0] + Point2f((float)img1.cols, 0), scene_corners[1] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[1] + Point2f((float)img1.cols, 0), scene_corners[2] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[2] + Point2f((float)img1.cols, 0), scene_corners[3] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[3] + Point2f((float)img1.cols, 0), scene_corners[0] + Point2f((float)img1.cols, 0), Scalar(0, 255, 0), 4);

    //-- Show the matches and the two registered images
    imshow("Good Matches & Object detection", img_matches);
    imshow("perspective image", imageturn);
    imshow("affine image", imageAffine);

    //for (size_t i = 0; i < good_matches.size(); i++)
    //{
    //    printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", (int)i, good_matches[i].queryIdx, good_matches[i].trainIdx);
    //}

    waitKey();
    return 0;
}
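Since the eventual goal is panorama stitching, and the homography H above already maps img1 into img2's coordinate frame, a natural next step is to composite the two registered images onto one canvas. The sketch below is my own rough assumption of that step, not part of the original code: the helper name stitchNaive and the "warp img1, then copy img2 over the overlap" compositing are hypothetical, with no seam blending or handling of negative offsets. It is meant to be appended to the same file, reusing the headers and namespaces above.

// Hypothetical helper (not in the original listing): naive compositing of a
// registered pair. Assumes H maps img1 coordinates into img2's frame, as in
// the code above, and that both inputs have the same type (here grayscale).
Mat stitchNaive(const Mat& img1, const Mat& img2, const Mat& H)
{
    // Map the corners of img1 to see how far the warp extends right and down.
    vector<Point2f> corners(4), warped(4);
    corners[0] = Point2f(0, 0);
    corners[1] = Point2f((float)img1.cols, 0);
    corners[2] = Point2f((float)img1.cols, (float)img1.rows);
    corners[3] = Point2f(0, (float)img1.rows);
    perspectiveTransform(corners, warped, H);

    float maxX = (float)img2.cols, maxY = (float)img2.rows;
    for (size_t i = 0; i < warped.size(); i++)
    {
        maxX = max(maxX, warped[i].x);
        maxY = max(maxY, warped[i].y);
    }

    // Canvas large enough for img2 plus the warped img1 (corners mapped to
    // negative coordinates are simply clipped in this naive version).
    Size canvasSize(cvRound(maxX), cvRound(maxY));
    Mat canvas;
    warpPerspective(img1, canvas, H, canvasSize);

    // Overwrite the overlap with img2; a real stitcher would blend the seam instead.
    img2.copyTo(canvas(Rect(0, 0, img2.cols, img2.rows)));
    return canvas;
}

With this in place, something like imshow("naive stitch", stitchNaive(img1, img2, H)); after the registration step would give a first, crude look at the mosaic before moving on to proper blending.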