  • OpenCV learning + pitfalls

    Environment

    • ubuntu 19.04
    • vscode 1.37.0
    • opencv 3.4.7
    • cmake 3.13.4

    First, a respectful nod to this digital image processing outline by a big-name expert:
    https://bitlecture.github.io/notes/数字图像处理/

    Then I started learning OpenCV by following the demos on Mao Xingyun's blog.
    Practically speaking, though, the following blog series might be even more useful:
    https://blog.csdn.net/morewindows/article/category/1291764

    https://www.cnblogs.com/long5683/p/10094122.html

    After studying for a bit, you realize the vision work used in RoboMaster (judging from this open-source write-up alone) is not that hard:

    https://blog.csdn.net/u010750137/article/details/91344986

    https://blog.csdn.net/qq_31669419/article/details/53053321

    It was last year's rune game, rather, that involved something closer to machine learning — there's more room for research there.

    So let's begin the road of vision learning.

    File reading and output
    https://blog.csdn.net/poem_qianmo/article/details/20537737

    Defining an image

    Mat image = imread("Filename");
    namedWindow("Windowname");
    imshow("Windowname",image);
    

    Note that the image must be placed in the build folder; if imread fails, imshow reports this error:

    error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'imshow'
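
    Since the heading also promises output, here is the mirror operation, imwrite — a minimal sketch (the output name "copy.jpg" is my example, not from the post):

    Mat image = imread("Filename");
    if(!image.empty())
    {
        imwrite("copy.jpg",image);    // encoder chosen by file extension
    }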
    

    Video reading

    VideoCapture cap;
    cap.open("Filename");
    // to open a camera instead:
    cap.open(0);
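
    Once opened, frames are read one at a time; a minimal playback-loop sketch (my own — the window name and the 30 ms delay are arbitrary choices):

    Mat frame;
    while(cap.read(frame))                  // read() returns false when frames run out
    {
        imshow("video",frame);
        if(char(waitKey(30)) == 'q') break;
    }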
    

    Ways to check whether the read succeeded:

    // Method 1
    if(!image.data){ printf("failed to read image"); }
    // Method 2
    if(image.empty()){ printf("failed to read image"); }
    

    Marking out a region of interest (ROI)
    https://blog.csdn.net/poem_qianmo/article/details/20911629

    Mat imageROI;
    // Method 1
    imageROI = srcImage4(Rect(200,250,logoImage.cols,logoImage.rows));
    // Method 2
    imageROI = srcImage4(Range(250,250+logoImage.rows),Range(200,200+logoImage.cols));
    

    Image transformations should be quite important too:
    https://blog.csdn.net/xiaowei_cqu/article/details/7616044

    Linear blending of images
    addWeighted can blend two images directly:

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int main()
    {
        double alphavalue = 0.5;
        double betavalue;
        
        Mat satori = imread("satori.jpg");
        Mat name = imread("name.png");
    
        if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
        if(name.empty()){cout << "failed to read image name" << endl;return -1;}
    
        betavalue = 1 - alphavalue;
        // carve an ROI out of satori
        Mat ROI = satori(Rect(0,0,name.cols,name.rows));
        // blend name into the ROI carved out of satori
        addWeighted(ROI,alphavalue,name,betavalue,0.,ROI);
    
        namedWindow("blended");
        imshow("blended",satori);
    
        waitKey();
        return 0;
    }
    

    Splitting/merging color channels
    split()/merge()

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int main()
    {    
        Mat satori = imread("satori.jpg");
        Mat name = imread("name.png",0);
        vector<Mat>channels;
        Mat blue_channel;
    
        if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
        if(name.empty()){cout << "failed to read image name" << endl;return -1;}
    
        // split into separate color channels
        split(satori,channels);
        blue_channel = channels.at(0);
    
        addWeighted(blue_channel(Rect(0,0,name.cols,name.rows)),1.0,name,0.5,0,blue_channel(Rect(0,0,name.cols,name.rows)));
        // merge the channels back together
        merge(channels,satori);
    
        namedWindow("混合效果");
        imshow("混合效果",satori);
    
        waitKey();
        return 0;
    }
    

    From the channel point of view, you can strip away two of the channels and keep just one for the merge, producing a single-color image.
    OpenCV lets you specify the image type — e.g. CV_8UC1 means 8-bit unsigned, 1 channel — so the operation here is quite simple: use black to stand in for the other two channels (black meaning gray value 0) and hand a black plane to each of them.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int main()
    {    
        Mat satori = imread("satori.jpg");
        vector<Mat> channels(satori.channels());
        vector<Mat> channels_mix(satori.channels());
        Mat mixed;
    
        if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
    
        int w = satori.cols;
        int h = satori.rows;
    
        split(satori,channels);
    
        Mat black;
        black.create(h,w,CV_8UC1);
        black = Scalar(0);
    
        channels_mix[0] = channels[0];
        channels_mix[1] = black;
        channels_mix[2] = black;
    
        merge(channels_mix,mixed);
    
         imshow("mixed",mixed);
    
        waitKey();
        return 0;
    }
    

    To round out channels, ROI, and linear blending, one more thing: drawing a rectangle?
    https://blog.csdn.net/wc781708249/article/details/78518447

    Knowing how to use rectangle() is enough — see the sketch below.
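
    A minimal sketch of rectangle() (the coordinates, color, and thickness here are made-up values of mine):

    // draw a 2px-thick green rectangle on srcImage; Rect is (x,y,width,height)
    rectangle(srcImage,Rect(50,50,100,80),Scalar(0,255,0),2);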

    This one is edge finding — also looks like a fun demo:
    https://www.cnblogs.com/skyfsm/p/6890863.html

    Still running demos — using the same image as a guinea pig has its charm. As for the trackbar, it basically provides something like the online debugging you get in embedded development?
    https://blog.csdn.net/poem_qianmo/article/details/21479533

    On the vector data type:
    https://www.cnblogs.com/mr-wid/archive/2013/01/22/2871105.html

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    Mat satori;
    int threval = 160;
    
    static void trace_bar(int,void*)
    {
        Mat image = threval > 128? (satori < threval) : (satori > threval);
        vector<vector<Point>> contours;
        vector<Vec4i> hierarchy;
    
        findContours(image,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE);
    
        Mat dst = Mat::zeros(satori.size(),CV_8UC3);
    
        if(!contours.empty() && !hierarchy.empty())
        {
            for (int i = 0; i >=0; i=hierarchy[i][0])
            {
                Scalar color((rand()&255),(rand()&255),(rand()&255));
                drawContours(dst,contours,i,color,CV_FILLED,8,hierarchy);
            }
        }
    
        imshow("satori",dst);
    }
    
    int main()
    {    
        satori = imread("satori.jpg",0);
    
        if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
    
        namedWindow("satori");
        createTrackbar("treashould","satori",&threval,255,trace_bar);
    
        trace_bar(threval,0);
    
        waitKey();
        return 0;
    }
    

    So it's essentially contour finding plus coloring. Neat.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int Contrast,Bright;
    Mat srcImage,dstImage;
    
    static void trace_bar(int,void *)
    {
        for (int i = 0; i < srcImage.cols; i++)
        {
            for (int j = 0; j < srcImage.rows; j++)
            {
                for (int k = 0; k < 3; k++)
                {
                    dstImage.at<Vec3b>(j,i)[k] = saturate_cast<uchar>((Contrast*0.01)*srcImage.at<Vec3b>(j,i)[k] + Bright);
                }   
            }
        }
        
        imshow("satori",dstImage);
    }
    
    int main()
    {    
        srcImage = imread("satori.jpg");
    
        if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
    
        dstImage = Mat::zeros(srcImage.size(),srcImage.type());
    
        Contrast = 80;
        Bright = 80;
    
        namedWindow("satori");
        createTrackbar("contrast","satori",&Contrast,255,trace_bar);
        createTrackbar("bright","satori",&Bright,255,trace_bar);
    
        trace_bar(Contrast,0);
        trace_bar(Bright,0);
    
        waitKey();
        return 0;
    }
    

    This demo adjusts brightness and contrast per pixel; I hadn't expected it to be such a simple linear relation (dst = contrast * src + bright), plus direct operations on individual pixels.
    By now we can see that one way to represent an image is as the collection of values image.at<Vec3b>(row,col)[channel] — note Mat::at indexes as (row, col), i.e. (y, x) — and together those values carry the two key pieces of information: pixel position and intensity.

    On to filtering:
    https://blog.csdn.net/poem_qianmo/article/details/22745559
    https://blog.csdn.net/xiaowei_cqu/article/details/7785365

    Box filtering — the boxFilter function
    Mean (neighborhood-average) filtering — the blur function
    Gaussian filtering — the GaussianBlur function
    Median filtering — the medianBlur function
    Bilateral filtering — the bilateralFilter function

    https://wenku.baidu.com/view/f55e1bc6f90f76c661371ac5.html
    2D convolution is quite useful — the Sobel operator used later for edge detection is one instance; a filter2D sketch follows below.

    https://blog.csdn.net/dang_boy/article/details/76150067
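
    The links above are about 2D convolution; OpenCV exposes it directly as filter2D, so before the ready-made blur functions below, here is a minimal custom-kernel sketch (the 3x3 sharpening kernel is my example, not from the original demos):

    #include "opencv2/opencv.hpp"
    
    using namespace cv;
    
    int main()
    {
        Mat src = imread("satori.jpg");
        if(src.empty()){return -1;}
    
        // a common 3x3 sharpening kernel; filter2D slides it over every pixel
        Mat kernel = (Mat_<float>(3,3) <<  0,-1, 0,
                                          -1, 5,-1,
                                           0,-1, 0);
        Mat dst;
        filter2D(src,dst,src.depth(),kernel);
    
        imshow("filter2D",dst);
        waitKey();
        return 0;
    }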

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    
    int main()
    {    
        Mat srcImage = imread("satori.jpg");
        if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
    
        Mat dstImage1,dstImage2,dstImage3,dstImage4,dstImage5;
    
        dstImage1 = srcImage.clone();
        dstImage2 = srcImage.clone();
        dstImage3 = srcImage.clone();
        dstImage4 = srcImage.clone();
        dstImage5 = srcImage.clone();
    
        imshow("原图",srcImage);
    
        boxFilter(srcImage,dstImage1,-1,Size(5,5));
        imshow("box filter",dstImage1);
    
        blur(srcImage,dstImage2,Size(5,5));
        imshow("均值滤波",dstImage2);
     
        GaussianBlur(srcImage,dstImage3,Size(3,3),0,0);
        imshow("高斯滤波",dstImage3);
    
        medianBlur(srcImage,dstImage4,5);
        imshow("中值滤波",dstImage4);
    
        bilateralFilter(srcImage,dstImage5,25,25*2,25/2);
        imshow("双边滤波",dstImage5);
    
        waitKey();
        destroyAllWindows();
    
        return 0;
    }
    

    Just a quick run of the basic filters. Size(w,h) sets the kernel size, and the kernel size determines how strong the blur is.

    Next, the nonlinear filters:
    median filtering and bilateral filtering.

    The effect of bilateral filtering is almost magical — it smoothed away the aged-texture-like marks on the original. Excellent skin-smoothing material?

    Dilation and erosion

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int Elem_Size = 3;
    int value1;
    
    Mat srcImage,dstImage;
    
    static void tracebar(int,void*)
    {
        Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));
        
        if(value1 == 0)
        {
            erode(srcImage,dstImage,element);
        }
       else
       {
           dilate(srcImage,dstImage,element);
       }
       
        
        imshow("satori",dstImage);
    }
    
    int main()
    {    
        srcImage = imread("satori.jpg");
        if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
    
        Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));
        erode(srcImage,dstImage,element);
    
        imshow("satori",dstImage);
    
        createTrackbar("腐蚀/膨胀","satori",&value1,1,tracebar);
        createTrackbar("内核尺寸","satori",&Elem_Size,21,tracebar);
    
        tracebar(value1,0);
        tracebar(Elem_Size,0);
    
        while(char(waitKey(1)) != 'q');
        
        return 0;
    }
    

    Erosion grows the dark pixels; dilation grows the bright pixels.

    Building on these, there are also opening, closing, black-hat, and more......
    Opening essentially separates thinly connected regions, while closing fills in small cracks:
    https://blog.csdn.net/hanshanbuleng/article/details/80657148

    Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));
    
        morphologyEx(srcImage,dstImage,MORPH_OPEN,element);
    

    Just change the third parameter (MORPH_OPEN, MORPH_CLOSE, MORPH_GRADIENT, MORPH_TOPHAT, MORPH_BLACKHAT...).

    At last, the happy operator part.

    Before the specifics, let's look at the general steps of edge detection.

    • 1) Filtering: edge detection algorithms rely mainly on the first and second derivatives of image intensity, but derivatives are sensitive to noise, so a filter is needed to improve the detector's behavior under noise. The common choice is Gaussian filtering: discretize the Gaussian function into a set of normalized Gaussian kernels (see the article "高斯滤波原理及其编程离散化实现方法"), then take a kernel-weighted sum at every point of the image's gray matrix (implementation below).

    • 2) Enhancement: enhancement rests on determining how much the intensity changes in each point's neighborhood, highlighting the points whose neighborhood intensity changes significantly. In code this comes down to computing gradient magnitudes.

    • 3) Detection: after enhancement, many points in a neighborhood have fairly large gradient magnitudes, yet in a given application not all of them are the edge points we want, so some method is needed to keep or discard points. In practice the common approach is thresholding.

    Edge detection must be one of the most-used algorithms in RoboMaster; armor-plate recognition probably relies mainly on it — detect the edges, then run solvePnP.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int Elem_Size = 1;
    int value1;
    
    Mat srcImage,dstImage;
    
    int main()
    {    
        srcImage = imread("satori.jpg");
        if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
    
        Canny(srcImage,dstImage,300,100);
    
        imshow("satori",dstImage);
        
        while (char(waitKey(1)) != 'q');
        
        return 0;
    }
    

    With Canny the effect is immediate; tuning the two thresholds helps suppress noise.

    The Sobel operator computes the gradient separately along the x and y directions; compared with Canny, it earns its keep in fairly specific scenarios where the features lie along x/y.

        #include "opencv2/core/core.hpp"
        #include "opencv2/highgui/highgui.hpp"
        #include "opencv2/opencv.hpp"
        #include "opencv2/imgproc/imgproc.hpp"
        #include "iostream"
    
        using namespace cv;
        using namespace std;
    
        int Elem_Size = 1;
        int value1;
    
        Mat srcImage,dstImage,dstImage2,dstImage3;
    
        int main()
        {    
            Mat satori;
    
            satori = imread("satori.jpg");
            if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
    
            imshow("image",satori);
    
            bilateralFilter(satori,srcImage,25,25*2,25/2);
    
            cvtColor(srcImage,srcImage,CV_BGR2GRAY);    // imread loads BGR, so convert from BGR
    
    
            Sobel(srcImage,dstImage,srcImage.depth(),1,0,3,1,0,BORDER_DEFAULT);
    
            Sobel(srcImage,dstImage2,srcImage.depth(),0,1,3,1,0,BORDER_DEFAULT);
    
            imshow("satori",dstImage);
            imshow("satori2",dstImage2);
            
            addWeighted(dstImage,1,dstImage2,1,1,dstImage3);
    
            imshow("satori3",dstImage3);
    
            while (char(waitKey(1)) != 'q');
            
            return 0;
        }
    

    Bilateral filtering combined with Sobel along x and y, then merged; the result is decent.
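
    One caveat the demo above glosses over: with ddepth equal to the 8-bit source depth, negative gradient values get clipped to zero, so roughly half of every edge disappears. The usual pattern — a sketch of mine, reusing the grayscale srcImage from the demo — is to compute into CV_16S and convert back:

    Mat grad_x,grad_y,abs_x,abs_y,grad;
    Sobel(srcImage,grad_x,CV_16S,1,0,3);    // signed 16-bit keeps the negative gradients
    Sobel(srcImage,grad_y,CV_16S,0,1,3);
    convertScaleAbs(grad_x,abs_x);          // |value|, saturated back to 8U
    convertScaleAbs(grad_y,abs_y);
    addWeighted(abs_x,0.5,abs_y,0.5,0,grad);
    imshow("sobel 16S",grad);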

    Then I tried bilateral filtering followed by Laplacian detection — the result is even better, well well.

        #include "opencv2/core/core.hpp"
        #include "opencv2/highgui/highgui.hpp"
        #include "opencv2/opencv.hpp"
        #include "opencv2/imgproc/imgproc.hpp"
        #include "iostream"
    
        using namespace cv;
        using namespace std;
    
        int Elem_Size = 1;
        int value1;
    
        Mat srcImage,dstImage,dstImage2,dstImage3;
    
        int main()
        {    
            Mat satori;
    
            satori = imread("satori.jpg");
            if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
    
            imshow("image",satori);
    
            bilateralFilter(satori,srcImage,25,25*2,25/2);
    
            cvtColor(srcImage,srcImage,CV_BGR2GRAY);    // imread loads BGR, so convert from BGR
    
            Laplacian(srcImage,dstImage,srcImage.depth());
    
            imshow("satori",dstImage);
            
            while (char(waitKey(1)) != 'q');
            
            return 0;
        }
    

    There is also Scharr, which can be seen as a further refinement of Sobel? Let's see how it does.

        #include "opencv2/core/core.hpp"
        #include "opencv2/highgui/highgui.hpp"
        #include "opencv2/opencv.hpp"
        #include "opencv2/imgproc/imgproc.hpp"
        #include "iostream"
    
        using namespace cv;
        using namespace std;
    
        int Elem_Size = 1;
        int value1;
    
        Mat srcImage,dstImage,dstImage2,dstImage3;
    
        int main()
        {    
            Mat satori;
    
            satori = imread("satori.jpg");
            if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
    
            bilateralFilter(satori,srcImage,25,25*2,25/2);
           imshow("image",srcImage);
    
            cvtColor(srcImage,srcImage,CV_BGR2GRAY);    // imread loads BGR, so convert from BGR
    
    
            Scharr(srcImage,dstImage,srcImage.depth(),1,0,1,0,BORDER_DEFAULT);
    
            Scharr(srcImage,dstImage2,srcImage.depth(),0,1,1,0,BORDER_DEFAULT);
    
            imshow("satori",dstImage);
            imshow("satori2",dstImage2);
            
            addWeighted(dstImage,1,dstImage2,1,1,dstImage3);
    
            imshow("satori3",dstImage3);
    
            while (char(waitKey(1)) != 'q');
            
            return 0;
        }
    
    

    Couldn't see any improvement; if anything it brought in more noise...... probably my parameters need more tuning (or, looked at another way, it's simply more sensitive?).

    Alright, we've reached the happy resize stage, plus pyrUp and pyrDown, the pyramid up/downscaling pair (a short sketch follows after the demo):
    https://blog.csdn.net/poem_qianmo/article/details/26157633

    Not much to say here: it's all about sampling or interpolating as sensibly as possible (resize takes an INTER_* interpolation flag for exactly this), and the Gaussian function proves its worth yet again. Truly a great function — it already startled me back when I first learned the law of large numbers.

        #include "opencv2/core/core.hpp"
        #include "opencv2/highgui/highgui.hpp"
        #include "opencv2/opencv.hpp"
        #include "opencv2/imgproc/imgproc.hpp"
        #include "iostream"
    
        using namespace cv;
        using namespace std;
    
        int main()
        {    
            Mat tmpImage,dstImage;
    
            tmpImage = imread("satori.jpg");
            if(tmpImage.empty()){cout << "failed to read image satori" << endl;return -1;}
    
            dstImage = tmpImage;
            while (1)
            {
                char key = waitKey(1);
    
                switch (key)
                {
                case 'q':
                    return 0;
                    break;
                
                case 'w':
                    resize(tmpImage,dstImage,Size(tmpImage.cols*2,tmpImage.rows*2));
                    break;
    
                case 's':
                    resize(tmpImage,dstImage,Size(tmpImage.cols/2,tmpImage.rows/2));
                    break;
    
                default:
                    break;
                }
    
                tmpImage = dstImage;
                imshow("satori",dstImage);
            }
        }
    
    

    Unsurprisingly, shrinking and then enlarging sends the image quality into orbit.
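
    Since pyrUp/pyrDown never made it into the demo, a minimal sketch of my own (each call halves or doubles both dimensions with Gaussian smoothing, so a down-then-up round trip comes back visibly blurrier):

    Mat down,up;
    pyrDown(tmpImage,down);    // Gaussian blur, then drop every other row and column
    pyrUp(down,up);            // upsample and blur; detail lost in pyrDown never returns
    imshow("pyramid round trip",up);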

    Hough line/circle detection

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int main()
    {    
        Mat srcImage,dstImage,midImage;
    
        srcImage = imread("test.jpg");
        if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
    
        Canny(srcImage,midImage,400,100,3);
    
        cvtColor(midImage,dstImage,CV_GRAY2BGR);
    
        vector<Vec2f> lines;
        HoughLines(midImage,lines,1,CV_PI/180,150,0,0);    // rho step = 1px, theta step = 1 degree, accumulator threshold = 150
    
        for (size_t i = 0; i < lines.size(); i++)
        {
            float rho = lines[i][0] , theta = lines[i][1];
            Point pt1,pt2;
            double a = cos(theta),b = sin(theta);
            double x0 = a*rho,y0 = b*rho;
            pt1.x = cvRound(x0 + 1000 * (-b));
            pt1.y = cvRound(y0 + 1000 * (a));
            pt2.x = cvRound(x0 - 1000 * (-b));
            pt2.y = cvRound(y0 - 1000 * (a));
    
            line(dstImage,pt1,pt2,Scalar(55,100,95),1,CV_AA);
        }    
    
        imshow("dst",dstImage);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    This detection made my head spin.
    There is also the HoughLinesP variant, which is interesting.

    Tried it out: HoughLinesP seems to be a better detector than HoughLines, because it can return the endpoints of each segment and has more practical tunable parameters (minimum segment length to report, maximum allowed gap, and so on).

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int main()
    {    
        Mat srcImage,dstImage,midImage;
    
        srcImage = imread("test.jpg");
        if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
    
        Canny(srcImage,midImage,400,100,3);
    
        cvtColor(midImage,dstImage,CV_GRAY2BGR);
    
        vector<Vec4i> lines;
        HoughLinesP(midImage,lines,1,CV_PI/180,150,0,0);    // threshold = 150, minLineLength = 0, maxLineGap = 0
    
        for (size_t i = 0; i < lines.size(); i++)
        {
            Vec4i l = lines[i];
            line(dstImage,Point(l[0],l[1]),Point(l[2],l[3]),Scalar(0,100,0),5,CV_AA);
        }    
    
        imshow("dst",dstImage);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    There is also HoughCircles.
    RM doesn't really need circle detection, unless you want to detect projectiles (and who would bother), but RC should have uses for it? Several seasons' challenges involved throwing balls.

    In HoughCircles, the 5th parameter (minDist) sets the minimum distance between detected circle centers, which filters out overlapping candidates; parameters 6 and 7 (param1, param2) are the Canny high threshold and the accumulator threshold — also quite useful.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int main()
    {    
        Mat srcImage,dstImage,midImage;
    
        srcImage = imread("circle.jpeg");
        if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
    
        cvtColor(srcImage,midImage,CV_BGR2GRAY);
        GaussianBlur(midImage,midImage,Size(3,3),1,1);
    
        vector<Vec3f> circles;
        HoughCircles(midImage,circles,CV_HOUGH_GRADIENT,1.5,20,300,100,0,0);    // dp, minDist, param1 (Canny high), param2 (accumulator), min/max radius
    
        for (size_t i = 0; i < circles.size(); i++)
        {
            Point center(cvRound(circles[i][0]),cvRound(circles[i][1]));
            int radius = cvRound(circles[i][2]);
    
            circle(srcImage,center,radius,Scalar(0,100,0),3);
        }
        
    
        imshow("dst",srcImage);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    After some parameter tuning it detects the circles I wanted in the image fairly well.

    Digging into the source shows that HoughCircle and HoughCircleP are actually built on top of HoughCircle2 (the old Hough circle detection); I won't pick at the internals too much here — plenty of time for that later.

    On to the happy flood-fill algorithm.

    Functionally, floodFill is just like Photoshop's magic wand — all in all a very important capability.

    https://blog.csdn.net/poem_qianmo/article/details/28261997

    The principle is easy to grasp: pick a point as the seed, compute differences against the surrounding pixels starting from it, and the pixels within the threshold become the next batch of seeds.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int main()
    {    
        Mat srcImage,dstImage,midImage;
    
        srcImage = imread("satori.jpg");
        if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
    
        Rect ccomp;
        floodFill(srcImage,Point(0,0),Scalar(0,0,0),&ccomp,Scalar(10,10,10),Scalar(10,10,10));    // the two Scalars are loDiff and upDiff
    
        imshow("dst",srcImage);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    The last two Scalar parameters frame the selection thresholds (loDiff/upDiff); after some tuning it cuts the figure out quite nicely.

    floodFill also has a mask mode, which prevents the fill from flooding into the non-zero pixels of the mask.

    Finally, the last parameter of floodFill is a 32-bit operand whose high 8 bits, middle 8 bits, and low 8 bits all carry meaning — fairly involved; rather than studying it exhaustively here, see the sketch below.
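
    To make the mask-mode and flags remarks concrete, a hedged sketch of my own (thresholds and the seed point are arbitrary). The mask must be 8-bit single-channel and 2 pixels larger than the image in each dimension; in the flags word, the low 8 bits are the connectivity, the middle 8 bits the value written into the mask, and the high bits are option flags:

    Mat mask = Mat::zeros(srcImage.rows+2,srcImage.cols+2,CV_8UC1);
    int flags = 4                        // low 8 bits: 4-connectivity
              | (255 << 8)               // middle 8 bits: value filled into the mask
              | FLOODFILL_FIXED_RANGE    // compare against the seed, not the neighbours
              | FLOODFILL_MASK_ONLY;     // write only the mask, leave srcImage untouched
    floodFill(srcImage,mask,Point(0,0),Scalar(),0,
              Scalar(10,10,10),Scalar(10,10,10),flags);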

    Emmmm, on to corner detection.

    Corner detection should be fairly important for both RC and RM; in RM, after recognizing the light bars you have to mark the four corner keypoints before you can run the PnP solve.

    Image features can be grouped into the following three types:

    • <1> edges
    • <2> corners (interesting keypoints)
    • <3> blobs (interesting regions)

    In current image processing, corner detection algorithms fall into three classes:

    • <1> corner detection based on gray-scale images
    • <2> corner detection based on binary images
    • <3> corner detection based on contour curves

    Corner detection is yet another major application of gradient computation (a moment's thought makes that obvious).

    A companion to corner detection with very high name recognition is binarization — threshold comes in 5 flavors in total (like Kong Yiji asking how many ways there are to write the character 茴?)

    Let's try binarization first.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int thresholdvalue;
    Mat src_image,dst_image1,dst_image2,dst_image3,dst_image4,dst_image5;
    
    static void tracebar(int,void*)
    {
        threshold(src_image,dst_image1,thresholdvalue,255,THRESH_BINARY);
    
        imshow("test",dst_image1);
    }
    
    int main()
    {    
    
        src_image = imread("satori.jpg");
        if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
    
        cvtColor(src_image,src_image,CV_BGR2GRAY);
    
        namedWindow("test");
    
        createTrackbar("threshold","test",&thresholdvalue,255,tracebar);
    
        tracebar(thresholdvalue,0);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    I won't try every one of the five types (THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV); in any case the effect is immediate.

    Worth mentioning: the image produced by cornerHarris only shows up after thresholding with a very small value.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int thresholdvalue;
    Mat src_image,dst_image1,dst_image2,dst_image3,dst_image4,dst_image5;
    Mat mid_image;
    
    int main()
    {    
        src_image = imread("satori.jpg");
        if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
    
        cvtColor(src_image,src_image,CV_BGR2GRAY);
    
        namedWindow("test");
    
        cornerHarris(src_image,mid_image,5,3,0.01);
    
       threshold(mid_image,dst_image1,0.0001,255,THRESH_BINARY);
    
        imshow("test",dst_image1);
        
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    Evidently the values coming out of Harris detection are scaled very small and don't fit the usual 0-255 gray-level convention, so to use them you generally need further processing — binarization, for instance, or normalize.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int thresholdvalue;
    Mat src_image,dst_image1,dst_image2,dst_image3,dst_image4,dst_image5;
    Mat mid_image;
    
    static void tracebar(int,void*)
    {
        threshold(dst_image1,dst_image2,thresholdvalue,255,THRESH_BINARY);
    
        imshow("test",dst_image2);
    }
    
    int main()
    {    
        src_image = imread("satori.jpg");
        if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
    
        cvtColor(src_image,src_image,CV_BGR2GRAY);
    
        namedWindow("test");
    
        cornerHarris(src_image,mid_image,5,3,0.01);
        normalize(mid_image,dst_image1,0,255,NORM_MINMAX,CV_32FC1,Mat());
    
        createTrackbar("threshold","test",&thresholdvalue,255,tracebar);
    
        tracebar(thresholdvalue,0);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    Now the trackbar is finally of some use.

    Remapping and SURF keypoint detection — SURF should be a fairly general-purpose keypoint detection algorithm, all things considered.

    https://www.cnblogs.com/dengxiaojun/p/5302778.html

    Remapping mainly uses the remap function, whose mathematical definition is:

    dst(x,y) = src(map_1(x,y), map_2(x,y))

    where map_1 and map_2 are both passed into remap as parameters (note the map type is not arbitrary: create them as CV_32FC1).

    A simple application: flipping an image into its mirror.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace std;
    
    int thresholdvalue;
    Mat src_image,dst_image1,dst_image2;
    Mat mid_image;
    
    int main()
    {    
        src_image = imread("satori.jpg");
        if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
    
        Mat map_x,map_y;
    
        map_x.create(src_image.size(),CV_32FC1);
        map_y.create(src_image.size(),CV_32FC1);
    
        for (int i = 0; i < src_image.cols; i++)
        {
            for (int j = 0; j < src_image.rows; j++)
            {
                map_x.at<float>(j,i) = static_cast<float>(i);
                map_y.at<float>(j,i) = static_cast<float>(src_image.rows-j);
            }
        }
        
        remap(src_image,dst_image1,map_x,map_y,CV_INTER_LINEAR,BORDER_CONSTANT,Scalar(0,0,0));
    
        imshow("dst1",dst_image1);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    Nothing difficult here; just be a little careful with the indexing when doing per-pixel operations.

    Now let's turn to the highlight — our dear SURF feature detection algorithm. In OpenCV, SURF is wrapped into a class with a pile of assorted operations you can call.

    Let's try running drawKeypoints.

    And then something fun happened:

    https://blog.csdn.net/zhounanzhaode/article/details/50302385

    So to use SURF I have to do yet more setup. Truly annoying.

    Installing directly via apt-get failed; adding the PPA errors out with:

    ... does not have a Release file. N: Updating from such a repository can't be done securely, so it is disabled by default.

    A method I worked out myself did succeed; the concrete steps:

    Download this repository from GitHub:

    https://github.com/opencv/opencv_contrib/tree/3.4

    Then checkout the tag matching your OpenCV version.

    Copy the modules you need from under opencv_contrib/modules/.... into opencv/modules/.

    Then, in the CMakeLists under opencv/modules, edit this line:

    set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect dnn ml flann photo stitching xfeatures2d)
    
    

    Just add the new modules you need.

    Then the same cmake, make, make install trio as during installation; if it succeeds, you can include nonfree.hpp normally.

    emmmmmm

    A quick look shows that since OpenCV 3 the SURF API is completely different from OpenCV 2; apart from drawKeypoints surviving, pretty much everything else changed.

    Grabbed a tutorial at random and it turned out to be RoboMaster-related again, hahahaha:

    https://www.cnblogs.com/long5683/p/9692987.html

    Then it turned out my earlier setup wasn't clean... the build produced this:

    error: (-213:The function/feature is not implemented) This algorithm is patented and is excluded in this configuration; Set OPENCV_ENABLE_NONFREE CMake option and rebuild the library in function 'create'
    
    

    So I still had to tweak things at cmake time..... ugh.

    https://blog.csdn.net/zhoukehu_CSDN/article/details/83145026

    Follow that blog, and remember to tick OPENCV_ENABLE_NONFREE in cmake-gui... sigh, this part is genuinely nasty — another solid 20-minute make.

    After the re-make everything was fine.

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/xfeatures2d/nonfree.hpp"
    #include "iostream"
    
    using namespace cv;
    using namespace cv::xfeatures2d;
    using namespace std;
    
    int thresholdvalue;
    Mat src_image,dst_image1,dst_image2;
    Mat mid_image;
    
    int main()
    {    
        src_image = imread("satori.jpg");
        if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
    
        int minHessian = 400;
    
        Ptr<SURF> detector = SURF::create(minHessian);
        vector<KeyPoint> keypoints;
        detector->detect(src_image,keypoints,Mat());
    
        drawKeypoints(src_image,keypoints,dst_image1,Scalar(0,0,0));
    
        imshow("dst",dst_image1);
    
        while(char(waitKey(1)) != 'q');
        return 0;
    }
    
    

    That's about it for following demos, for now.

    What still remains to learn includes camera-related topics (intrinsic calibration), using solvePnP, and so on; I also haven't looked into some of the useful features in the video module.

    Next I'll study a few real projects — consolidating what I've learned while building up working experience.
