  • Chapter 2: Getting Prepared Before Setting Out, 2.1 A Guided Tour of the Official OpenCV Sample Programs

    1. Under the OpenCV installation directory you can find the sample code that ships with OpenCV, located in the ...\opencv\sources\samples directory.

    The folder named c holds sample programs for old releases such as OpenCV 1.0, while the folder named cpp holds the sample programs for the newer OpenCV 2.x releases.

    Under ...\opencv\sources\samples\cpp\tutorial_code you will find the sample programs that accompany the official tutorials, organized by OpenCV component module.

    Running the sample programs

    1. Color object tracking: Camshift

    #include "opencv2/video/tracking.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/highgui/highgui.hpp"
    
    #include <iostream>
    #include <ctype.h>
    
    using namespace cv;
    using namespace std;
    
    Mat image;
    
    bool backprojMode = false;
    bool selectObject = false;
    int trackObject = 0;
    bool showHist = true;
    Point origin;
    Rect selection;
    int vmin = 10, vmax = 256, smin = 30;
    
    static void onMouse(int event, int x, int y, int, void*)
    {
        if (selectObject)
        {
            selection.x = MIN(x, origin.x);
            selection.y = MIN(y, origin.y);
            selection.width = std::abs(x - origin.x);
            selection.height = std::abs(y - origin.y);
    
            selection &= Rect(0, 0, image.cols, image.rows);
        }
    
        switch (event)
        {
        case CV_EVENT_LBUTTONDOWN:
            origin = Point(x, y);
            selection = Rect(x, y, 0, 0);
            selectObject = true;
            break;
        case CV_EVENT_LBUTTONUP:
            selectObject = false;
            if (selection.width > 0 && selection.height > 0)
                trackObject = -1;
            break;
        }
    }
    
    static void help()
    {
        cout << "
    This is a demo that shows mean-shift based tracking
    "
            "You select a color objects such as your face and it tracks it.
    "
            "This reads from video camera (0 by default, or the camera number the user enters
    "
            "Usage: 
    "
            "   ./camshiftdemo [camera number]
    ";
    
        cout << "
    
    Hot keys: 
    "
            "	ESC - quit the program
    "
            "	c - stop the tracking
    "
            "	b - switch to/from backprojection view
    "
            "	h - show/hide object histogram
    "
            "	p - pause video
    "
            "To initialize tracking, select the object with mouse
    ";
    }
    
    const char* keys =
    {
        "{1|  | 0 | camera number}"
    };
    
    int main(int argc, const char** argv)
    {
        help();
    
        VideoCapture cap;
        Rect trackWindow;
        int hsize = 16;
        float hranges[] = { 0, 180 };
        const float* phranges = hranges;
        CommandLineParser parser(argc, argv, keys);
        int camNum = parser.get<int>("1");
    
        cap.open(camNum);
    
        if (!cap.isOpened())
        {
            help();
            cout << "***Could not initialize capturing...***
    ";
            cout << "Current parameter's value: 
    ";
            parser.printParams();
            return -1;
        }
    
        namedWindow("Histogram", 0);
        namedWindow("CamShift Demo", 0);
        setMouseCallback("CamShift Demo", onMouse, 0);
        createTrackbar("Vmin", "CamShift Demo", &vmin, 256, 0);
        createTrackbar("Vmax", "CamShift Demo", &vmax, 256, 0);
        createTrackbar("Smin", "CamShift Demo", &smin, 256, 0);
    
        Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
        bool paused = false;
    
        for (;;)
        {
            if (!paused)
            {
                cap >> frame;
                if (frame.empty())
                    break;
            }
    
            frame.copyTo(image);
    
            if (!paused)
            {
                cvtColor(image, hsv, COLOR_BGR2HSV);
    
                if (trackObject)
                {
                    int _vmin = vmin, _vmax = vmax;
    
                    inRange(hsv, Scalar(0, smin, MIN(_vmin, _vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                    int ch[] = { 0, 0 };
                    hue.create(hsv.size(), hsv.depth());
                    mixChannels(&hsv, 1, &hue, 1, ch, 1);
    
                    if (trackObject < 0)
                    {
                        Mat roi(hue, selection), maskroi(mask, selection);
                        calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                        normalize(hist, hist, 0, 255, CV_MINMAX);
    
                        trackWindow = selection;
                        trackObject = 1;
    
                        histimg = Scalar::all(0);
                        int binW = histimg.cols / hsize;
                        Mat buf(1, hsize, CV_8UC3);
                        for (int i = 0; i < hsize; i++)
                            buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180. / hsize), 255, 255);
                        cvtColor(buf, buf, CV_HSV2BGR);
    
                        for (int i = 0; i < hsize; i++)
                        {
                            int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows / 255);
                            rectangle(histimg, Point(i*binW, histimg.rows),
                                Point((i + 1)*binW, histimg.rows - val),
                                Scalar(buf.at<Vec3b>(i)), -1, 8);
                        }
                    }
    
                    calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                    backproj &= mask;
                    RotatedRect trackBox = CamShift(backproj, trackWindow,
                        TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
                    if (trackWindow.area() <= 1)
                    {
                        int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5) / 6;
                        trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                            trackWindow.x + r, trackWindow.y + r) &
                            Rect(0, 0, cols, rows);
                    }
    
                    if (backprojMode)
                        cvtColor(backproj, image, COLOR_GRAY2BGR);
                    ellipse(image, trackBox, Scalar(0, 0, 255), 3, CV_AA);
                }
            }
            else if (trackObject < 0)
                paused = false;
    
            if (selectObject && selection.width > 0 && selection.height > 0)
            {
                Mat roi(image, selection);
                bitwise_not(roi, roi);
            }
    
            imshow("CamShift Demo", image);
            imshow("Histogram", histimg);
    
            char c = (char)waitKey(10);
            if (c == 27)
                break;
            switch (c)
            {
            case 'b':
                backprojMode = !backprojMode;
                break;
            case 'c':
                trackObject = 0;
                histimg = Scalar::all(0);
                break;
            case 'h':
                showHist = !showHist;
                if (!showHist)
                    destroyWindow("Histogram");
                else
                    namedWindow("Histogram", 1);
                break;
            case 'p':
                paused = !paused;
                break;
            default:
                ;
            }
        }
    
        return 0;
    }

    The program tracks a target in the video read from the camera based on the hue distribution of the region you select with the mouse. It mainly uses the CamShift algorithm, short for "Continuously Adaptive Mean-Shift", an improvement on the MeanShift algorithm that is known as the continuously adaptive MeanShift.
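
    Stripped of the demo's UI, trackbar, and histogram-display code, the per-frame core of the tracker comes down to three calls. Below is a minimal sketch against the OpenCV 2.x C++ API; the function name and variables are illustrative only, and hist is assumed to be the hue histogram computed once from the mouse-selected region with calcHist, as in the full program above:

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/video/tracking.hpp>
    using namespace cv;

    // One tracking step: back-project the object's hue histogram and run CamShift.
    RotatedRect trackOnce(const Mat& frameBGR, const Mat& hist, Rect& trackWindow)
    {
        // 1. Convert to HSV and isolate the hue channel.
        Mat hsv, hue, backproj;
        cvtColor(frameBGR, hsv, COLOR_BGR2HSV);
        int ch[] = { 0, 0 };
        hue.create(hsv.size(), hsv.depth());
        mixChannels(&hsv, 1, &hue, 1, ch, 1);

        // 2. Back-project the histogram: each pixel gets the likelihood of belonging to the object.
        float hranges[] = { 0, 180 };
        const float* phranges = hranges;
        calcBackProject(&hue, 1, 0, hist, backproj, &phranges);

        // 3. CamShift iteratively shifts and resizes the search window toward the densest region
        //    of the back projection and returns an oriented bounding box of the tracked object.
        return CamShift(backproj, trackWindow,
                        TermCriteria(TermCriteria::EPS | TermCriteria::COUNT, 10, 1));
    }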
    2. Optical flow

    The optical flow method is an important technique in present-day motion image analysis. Optical flow denotes the velocity of patterns in a time-varying image: when an object moves, the brightness pattern of its corresponding points in the image moves as well. This apparent motion of the image brightness pattern is the optical flow. Optical flow expresses how the image changes and, because it carries information about the target's motion, an observer can use it to determine how the target is moving.
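
    In code, sparse Lucas-Kanade tracking between two consecutive frames reduces to two calls, as in the minimal sketch below (OpenCV 2.x C++ API; the function name and variables are illustrative and not part of the book's sample). The full companion program follows the sketch.

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/video/tracking.hpp>
    #include <vector>
    using namespace cv;

    // Track strong corners from the previous grayscale frame into the current one.
    void lkStep(const Mat& prevGray, const Mat& currGray)
    {
        // 1. Pick up to 500 strong corners in the previous frame.
        std::vector<Point2f> prevPts, currPts;
        goodFeaturesToTrack(prevGray, prevPts, 500, 0.01, 10);
        if (prevPts.empty())
            return;

        // 2. Estimate where each corner moved; status[i] == 1 means the flow for point i was found.
        std::vector<uchar> status;
        std::vector<float> err;
        calcOpticalFlowPyrLK(prevGray, currGray, prevPts, currPts, status, err);
    }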

    //--------------------------------------[Program notes]-------------------------------------------
    //        Description: companion sample program 09 for the OpenCV2 edition of the book "OpenCV3编程入门"
    //        The program comes from the Samples folder under the OpenCV installation directory: moving-target detection with the optical flow method
    //        OS used for development and testing: Windows 7 64bit
    //        IDE version used for development and testing: Visual Studio 2010
    //        OpenCV version used for development and testing: 2.4.9
    //        Revised in November 2014 by @浅墨_毛星云
    //------------------------------------------------------------------------------------------------
    
    
    /************************************************************************
    * Copyright(c) 2011  Yang Xian
    * All rights reserved.
    *
    * File:    opticalFlow.cpp
    * Brief: moving-target detection with the LK optical flow method
    * Version: 1.0
    * Author: Yang Xian
    * Email: xyang2011@sinano.ac.cn
    * Date:    2011/11/18
    * History:
    ************************************************************************/
    
    
    //---------------------------------[Headers and namespaces]----------------------------
    //        Description: the header files and namespaces used by the program
    //-------------------------------------------------------------------------------------------------
    #include <opencv2/video/video.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/core/core.hpp>
    #include <iostream>
    #include <cstdio>
    
    using namespace std;
    using namespace cv;
    
    
    
    
    
    //-----------------------------------[Global function declarations]-----------------------------------------
    //        Description: declare the global functions
    //-------------------------------------------------------------------------------------------------
    void tracking(Mat &frame, Mat &output);
    bool addNewPoints();
    bool acceptTrackedPoint(int i);
    
    //-----------------------------------[Global variable declarations]-----------------------------------------
    //        Description: declare the global variables
    //-------------------------------------------------------------------------------------------------
    string window_name = "optical flow tracking";
    Mat gray;    // current frame (grayscale)
    Mat gray_prev;    // previous frame (grayscale)
    vector<Point2f> points[2];    // points[0]: previous positions of the features, points[1]: their new positions
    vector<Point2f> initial;    // initial positions of the tracked points
    vector<Point2f> features;    // detected features
    int maxCount = 500;    // maximum number of features to detect
    double qLevel = 0.01;    // quality level for feature detection
    double minDist = 10.0;    // minimum distance between two features
    vector<uchar> status;    // tracking status: 1 if the flow for the feature was found, 0 otherwise
    vector<float> err;
    
    
    //--------------------------------[help() function]----------------------------------------------
    //        Description: print the help message
    //-------------------------------------------------------------------------------------------------
    static void help()
    {
        //Print the welcome message and the OpenCV version
        cout << "\n\n\t\t\tThank you very much for purchasing the book 《OpenCV3编程入门》!\n"
            << "\n\n\t\t\tThis is companion sample program #9 of the OpenCV2 edition of the book\n"
            << "\n\n\t\t\t   The OpenCV version currently in use is: " << CV_VERSION
            << "\n\n  ----------------------------------------------------------------------------";
    }
    
    
    //-----------------------------------[main() function]--------------------------------------------
    //        Description: entry point of the console application; our program starts here
    //-------------------------------------------------------------------------------------------------
    int main()
    {
    
        Mat frame;
        Mat result;
    
        VideoCapture capture("1.avi");
    
        help();
        if (capture.isOpened())    // check that the video file / camera was opened successfully
        {
            while (true)
            {
                capture >> frame;
    
                if (!frame.empty())
                {
                    tracking(frame, result);
                }
                else
                {
                    printf(" --(!) No captured frame -- Break!");
                    break;
                }
    
                int c = waitKey(50);
                if ((char)c == 27)
                {
                    break;
                }
            }
        }
        return 0;
    }
    
    //-------------------------------------------------------------------------------------------------
    // function: tracking
    // brief: track feature points from frame to frame
    // parameter: frame    input video frame
    //            output   video frame with the tracking result drawn on it
    // return: void
    //-------------------------------------------------------------------------------------------------
    void tracking(Mat &frame, Mat &output)
    {
        cvtColor(frame, gray, CV_BGR2GRAY);
        frame.copyTo(output);
        // add new feature points if needed
        if (addNewPoints())
        {
            goodFeaturesToTrack(gray, features, maxCount, qLevel, minDist);
            points[0].insert(points[0].end(), features.begin(), features.end());
            initial.insert(initial.end(), features.begin(), features.end());
        }
    
        if (gray_prev.empty())
        {
            gray.copyTo(gray_prev);
        }
        // L-K optical flow motion estimation
        calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
        // discard the feature points that were tracked poorly
        int k = 0;
        for (size_t i = 0; i<points[1].size(); i++)
        {
            if (acceptTrackedPoint(i))
            {
                initial[k] = initial[i];
                points[1][k++] = points[1][i];
            }
        }
        points[1].resize(k);
        initial.resize(k);
        // draw the feature points and their motion trajectories
        for (size_t i = 0; i<points[1].size(); i++)
        {
            line(output, initial[i], points[1][i], Scalar(0, 0, 255));
            circle(output, points[1][i], 3, Scalar(0, 255, 0), -1);
        }
    
        // use the current tracking result as the reference for the next frame
        swap(points[1], points[0]);
        swap(gray_prev, gray);
    
        imshow(window_name, output);
    }
    
    //-------------------------------------------------------------------------------------------------
    // function: addNewPoints
    // brief: decide whether new feature points should be added
    // parameter:
    // return: flag indicating whether points should be added
    //-------------------------------------------------------------------------------------------------
    bool addNewPoints()
    {
        return points[0].size() <= 10;
    }
    
    //-------------------------------------------------------------------------------------------------
    // function: acceptTrackedPoint
    // brief: decide which tracked points to keep
    // parameter:
    // return:
    //-------------------------------------------------------------------------------------------------
    bool acceptTrackedPoint(int i)
    {
        return status[i] && ((abs(points[0][i].x - points[1][i].x) + abs(points[0][i].y - points[1][i].y)) > 2);
    }

    Note that the program needs a source video file named 1.avi in the working directory.

    3. Point tracking: lkdemo

    This one also lives in the corresponding directory ...\opencv\sources\samples\cpp, in the file lkdemo.cpp. Once started, the program automatically opens the camera; press the "r" key to start automatic point tracking and you can observe the effect. If you move an object in front of the camera, the points on the object move along with it.

    #include "opencv2/video/tracking.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/highgui/highgui.hpp"
    
    #include <iostream>
    #include <ctype.h>
    
    using namespace cv;
    using namespace std;
    
    static void help()
    {
        // print a welcome message, and the OpenCV version
        cout << "
    This is a demo of Lukas-Kanade optical flow lkdemo(),
    "
            "Using OpenCV version " << CV_VERSION << endl;
        cout << "
    It uses camera by default, but you can provide a path to video as an argument.
    ";
        cout << "
    Hot keys: 
    "
            "	ESC - quit the program
    "
            "	r - auto-initialize tracking
    "
            "	c - delete all the points
    "
            "	n - switch the "night" mode on/off
    "
            "To add/remove a feature point click it
    " << endl;
    }
    
    Point2f point;
    bool addRemovePt = false;
    
    static void onMouse(int event, int x, int y, int /*flags*/, void* /*param*/)
    {
        if (event == CV_EVENT_LBUTTONDOWN)
        {
            point = Point2f((float)x, (float)y);
            addRemovePt = true;
        }
    }
    
    int main(int argc, char** argv)
    {
        help();
    
        VideoCapture cap;
        TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
        Size subPixWinSize(10, 10), winSize(31, 31);
    
        const int MAX_COUNT = 500;
        bool needToInit = false;
        bool nightMode = false;
    
        if (argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
            cap.open(argc == 2 ? argv[1][0] - '0' : 0);
        else if (argc == 2)
            cap.open(argv[1]);
    
        if (!cap.isOpened())
        {
            cout << "Could not initialize capturing...
    ";
            return 0;
        }
    
        namedWindow("LK Demo", 1);
        setMouseCallback("LK Demo", onMouse, 0);
    
        Mat gray, prevGray, image;
        vector<Point2f> points[2];
    
        for (;;)
        {
            Mat frame;
            cap >> frame;
            if (frame.empty())
                break;
    
            frame.copyTo(image);
            cvtColor(image, gray, COLOR_BGR2GRAY);
    
            if (nightMode)
                image = Scalar::all(0);
    
            if (needToInit)
            {
                // automatic initialization
                goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
                cornerSubPix(gray, points[1], subPixWinSize, Size(-1, -1), termcrit);
                addRemovePt = false;
            }
            else if (!points[0].empty())
            {
                vector<uchar> status;
                vector<float> err;
                if (prevGray.empty())
                    gray.copyTo(prevGray);
                calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize,
                    3, termcrit, 0, 0.001);
                size_t i, k;
                for (i = k = 0; i < points[1].size(); i++)
                {
                    if (addRemovePt)
                    {
                        if (norm(point - points[1][i]) <= 5)
                        {
                            addRemovePt = false;
                            continue;
                        }
                    }
    
                    if (!status[i])
                        continue;
    
                    points[1][k++] = points[1][i];
                    circle(image, points[1][i], 3, Scalar(0, 255, 0), -1, 8);
                }
                points[1].resize(k);
            }
    
            if (addRemovePt && points[1].size() < (size_t)MAX_COUNT)
            {
                vector<Point2f> tmp;
                tmp.push_back(point);
                cornerSubPix(gray, tmp, winSize, cvSize(-1, -1), termcrit);
                points[1].push_back(tmp[0]);
                addRemovePt = false;
            }
    
            needToInit = false;
            imshow("LK Demo", image);
    
            char c = (char)waitKey(10);
            if (c == 27)
                break;
            switch (c)
            {
            case 'r':
                needToInit = true;
                break;
            case 'c':
                points[0].clear();
                points[1].clear();
                break;
            case 'n':
                nightMode = !nightMode;
                break;
            }
    
            std::swap(points[1], points[0]);
            cv::swap(prevGray, gray);
        }
    
        return 0;
    }

    4. Face detection: objectDetection
    Face detection is one of the most important applications of image processing and OpenCV, and the official documentation devotes a tutorial and sample code to how it is implemented. This sample program uses the objdetect module to detect faces in the camera's video stream; it is located under ...\opencv\sources\samples\cpp\tutorial_code\objectDetection. Note that you must copy haarcascade_eye_tree_eyeglasses.xml and haarcascade_frontalface_alt.xml from ...\opencv\sources\data\haarcascades into the same directory as the source file for the program to run correctly. Run the program and point your face (or a photo held up to the camera) at it, and you will see the program detect the face and mark it with a colored ellipse.
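
    The detection itself boils down to two calls: load the cascade once with CascadeClassifier::load, then call detectMultiScale on a grayscale, histogram-equalized image. Below is a minimal single-image sketch (OpenCV 2.x C++ API; "test.jpg" is a placeholder file name and not part of the official sample). The full companion program follows it.

    #include <opencv2/objdetect/objdetect.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <vector>
    using namespace cv;

    int main()
    {
        // Load the pre-trained frontal face cascade (copied from ...\opencv\sources\data\haarcascades).
        CascadeClassifier face_cascade;
        if (!face_cascade.load("haarcascade_frontalface_alt.xml"))
            return -1;

        Mat img = imread("test.jpg");        // placeholder input image
        if (img.empty())
            return -1;

        // Grayscale conversion plus histogram equalization improves detection under uneven lighting.
        Mat gray;
        cvtColor(img, gray, COLOR_BGR2GRAY);
        equalizeHist(gray, gray);

        // Detect faces; each hit is returned as a bounding rectangle.
        std::vector<Rect> faces;
        face_cascade.detectMultiScale(gray, faces, 1.1, 2, 0, Size(30, 30));

        for (size_t i = 0; i < faces.size(); ++i)
            rectangle(img, faces[i], Scalar(0, 0, 255), 2);

        imshow("faces", img);
        waitKey(0);
        return 0;
    }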

    //--------------------------------------[Program notes]-------------------------------------------
    //        Description: companion sample program 11 for the OpenCV2 edition of the book "OpenCV3编程入门"
    //        The program comes from the Samples folder under the OpenCV installation directory: face detection
    //        OS used for development and testing: Windows 7 64bit
    //        IDE version used for development and testing: Visual Studio 2010
    //        OpenCV version used for development and testing: 2.4.9
    //        Revised in November 2014 by @浅墨_毛星云
    //------------------------------------------------------------------------------------------------
    
    
    /**
    * @file ObjectDetection.cpp
    * @author A. Huaman ( based in the classic facedetect.cpp in samples/c )
    * @brief A simplified version of facedetect.cpp, show how to load a cascade classifier and how to find objects (Face + eyes) in a video stream
    */
    
    //---------------------------------[Headers and namespaces]----------------------------
    //        Description: the header files and namespaces used by the program
    //-------------------------------------------------------------------------------------------------
    #include "opencv2/objdetect/objdetect.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    
    #include <iostream>
    #include <stdio.h>
    
    using namespace std;
    using namespace cv;
    
    
    
    
    void detectAndDisplay(Mat frame);
    
    //--------------------------------[Global variable declarations]----------------------------------------------
    //        Description: declare the global variables
    //-------------------------------------------------------------------------------------------------
    //Note: "haarcascade_frontalface_alt.xml" and "haarcascade_eye_tree_eyeglasses.xml" must be copied into the project directory
    String face_cascade_name = "haarcascade_frontalface_alt.xml";
    String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
    CascadeClassifier face_cascade;
    CascadeClassifier eyes_cascade;
    string window_name = "Capture - Face detection";
    RNG rng(12345);
    
    
    //--------------------------------[ShowHelpText() function]----------------------------------------------
    //        Description: print the help message
    //-------------------------------------------------------------------------------------------------
    static void ShowHelpText()
    {
        //Print the welcome message and the OpenCV version
        cout << "\n\n\t\t\tThank you very much for purchasing the book 《OpenCV3编程入门》!\n"
            << "\n\n\t\t\tThis is companion sample program #11 of the OpenCV2 edition of the book\n"
            << "\n\n\t\t\t   The OpenCV version currently in use is: " << CV_VERSION
            << "\n\n  ----------------------------------------------------------------------------";
    }
    
    
    //-----------------------------------[main() function]--------------------------------------------
    //        Description: entry point of the console application; our program starts here
    //-------------------------------------------------------------------------------------------------
    int main(void)
    {
        VideoCapture capture;
        Mat frame;
    
    
        //-- 1. Load the cascades
        if (!face_cascade.load(face_cascade_name)){ printf("--(!)Error loading\n"); return -1; };
        if (!eyes_cascade.load(eyes_cascade_name)){ printf("--(!)Error loading\n"); return -1; };
    
        //-- 2. Read the video stream
        capture.open(0);
        ShowHelpText();
        if (capture.isOpened())
        {
            for (;;)
            {
                capture >> frame;
    
                //-- 3. Apply the classifier to the current frame
                if (!frame.empty())
                {
                    detectAndDisplay(frame);
                }
                else
                {
                    printf(" --(!) No captured frame -- Break!"); break;
                }
    
                int c = waitKey(10);
                if ((char)c == 'c') { break; }
    
            }
        }
        return 0;
    }
    
    
    void detectAndDisplay(Mat frame)
    {
        std::vector<Rect> faces;
        Mat frame_gray;
    
        cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
        equalizeHist(frame_gray, frame_gray);
        //-- Detect faces
        face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    
        for (size_t i = 0; i < faces.size(); i++)
        {
            Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
            ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 2, 8, 0);
    
            Mat faceROI = frame_gray(faces[i]);
            std::vector<Rect> eyes;
    
            //-- Detect eyes within each face
            eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    
            for (size_t j = 0; j < eyes.size(); j++)
            {
                Point eye_center(faces[i].x + eyes[j].x + eyes[j].width / 2, faces[i].y + eyes[j].y + eyes[j].height / 2);
                int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
                circle(frame, eye_center, radius, Scalar(255, 0, 0), 3, 8, 0);
            }
        }
        //-- Show the final result
        imshow(window_name, frame);
    }

    5. An introduction to support vector machines (SVM)

    In OpenCV's machine learning module, the official samples provide two programs. The first trains an SVM classifier with the CvSVM::train function; the second mainly explains how to define the SVM optimization problem when the training data are not linearly separable.

    //--------------------------------------[Program notes]-------------------------------------------
    //        Description: companion sample program 12 for the OpenCV2 edition of the book "OpenCV3编程入门"
    //        The program comes from the Samples folder under the OpenCV installation directory: an introduction to support vector machines (SVM)
    //        OS used for testing: Windows 7 64bit
    //        IDE version used for testing: Visual Studio 2010
    //        OpenCV version used for testing: 2.4.9
    //        Revised in November 2014 by @浅墨_毛星云
    //------------------------------------------------------------------------------------------------
    
    
    //---------------------------------[Headers and namespaces]----------------------------
    //        Description: the header files and namespaces used by the program
    //-------------------------------------------------------------------------------------------------
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/ml/ml.hpp>
    using namespace cv;
    
    
    
    
    //-----------------------------------[ShowHelpText() function]----------------------------------
    //          Description: print the help message
    //----------------------------------------------------------------------------------------------
    void ShowHelpText()
    {
        //Print the welcome message and the OpenCV version
        printf("\n\n\t\t\tThank you very much for purchasing the book 《OpenCV3编程入门》!\n");
        printf("\n\n\t\t\tThis is companion sample program #12 of the OpenCV2 edition of the book\n");
        printf("\n\n\t\t\t   The OpenCV version currently in use is: " CV_VERSION);
        printf("\n\n  ----------------------------------------------------------------------------\n");
    }
    
    //-----------------------------------[main() function]--------------------------------------------
    //        Description: entry point of the console application; our program starts here
    //-------------------------------------------------------------------------------------------------
    int main()
    {
        // Data for visual representation
        int width = 512, height = 512;
        Mat image = Mat::zeros(height, width, CV_8UC3);
    
        // Set up the training data (4 samples, 2 features each)
        float labels[4] = { 1.0, -1.0, -1.0, -1.0 };
        Mat labelsMat(4, 1, CV_32FC1, labels);
    
        float trainingData[4][2] = { { 501, 10 }, { 255, 10 }, { 501, 255 }, { 10, 501 } };
        Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
    
        ShowHelpText();
    
        // Set up the SVM's parameters
        CvSVMParams params;
        params.svm_type = CvSVM::C_SVC;
        params.kernel_type = CvSVM::LINEAR;
        params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
    
        // Train the SVM
        CvSVM SVM;
        SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);
    
        Vec3b green(0, 255, 0), blue(255, 0, 0);
        // Show the decision regions given by the SVM
        for (int i = 0; i < image.rows; ++i)
            for (int j = 0; j < image.cols; ++j)
            {
            Mat sampleMat = (Mat_<float>(1, 2) << i, j);
            float response = SVM.predict(sampleMat);
    
            if (response == 1)
                image.at<Vec3b>(j, i) = green;
            else if (response == -1)
                image.at<Vec3b>(j, i) = blue;
            }
    
        // Show the training data
        int thickness = -1;
        int lineType = 8;
        circle(image, Point(501, 10), 5, Scalar(0, 0, 0), thickness, lineType);
        circle(image, Point(255, 10), 5, Scalar(255, 255, 255), thickness, lineType);
        circle(image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType);
        circle(image, Point(10, 501), 5, Scalar(255, 255, 255), thickness, lineType);
    
        // Show the support vectors
        thickness = 2;
        lineType = 8;
        int c = SVM.get_support_vector_count();
    
        for (int i = 0; i < c; ++i)
        {
            const float* v = SVM.get_support_vector(i);
            circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
        }
    
        imwrite("result.png", image);        // 保存图像
    
        imshow("SVM Simple Example", image); // 显示图像
        waitKey(0);
    
    }

    The program above produces a 512x512 image divided into the two decision regions, with the four training points and the support vectors drawn as circles on top. The second SVM example, which handles linearly non-separable training data, follows.
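
    For reference, the optimization problem this second example illustrates is the standard soft-margin formulation: slack variables ξ_i let individual training points violate the margin, and the weight C controls how heavily those violations are penalized:

        min over w, b, ξ :   (1/2)||w||^2 + C Σ_i ξ_i
        subject to:          y_i (w^T x_i + b) >= 1 - ξ_i,   ξ_i >= 0

    A small C (the program below uses params.C = 0.1) tolerates more misclassified training points in exchange for a wider margin, which is usually preferable when the two classes overlap.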

    //--------------------------------------[Program notes]-------------------------------------------
    //        Description: companion sample program 13 for the OpenCV2 edition of the book "OpenCV3编程入门"
    //        The program comes from the Samples folder under the OpenCV installation directory: SVM on linearly non-separable data
    //        OS used for testing: Windows 7 64bit
    //        IDE version used for testing: Visual Studio 2010
    //        OpenCV version used for testing: 2.4.9
    //        Revised in November 2014 by @浅墨_毛星云
    //------------------------------------------------------------------------------------------------
    
    
    
    //---------------------------------[Headers and namespaces]----------------------------
    //        Description: the header files and namespaces used by the program
    //------------------------------------------------------------------------------------------------
    #include <iostream>
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/ml/ml.hpp>
    
    #define NTRAINING_SAMPLES   100         // number of training samples per class
    #define FRAC_LINEAR_SEP     0.9f        // fraction of the samples that are linearly separable
    
    using namespace cv;
    using namespace std;
    
    
    
    
    //-----------------------------------[ShowHelpText() function]----------------------------------
    //          Description: print the help message
    //----------------------------------------------------------------------------------------------
    void ShowHelpText()
    {
        //Print the welcome message and the OpenCV version
        printf("\n\n\t\t\tThank you very much for purchasing the book 《OpenCV3编程入门》!\n");
        printf("\n\n\t\t\tThis is companion sample program #13 of the OpenCV2 edition of the book\n");
        printf("\n\n\t\t\t   The OpenCV version currently in use is: " CV_VERSION);
        printf("\n\n  ----------------------------------------------------------------------------\n");
    }
    
    //-----------------------------------[main() function]--------------------------------------------
    //        Description: entry point of the console application; our program starts here
    //-------------------------------------------------------------------------------------------------
    int main()
    {
        //Set up the parameters for the visual representation
        const int WIDTH = 512, HEIGHT = 512;
        Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);
        ShowHelpText();
    
        //--------------------- [1] Randomly generate the training data ---------------------------------------
        Mat trainData(2*NTRAINING_SAMPLES, 2, CV_32FC1);
        Mat labels   (2*NTRAINING_SAMPLES, 1, CV_32FC1);
    
        RNG rng(100); // random number generator
    
        //Set up the linearly separable part of the training data
        int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
    
        // Generate random points for Class 1
        Mat trainClass = trainData.rowRange(0, nLinearSamples);
        // x coordinate of the points is in [0, 0.4)
        Mat c = trainClass.colRange(0, 1);
        rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
        // y coordinate of the points is in [0, 1)
        c = trainClass.colRange(1,2);
        rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
    
        // Generate random points for Class 2
        trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
        // x coordinate of the points is in [0.6, 1]
        c = trainClass.colRange(0 , 1); 
        rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
        // y coordinate of the points is in [0, 1)
        c = trainClass.colRange(1,2);
        rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
    
        //------------------ Set up the non-linearly separable part of the training data ---------------
    
        // Generate random points for Class 1 and Class 2
        trainClass = trainData.rowRange(  nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
        // x coordinate of the points is in [0.4, 0.6)
        c = trainClass.colRange(0,1);
        rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH)); 
        // y coordinate of the points is in [0, 1)
        c = trainClass.colRange(1,2);
        rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
    
        //------------------------- Set the class labels ---------------------------------
        labels.rowRange(                0,   NTRAINING_SAMPLES).setTo(1);  // Class 1
        labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2);  // Class 2
    
        //------------------------ 2. Set up the SVM's parameters --------------------
        CvSVMParams params;
        params.svm_type    = SVM::C_SVC;
        params.C           = 0.1;
        params.kernel_type = SVM::LINEAR;
        params.term_crit   = TermCriteria(CV_TERMCRIT_ITER, (int)1e7, 1e-6);
    
        //------------------------ 3. Train the SVM ----------------------------------------------------
        cout << "Starting training process" << endl;
        CvSVM svm;
        svm.train(trainData, labels, Mat(), Mat(), params);
        cout << "Finished training process" << endl;
    
        //------------------------ 4. Mark out the decision regions ----------------------------------------
        Vec3b green(0,100,0), blue (100,0,0);
        for (int i = 0; i < I.rows; ++i)
            for (int j = 0; j < I.cols; ++j)
            {
                Mat sampleMat = (Mat_<float>(1,2) << i, j);
                float response = svm.predict(sampleMat);
    
                if      (response == 1)    I.at<Vec3b>(j, i)  = green;
                else if (response == 2)    I.at<Vec3b>(j, i)  = blue;
            }
    
            //----------------------- 5. Show the training data --------------------------------------------
            int thick = -1;
            int lineType = 8;
            float px, py;
            // Class 1
            for (int i = 0; i < NTRAINING_SAMPLES; ++i)
            {
                px = trainData.at<float>(i,0);
                py = trainData.at<float>(i,1);
                circle(I, Point( (int) px,  (int) py ), 3, Scalar(0, 255, 0), thick, lineType);
            }
            // Class 2
            for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; ++i)
            {
                px = trainData.at<float>(i,0);
                py = trainData.at<float>(i,1);
                circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
            }
    
            //------------------------- 6. Show the support vectors --------------------------------------------
            thick = 2;
            lineType  = 8;
            int x     = svm.get_support_vector_count();
    
            for (int i = 0; i < x; ++i)
            {
                const float* v = svm.get_support_vector(i);
                circle( I,  Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
            }
    
            imwrite("result.png", I);                      //保存图像到文件
            imshow("SVM for Non-Linear Training Data", I); // 显示最终窗口
            waitKey(0);
    }

  • Original post: https://www.cnblogs.com/gary-guo/p/6646368.html