zoukankan      html  css  js  c++  java
  • Opencv 九点标定

    Opencv 3.4.2

    项目位置:

    F:\visionYolo\testProject\Exe\ComAcs\MFCTest\MThreadMFCTest  (path separators were lost when this page was scraped; verify against the original project layout)

    代码实现:

    #include "include/opencv2/video/tracking.hpp"
    void CMFCTestDlg::OnBnClickedBtn9pointcalib()
    {
        std::vector<Point2f> points_camera; 
        std::vector<Point2f> points_robot; 
    
        /*points_camera.push_back(Point2f(1372.36f, 869.00F));
        points_camera.push_back(Point2f(1758.86f, 979.07f));
        points_camera.push_back(Point2f(2145.75f, 1090.03f));
        points_camera.push_back(Point2f(2040.02f, 1461.64f));
        points_camera.push_back(Point2f(1935.01f, 1833.96f));
        points_camera.push_back(Point2f(1546.79f, 1724.21f));
        points_camera.push_back(Point2f(1158.53f, 1613.17f));
        points_camera.push_back(Point2f(1265.07f, 1240.49f));
        points_camera.push_back(Point2f(1652.6f, 1351.27f));
        
        points_robot.push_back(Point2f(98.884f, 320.881f));
        points_robot.push_back(Point2f(109.81f, 322.27f));
        points_robot.push_back(Point2f(120.62f, 323.695f));
        points_robot.push_back(Point2f(121.88f, 313.154f));
        points_robot.push_back(Point2f(123.11f, 302.671f));
        points_robot.push_back(Point2f(112.23f, 301.107f));
        points_robot.push_back(Point2f(101.626f, 299.816f));
        points_robot.push_back(Point2f(100.343f, 310.447f));
        points_robot.push_back(Point2f(111.083f, 311.665f));*/
    
        points_camera.push_back(Point2f(1516.14f,1119.48f));
        points_camera.push_back(Point2f(1516.24f,967.751f));
        points_camera.push_back(Point2f(1668.55f,967.29f));
        points_camera.push_back(Point2f(1668.44f,1118.93f));
        points_camera.push_back(Point2f(1668.54f,1270.57f));
        points_camera.push_back(Point2f(1516.22f,1271.37f));
        points_camera.push_back(Point2f(1364.21f,1272.15f));
        points_camera.push_back(Point2f(1364.08f,1120.42f));
        points_camera.push_back(Point2f(1364.08f,968.496f));
    
        points_robot.push_back(Point2f(35.7173f,18.8248f));
        points_robot.push_back(Point2f(40.7173f, 18.8248f));
        points_robot.push_back(Point2f(40.7173f, 13.8248f));
        points_robot.push_back(Point2f(35.7173f, 13.8248f));
        points_robot.push_back(Point2f(30.7173f, 13.8248f));
        points_robot.push_back(Point2f(30.7173f, 18.8248f));
        points_robot.push_back(Point2f(30.7173f, 23.8248f));
        points_robot.push_back(Point2f(35.7173f, 23.8248f));
        points_robot.push_back(Point2f(40.7173f, 23.8248f));
        
        /*for (int i = 0;i < 9;i++)
        {
            points_camera.push_back(Point2f(i+1, i+1));
            points_robot.push_back(Point2f(2*(i + 1), 2*(i + 1)));
        }*/
        
        //变换成 2*3 矩阵
        Mat  affine;
        estimateRigidTransform(points_camera, points_robot, true).convertTo(affine, CV_32F);
        if (affine.empty())
        {
            affine = LocalAffineEstimate(points_camera, points_robot, true);
        }
        float A, B, C, D, E, F;
        A = affine.at<float>(0, 0);
        B = affine.at<float>(0, 1);
        C = affine.at<float>(0, 2);
        D = affine.at<float>(1, 0);
        E = affine.at<float>(1, 1);
        F = affine.at<float>(1, 2);
    
        //坐标转换
        //Point2f src = Point2f(1652.6f, 1351.27f);
        //Point2f src = Point2f(100, 100);
        Point2f src = Point2f(1516.14f, 1119.48f);
        Point2f dst;
        dst.x = A * src.x + B * src.y + C;
        dst.y = D * src.x + E * src.y + F;
    
        //RMS 标定偏差
        std::vector<Point2f> points_Calc;
        double sumX = 0, sumY = 0;
        for (int i = 0;i < points_camera.size();i++)
        {
            Point2f pt;
            pt.x = A * points_camera[i].x + B * points_camera[i].y + C;
            pt.y = D * points_camera[i].x + E * points_camera[i].y + F;
            points_Calc.push_back(pt);
            sumX += pow(points_robot[i].x - points_Calc[i].x,2);
            sumY += pow(points_robot[i].y - points_Calc[i].y, 2);
        }
        double rmsX, rmsY;
        rmsX = sqrt(sumX / points_camera.size());
        rmsY = sqrt(sumY / points_camera.size());
        CString str;
        str.Format("偏差值:%0.3f,%0.3f", rmsX, rmsY);
        AfxMessageBox(str);
    }
    
    /// Least-squares estimate of a 2x3 affine transform mapping shape1 -> shape2.
    ///
    /// @param shape1    source points (e.g. camera pixel coordinates)
    /// @param shape2    destination points, paired 1:1 with shape1
    /// @param fullAfine true  -> full 6-DoF affine  [a b c; d e f]
    ///                  false -> 4-DoF similarity   [a b c; -b a f]
    ///                           (rotation + uniform scale + translation)
    /// @return 2x3 CV_32F matrix, solved with SVD (robust to over-determined
    ///         and near-singular systems)
    Mat CMFCTestDlg::LocalAffineEstimate(const std::vector<Point2f>& shape1, const std::vector<Point2f>& shape2,
        bool fullAfine)
    {
        // Paired point sets are required; each pair contributes two equations,
        // so 3 pairs (full) / 2 pairs (partial) are the minimum.
        CV_Assert(shape1.size() == shape2.size() && !shape1.empty());

        Mat out(2, 3, CV_32F);
        const int npts = (int)shape1.size();
        const int siz = 2 * npts;  // two scalar equations per point pair

        if (fullAfine)
        {
            // Unknowns: [a b c d e f]^T.
            //   row 2i   :  a*x + b*y + c = x'
            //   row 2i+1 :  d*x + e*y + f = y'
            Mat matM = Mat::zeros(siz, 6, CV_32F);
            Mat matP(siz, 1, CV_32F);
            for (int i = 0; i < npts; i++)
            {
                float* rx = matM.ptr<float>(2 * i);
                rx[0] = shape1[i].x;
                rx[1] = shape1[i].y;
                rx[2] = 1;
                matP.at<float>(2 * i, 0) = shape2[i].x;

                float* ry = matM.ptr<float>(2 * i + 1);
                ry[3] = shape1[i].x;
                ry[4] = shape1[i].y;
                ry[5] = 1;
                matP.at<float>(2 * i + 1, 0) = shape2[i].y;
            }
            Mat sol;
            solve(matM, matP, sol, DECOMP_SVD);
            out = sol.reshape(0, 2);
        }
        else
        {
            // Similarity transform, unknowns: [a b c f]^T with the constraint
            // that the linear part is [a b; -b a] (rotation * scale):
            //   row 2i   :   a*x + b*y + c = x'
            //   row 2i+1 :  -b*x + a*y + f = y'   (rewritten so the same
            //                                      unknown vector applies)
            Mat matM = Mat::zeros(siz, 4, CV_32F);
            Mat matP(siz, 1, CV_32F);
            for (int i = 0; i < npts; i++)
            {
                float* rx = matM.ptr<float>(2 * i);
                rx[0] = shape1[i].x;
                rx[1] = shape1[i].y;
                rx[2] = 1;
                matP.at<float>(2 * i, 0) = shape2[i].x;

                float* ry = matM.ptr<float>(2 * i + 1);
                ry[0] = -shape1[i].y;
                ry[1] = shape1[i].x;
                ry[3] = 1;
                matP.at<float>(2 * i + 1, 0) = shape2[i].y;
            }
            Mat sol;
            solve(matM, matP, sol, DECOMP_SVD);
            out.at<float>(0, 0) = sol.at<float>(0, 0);
            out.at<float>(0, 1) = sol.at<float>(1, 0);
            out.at<float>(0, 2) = sol.at<float>(2, 0);
            out.at<float>(1, 0) = -sol.at<float>(1, 0);
            out.at<float>(1, 1) = sol.at<float>(0, 0);
            out.at<float>(1, 2) = sol.at<float>(3, 0);
        }
        return out;
    }
  • 相关阅读:
    ButterKnife 原理解析
    有关java之反射的使用
    Integer 与 int 中的 ==
    下拉框、多选框、单选框 通过TagHelper绑定数据
    动态构建视图表单
    添加我的应用中的后台图标
    标准服务接口示例代码
    .net Core下的 常用方法
    使用Redirect跳转
    标准表单提交示例代码
  • 原文地址:https://www.cnblogs.com/profession/p/13425748.html
Copyright © 2011-2022 走看看