zoukankan      html  css  js  c++  java
  • VI ORB-SLAM初始化与VINS初始化对比(将vi orb-slam初始化方法移植到vins中)

    初始化时需要求出的变量:相机和imu外参r t、重力g、尺度s、陀螺仪和加速度计偏置ba bg。

    下面对两种算法初始化的详细步骤进行对比:

    求陀螺仪偏置bg

    • 求解公式相同,求解方法不同。公式如下,VI ORB-SLAM使用图优化的方式。

        

    // Estimate the initial gyroscope bias bg with a small g2o graph:
    // a single 3-DoF bias vertex constrained by one unary edge per pair of
    // consecutive keyframes. Each edge compares the IMU-preintegrated rotation
    // (and its Jacobian w.r.t. bg) against the relative rotation derived from
    // the camera poses.
    //   vTwc       : per-keyframe camera poses Twc (cv::Mat)
    //   vImuPreInt : IMU preintegration terms; vImuPreInt[i] is paired with
    //                poses vTwc[i-1] and vTwc[i] in the loop below
    // Returns the optimized 3x1 gyro bias estimate.
    Vector3d Optimizer::OptimizeInitialGyroBias(const vector<cv::Mat>& vTwc, const vector<IMUPreintegrator>& vImuPreInt)
    {
        // Sizes must match one-to-one; a mismatch is only reported, not handled.
        int N = vTwc.size(); if(vTwc.size()!=vImuPreInt.size()) cerr<<"vTwc.size()!=vImuPreInt.size()"<<endl;
        Matrix4d Tbc = ConfigParam::GetEigTbc();
        // Rotation used below to turn camera rotations into body rotations: Rwb = Rwc*Rcb.
        Matrix3d Rcb = Tbc.topLeftCorner(3,3).transpose();
    
        // Setup optimizer
        g2o::SparseOptimizer optimizer;
        g2o::BlockSolverX::LinearSolverType * linearSolver;
    
        linearSolver = new g2o::LinearSolverEigen<g2o::BlockSolverX::PoseMatrixType>();
    
        g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver);
    
        g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr);
        //g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
        optimizer.setAlgorithm(solver);
    
        // Add vertex of gyro bias, to optimizer graph; start from a zero-bias estimate.
        g2o::VertexGyrBias * vBiasg = new g2o::VertexGyrBias();
        vBiasg->setEstimate(Eigen::Vector3d::Zero());
        vBiasg->setId(0);
        optimizer.addVertex(vBiasg);
    
        // Add unary edges for gyro bias vertex
        //for(std::vector<KeyFrame*>::const_iterator lit=vpKFs.begin(), lend=vpKFs.end(); lit!=lend; lit++)
        for(int i=0; i<N; i++)
        {
            // Ignore the first KF: it has no predecessor to form a relative rotation with.
            if(i==0)
                continue;
    
            const cv::Mat& Twi = vTwc[i-1];    // pose of previous KF
            Matrix3d Rwci = Converter::toMatrix3d(Twi.rowRange(0,3).colRange(0,3));
            //Matrix3d Rwci = Twi.rotation_matrix();
            const cv::Mat& Twj = vTwc[i];        // pose of this KF
            Matrix3d Rwcj = Converter::toMatrix3d(Twj.rowRange(0,3).colRange(0,3));
            //Matrix3d Rwcj =Twj.rotation_matrix();
    
            const IMUPreintegrator& imupreint = vImuPreInt[i];
    
            g2o::EdgeGyrBias * eBiasg = new g2o::EdgeGyrBias();
            eBiasg->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(0)));
            // measurement is not used in EdgeGyrBias
            eBiasg->dRbij = imupreint.getDeltaR();       // preintegrated rotation between the two KFs
            eBiasg->J_dR_bg = imupreint.getJRBiasg();    // Jacobian of dRbij w.r.t. gyro bias
            eBiasg->Rwbi = Rwci*Rcb;                     // body rotation of previous KF
            eBiasg->Rwbj = Rwcj*Rcb;                     // body rotation of this KF
            //eBiasg->setInformation(Eigen::Matrix3d::Identity());
            // Weight each edge by the inverse rotation covariance (bottom-right 3x3 of the P/V/Phi covariance).
            eBiasg->setInformation(imupreint.getCovPVPhi().bottomRightCorner(3,3).inverse());
            optimizer.addEdge(eBiasg);
        }
    
        // It's actually a linear estimator, so 1 iteration is enough.
        //optimizer.setVerbose(true);
        optimizer.initializeOptimization();
        optimizer.optimize(1);
    
        g2o::VertexGyrBias * vBgEst = static_cast<g2o::VertexGyrBias*>(optimizer.vertex(0));
    
        return vBgEst->estimate();
    }
    View Code
    • VINS中公式如下。使用LDLT分解,解方程组。

       

    // Estimate the gyroscope bias delta from all frames in the initialization
    // window and apply it. For every pair of consecutive frames the visual
    // relative rotation q_ij is compared against the IMU-preintegrated
    // rotation delta_q; the residual 2*vec(delta_q^-1 * q_ij), linearized via
    // the preintegration Jacobian w.r.t. the gyro bias, is accumulated into
    // normal equations solved by LDLT. Afterwards every sliding-window bias
    // Bgs[0..WINDOW_SIZE] is shifted by the estimate and all preintegrations
    // are repropagated with the corrected bias.
    void solveGyroscopeBias(map<double, ImageFrame> &all_image_frame, Vector3d* Bgs)
    {
        Matrix3d A = Matrix3d::Zero();
        Vector3d b = Vector3d::Zero();
    
        for (auto it = all_image_frame.begin(); next(it) != all_image_frame.end(); ++it)
        {
            auto it_next = next(it);
            // Relative rotation between the two frames from the visual poses.
            Eigen::Quaterniond q_ij(it->second.R.transpose() * it_next->second.R);
            // Jacobian of the preintegrated rotation w.r.t. the gyro bias.
            Matrix3d jac = it_next->second.pre_integration->jacobian.template block<3, 3>(O_R, O_BG);
            // Rotation residual (small-angle approximation: 2 * vector part).
            Vector3d res = 2 * (it_next->second.pre_integration->delta_q.inverse() * q_ij).vec();
            // Accumulate normal equations A*delta_bg = b.
            A += jac.transpose() * jac;
            b += jac.transpose() * res;
        }
    
        Vector3d delta_bg = A.ldlt().solve(b);
        ROS_WARN_STREAM("gyroscope bias initial calibration " << delta_bg.transpose());
    
        // Apply the correction to every bias slot of the sliding window.
        for (int i = 0; i <= WINDOW_SIZE; i++)
            Bgs[i] += delta_bg;
    
        // Repropagate all preintegrations with zero accel bias and the new gyro bias.
        for (auto it = all_image_frame.begin(); next(it) != all_image_frame.end(); ++it)
            next(it)->second.pre_integration->repropagate(Vector3d::Zero(), Bgs[0]);
    }
    View Code

    求重力加速度g、尺度s和外参平移量T

    • VINS中并没有求外参T,而是在紧耦合优化时以初值为0直接进行优化。两算法求取公式略有不同。VI ORB-SLAM公式如下,使用SVD分解求解方程的解:

        

        

    // NOTE(review): excerpt from VI ORB-SLAM's initialization (Step 2); the
    // variables N, vKFInit, vTwc, pcb and Rcb used below are declared earlier
    // in the full function and are not shown here. For every triple of
    // consecutive keyframes one 3-row linear constraint
    //   lambda*s + beta*gw = gamma
    // is stacked into A*x=B with unknowns x=[s, gw] (scale + gravity in the
    // first camera frame), then solved by SVD pseudo-inverse.
    // Solve A*x=B for x=[s,gw] 4x1 vector
        cv::Mat A = cv::Mat::zeros(3*(N-2),4,CV_32F);
        cv::Mat B = cv::Mat::zeros(3*(N-2),1,CV_32F);
        cv::Mat I3 = cv::Mat::eye(3,3,CV_32F);
    
        // Step 2.
        // Approx Scale and Gravity vector in 'world' frame (first KF's camera frame)
        for(int i=0; i<N-2; i++)
        {
            //KeyFrameInit* pKF1 = vKFInit[i];//vScaleGravityKF[i];
            KeyFrameInit* pKF2 = vKFInit[i+1];
            KeyFrameInit* pKF3 = vKFInit[i+2];
            // Delta time between frames
            double dt12 = pKF2->mIMUPreInt.getDeltaTime();
            double dt23 = pKF3->mIMUPreInt.getDeltaTime();
            // Pre-integrated measurements
            cv::Mat dp12 = Converter::toCvMat(pKF2->mIMUPreInt.getDeltaP());
            cv::Mat dv12 = Converter::toCvMat(pKF2->mIMUPreInt.getDeltaV());
            cv::Mat dp23 = Converter::toCvMat(pKF3->mIMUPreInt.getDeltaP());
    
            // Pose of camera in world frame
            cv::Mat Twc1 = vTwc[i].clone();//pKF1->GetPoseInverse();
            cv::Mat Twc2 = vTwc[i+1].clone();//pKF2->GetPoseInverse();
            cv::Mat Twc3 = vTwc[i+2].clone();//pKF3->GetPoseInverse();
            // Position of camera center
            cv::Mat pc1 = Twc1.rowRange(0,3).col(3);
            cv::Mat pc2 = Twc2.rowRange(0,3).col(3);
            cv::Mat pc3 = Twc3.rowRange(0,3).col(3);
            // Rotation of camera, Rwc
            cv::Mat Rc1 = Twc1.rowRange(0,3).colRange(0,3);
            cv::Mat Rc2 = Twc2.rowRange(0,3).colRange(0,3);
            cv::Mat Rc3 = Twc3.rowRange(0,3).colRange(0,3);
    
            // Stack to A/B matrix
            // lambda*s + beta*g = gamma
            cv::Mat lambda = (pc2-pc1)*dt23 + (pc2-pc3)*dt12;
            cv::Mat beta = 0.5*I3*(dt12*dt12*dt23 + dt12*dt23*dt23);
            cv::Mat gamma = (Rc3-Rc2)*pcb*dt12 + (Rc1-Rc2)*pcb*dt23 + Rc1*Rcb*dp12*dt23 - Rc2*Rcb*dp23*dt12 - Rc1*Rcb*dv12*dt12*dt23;
            lambda.copyTo(A.rowRange(3*i+0,3*i+3).col(0));
            beta.copyTo(A.rowRange(3*i+0,3*i+3).colRange(1,4));
            gamma.copyTo(B.rowRange(3*i+0,3*i+3));
            // Tested the formulation in paper, -gamma. Then the scale and gravity vector is -xx
    
            // Debug log
            //cout<<"iter "<<i<<endl;
        }
        // Use svd to compute A*x=B, x=[s,gw] 4x1 vector
        // A = u*w*vt, u*w*vt*x=B
        // Then x = vt'*winv*u'*B
        cv::Mat w,u,vt;
        // Note w is 4x1 vector by SVDecomp()
        // A is changed in SVDecomp() with cv::SVD::MODIFY_A for speed
        cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A);
        // Debug log
        //cout<<"u:"<<endl<<u<<endl;
        //cout<<"vt:"<<endl<<vt<<endl;
        //cout<<"w:"<<endl<<w<<endl;
    
        // Compute winv
        // Invert the singular values; near-zero values are nudged to avoid division by zero
        // (rank deficiency here means the motion does not constrain scale/gravity well).
        cv::Mat winv=cv::Mat::eye(4,4,CV_32F);
        for(int i=0;i<4;i++)
        {
            if(fabs(w.at<float>(i))<1e-10)
            {
                w.at<float>(i) += 1e-10;
                // Test log
                cerr<<"w(i) < 1e-10, w="<<endl<<w<<endl;
            }
    
            winv.at<float>(i,i) = 1./w.at<float>(i);
        }
        // Then x = vt'*winv*u'*B
        cv::Mat x = vt.t()*winv*u.t()*B;
    
        // x=[s,gw] 4x1 vector
        double sstar = x.at<float>(0);    // scale should be positive
        cv::Mat gwstar = x.rowRange(1,4);   // gravity should be about ~9.8
    View Code
    •  VINS求出尺度s、重力加速度g和速度v(这里求出滑窗内每帧的速度v意义在哪里?),并没有求外参的平移,方法为LDLT分解。公式如下:

        

        

    // Visual-inertial linear alignment (VINS-Mono): a single LDLT solve of the
    // stacked normal equations jointly estimates
    //   - the body-frame velocity of every frame in the window (3 per frame),
    //   - the gravity vector g expressed in the reference camera frame (3),
    //   - the metric scale s (1),
    // so the state is x = [v_0 ... v_{n-1}, g, s] with n_state = 3*n + 3 + 1.
    // Returns false if the recovered |g| deviates from G by more than 1.0 or
    // the scale is negative; otherwise refines g via RefineGravity and returns true.
    bool LinearAlignment(map<double, ImageFrame> &all_image_frame, Vector3d &g, VectorXd &x)
    {
        int all_frame_count = all_image_frame.size();
        int n_state = all_frame_count * 3 + 3 + 1;
    
        MatrixXd A{n_state, n_state};
        A.setZero();
        VectorXd b{n_state};
        b.setZero();
    
        map<double, ImageFrame>::iterator frame_i;
        map<double, ImageFrame>::iterator frame_j;
        int i = 0;
        for (frame_i = all_image_frame.begin(); next(frame_i) != all_image_frame.end(); frame_i++, i++)
        {
            frame_j = next(frame_i);
    
            // Each consecutive-frame pair yields a 6-row constraint over a 10-dim
            // local state [v_i(3), v_j(3), g(3), s(1)]: rows 0-2 from the position
            // preintegration delta_p, rows 3-5 from the velocity preintegration delta_v.
            MatrixXd tmp_A(6, 10);
            tmp_A.setZero();
            VectorXd tmp_b(6);
            tmp_b.setZero();
    
            double dt = frame_j->second.pre_integration->sum_dt;
    
            tmp_A.block<3, 3>(0, 0) = -dt * Matrix3d::Identity();
            tmp_A.block<3, 3>(0, 6) = frame_i->second.R.transpose() * dt * dt / 2 * Matrix3d::Identity();
            // Scale column divided by 100 for conditioning: the solver therefore
            // returns 100*s, which is why s is read back as x(n_state-1)/100 below.
            tmp_A.block<3, 1>(0, 9) = frame_i->second.R.transpose() * (frame_j->second.T - frame_i->second.T) / 100.0;     
            tmp_b.block<3, 1>(0, 0) = frame_j->second.pre_integration->delta_p + frame_i->second.R.transpose() * frame_j->second.R * TIC[0] - TIC[0];
            //cout << "delta_p   " << frame_j->second.pre_integration->delta_p.transpose() << endl;
            tmp_A.block<3, 3>(3, 0) = -Matrix3d::Identity();
            tmp_A.block<3, 3>(3, 3) = frame_i->second.R.transpose() * frame_j->second.R;
            tmp_A.block<3, 3>(3, 6) = frame_i->second.R.transpose() * dt * Matrix3d::Identity();
            tmp_b.block<3, 1>(3, 0) = frame_j->second.pre_integration->delta_v;
            //cout << "delta_v   " << frame_j->second.pre_integration->delta_v.transpose() << endl;
    
            Matrix<double, 6, 6> cov_inv = Matrix<double, 6, 6>::Zero();
            //cov.block<6, 6>(0, 0) = IMU_cov[i + 1];
            //MatrixXd cov_inv = cov.inverse();
            cov_inv.setIdentity();
    
            MatrixXd r_A = tmp_A.transpose() * cov_inv * tmp_A;
            VectorXd r_b = tmp_A.transpose() * cov_inv * tmp_b;
    
            // Scatter the 10x10 local normal equations into the global system:
            // consecutive pairs share v_j, so the 6x6 velocity blocks overlap
            // (stride 3 with 6-wide blocks) and are accumulated with +=.
            A.block<6, 6>(i * 3, i * 3) += r_A.topLeftCorner<6, 6>();
            b.segment<6>(i * 3) += r_b.head<6>();
    
            A.bottomRightCorner<4, 4>() += r_A.bottomRightCorner<4, 4>();
            b.tail<4>() += r_b.tail<4>();
    
            A.block<6, 4>(i * 3, n_state - 4) += r_A.topRightCorner<6, 4>();
            A.block<4, 6>(n_state - 4, i * 3) += r_A.bottomLeftCorner<4, 6>();
        }
        // Scaling both sides by the same factor leaves the solution unchanged;
        // it only improves numerical conditioning.
        A = A * 1000.0;
        b = b * 1000.0;
        x = A.ldlt().solve(b);
        // Undo the /100 column scaling applied to the scale entry above.
        double s = x(n_state - 1) / 100.0;
        ROS_DEBUG("estimated scale: %f", s);
        g = x.segment<3>(n_state - 4);
        ROS_DEBUG_STREAM(" result g     " << g.norm() << " " << g.transpose());
        if(fabs(g.norm() - G.norm()) > 1.0 || s < 0)
        {
            return false;
        }
    
        // Refine gravity on its 2-DoF tangent space with |g| fixed to G.norm().
        RefineGravity(all_image_frame, g, x);
        s = (x.tail<1>())(0) / 100.0;
        (x.tail<1>())(0) = s;
        ROS_DEBUG_STREAM(" refine     " << g.norm() << " " << g.transpose());
        if(s < 0.0 )
            return false;   
        else
            return true;
    }
    View Code

      这里在求出g之后,还对其进行了优化,方法为LDLT分解,公式如下。

        

    // Refine the gravity estimate with its magnitude fixed to G.norm():
    // g is re-parameterized as g0 + lxly*dg, where lxly spans the 2-D tangent
    // plane orthogonal to the current g0 (TangentBasis). Four iterations of
    // re-linearize-and-solve (LDLT) update g0; velocities and scale are
    // re-estimated alongside, so the state is
    //   x = [v_0 ... v_{n-1}, dg(2), s] with n_state = 3*n + 2 + 1.
    void RefineGravity(map<double, ImageFrame> &all_image_frame, Vector3d &g, VectorXd &x)
    {
        // Project the initial gravity onto the sphere of radius G.norm().
        Vector3d g0 = g.normalized() * G.norm();
        Vector3d lx, ly;
        //VectorXd x;
        int all_frame_count = all_image_frame.size();
        int n_state = all_frame_count * 3 + 2 + 1;
    
        MatrixXd A{n_state, n_state};
        A.setZero();
        VectorXd b{n_state};
        b.setZero();
    
        map<double, ImageFrame>::iterator frame_i;
        map<double, ImageFrame>::iterator frame_j;
        for(int k = 0; k < 4; k++)
        {
            // Tangent basis of the current g0; recomputed each iteration.
            MatrixXd lxly(3, 2);
            lxly = TangentBasis(g0);
            int i = 0;
            for (frame_i = all_image_frame.begin(); next(frame_i) != all_image_frame.end(); frame_i++, i++)
            {
                frame_j = next(frame_i);
    
                // 6-row constraint per frame pair over [v_i(3), v_j(3), dg(2), s(1)];
                // the known g0 contribution is moved into the residual tmp_b.
                MatrixXd tmp_A(6, 9);
                tmp_A.setZero();
                VectorXd tmp_b(6);
                tmp_b.setZero();
    
                double dt = frame_j->second.pre_integration->sum_dt;
    
    
                tmp_A.block<3, 3>(0, 0) = -dt * Matrix3d::Identity();
                tmp_A.block<3, 2>(0, 6) = frame_i->second.R.transpose() * dt * dt / 2 * Matrix3d::Identity() * lxly;
                // Scale column divided by 100 for conditioning (same trick as LinearAlignment).
                tmp_A.block<3, 1>(0, 8) = frame_i->second.R.transpose() * (frame_j->second.T - frame_i->second.T) / 100.0;     
                tmp_b.block<3, 1>(0, 0) = frame_j->second.pre_integration->delta_p + frame_i->second.R.transpose() * frame_j->second.R * TIC[0] - TIC[0] - frame_i->second.R.transpose() * dt * dt / 2 * g0;
    
                tmp_A.block<3, 3>(3, 0) = -Matrix3d::Identity();
                tmp_A.block<3, 3>(3, 3) = frame_i->second.R.transpose() * frame_j->second.R;
                tmp_A.block<3, 2>(3, 6) = frame_i->second.R.transpose() * dt * Matrix3d::Identity() * lxly;
                tmp_b.block<3, 1>(3, 0) = frame_j->second.pre_integration->delta_v - frame_i->second.R.transpose() * dt * Matrix3d::Identity() * g0;
    
    
                Matrix<double, 6, 6> cov_inv = Matrix<double, 6, 6>::Zero();
                //cov.block<6, 6>(0, 0) = IMU_cov[i + 1];
                //MatrixXd cov_inv = cov.inverse();
                cov_inv.setIdentity();
    
                MatrixXd r_A = tmp_A.transpose() * cov_inv * tmp_A;
                VectorXd r_b = tmp_A.transpose() * cov_inv * tmp_b;
    
                // Scatter into the global normal equations; overlapping 6x6
                // velocity blocks are accumulated because pairs share v_j.
                A.block<6, 6>(i * 3, i * 3) += r_A.topLeftCorner<6, 6>();
                b.segment<6>(i * 3) += r_b.head<6>();
    
                A.bottomRightCorner<3, 3>() += r_A.bottomRightCorner<3, 3>();
                b.tail<3>() += r_b.tail<3>();
    
                A.block<6, 3>(i * 3, n_state - 3) += r_A.topRightCorner<6, 3>();
                A.block<3, 6>(n_state - 3, i * 3) += r_A.bottomLeftCorner<3, 6>();
            }
                // NOTE(review): A and b are not reset between the 4 iterations,
                // so contributions accumulate across re-linearizations — this
                // matches the original VINS-Mono code; confirm before changing.
                A = A * 1000.0;
                b = b * 1000.0;
                x = A.ldlt().solve(b);
                VectorXd dg = x.segment<2>(n_state - 3);
                // Update g0 on the tangent plane and re-project to the gravity sphere.
                g0 = (g0 + lxly * dg).normalized() * G.norm();
                //double s = x(n_state - 1);
        }   
        g = g0;
    }
    View Code

    求加速度计偏置

    • VINS中并没有计算该值,和外参T一样,是在后面直接进行优化。VI ORB-SLAM中则单独对该值进行了求取,求取方式同样为SVD分解,公式如下:

        

        

       

    // NOTE(review): excerpt from VI ORB-SLAM's initialization (Step 3); the
    // variables N, vKFInit, vTwc, pcb, Rcb and gwstar (gravity from Step 2)
    // are defined earlier in the full function and are not shown here.
    // With |g| constrained to a known magnitude, this step re-solves for the
    // scale s, a 2-DoF gravity direction correction dthetaxy, and the
    // accelerometer bias ba, via SVD on C*x=D.
    // Step 3.
        // Use gravity magnitude 9.8 as constraint
        // gI = [0;0;1], the normalized gravity vector in an inertial frame, NED type with no orientation.
        cv::Mat gI = cv::Mat::zeros(3,1,CV_32F);
        gI.at<float>(2) = 1;
        // Normalized approx. gravity vecotr in world frame
        cv::Mat gwn = gwstar/cv::norm(gwstar);
        // Debug log
        //cout<<"gw normalized: "<<gwn<<endl;
    
        // vhat = (gI x gw) / |gI x gw|
        // Axis-angle rotation aligning the inertial gravity direction gI with gwn.
        cv::Mat gIxgwn = gI.cross(gwn);
        double normgIxgwn = cv::norm(gIxgwn);
        cv::Mat vhat = gIxgwn/normgIxgwn;
        double theta = std::atan2(normgIxgwn,gI.dot(gwn));
        // Debug log
        //cout<<"vhat: "<<vhat<<", theta: "<<theta*180.0/M_PI<<endl;
    
        Eigen::Vector3d vhateig = Converter::toVector3d(vhat);
        Eigen::Matrix3d RWIeig = Sophus::SO3::exp(vhateig*theta).matrix();
        cv::Mat Rwi = Converter::toCvMat(RWIeig);
        cv::Mat GI = gI*ConfigParam::GetG();//9.8012;
        // Solve C*x=D for x=[s,dthetaxy,ba] (1+2+3)x1 vector
        cv::Mat C = cv::Mat::zeros(3*(N-2),6,CV_32F);
        cv::Mat D = cv::Mat::zeros(3*(N-2),1,CV_32F);
    
        for(int i=0; i<N-2; i++)
        {
            //KeyFrameInit* pKF1 = vKFInit[i];//vScaleGravityKF[i];
            KeyFrameInit* pKF2 = vKFInit[i+1];
            KeyFrameInit* pKF3 = vKFInit[i+2];
            // Delta time between frames
            double dt12 = pKF2->mIMUPreInt.getDeltaTime();
            double dt23 = pKF3->mIMUPreInt.getDeltaTime();
            // Pre-integrated measurements
            cv::Mat dp12 = Converter::toCvMat(pKF2->mIMUPreInt.getDeltaP());
            cv::Mat dv12 = Converter::toCvMat(pKF2->mIMUPreInt.getDeltaV());
            cv::Mat dp23 = Converter::toCvMat(pKF3->mIMUPreInt.getDeltaP());
            // Jacobians of the preintegrated p/v terms w.r.t. the accelerometer bias.
            cv::Mat Jpba12 = Converter::toCvMat(pKF2->mIMUPreInt.getJPBiasa());
            cv::Mat Jvba12 = Converter::toCvMat(pKF2->mIMUPreInt.getJVBiasa());
            cv::Mat Jpba23 = Converter::toCvMat(pKF3->mIMUPreInt.getJPBiasa());
            // Pose of camera in world frame
            cv::Mat Twc1 = vTwc[i].clone();//pKF1->GetPoseInverse();
            cv::Mat Twc2 = vTwc[i+1].clone();//pKF2->GetPoseInverse();
            cv::Mat Twc3 = vTwc[i+2].clone();//pKF3->GetPoseInverse();
            // Position of camera center
            cv::Mat pc1 = Twc1.rowRange(0,3).col(3);
            cv::Mat pc2 = Twc2.rowRange(0,3).col(3);
            cv::Mat pc3 = Twc3.rowRange(0,3).col(3);
            // Rotation of camera, Rwc
            cv::Mat Rc1 = Twc1.rowRange(0,3).colRange(0,3);
            cv::Mat Rc2 = Twc2.rowRange(0,3).colRange(0,3);
            cv::Mat Rc3 = Twc3.rowRange(0,3).colRange(0,3);
            // Stack to C/D matrix
            // lambda*s + phi*dthetaxy + zeta*ba = psi
            cv::Mat lambda = (pc2-pc1)*dt23 + (pc2-pc3)*dt12;
            cv::Mat phi = - 0.5*(dt12*dt12*dt23 + dt12*dt23*dt23)*Rwi*SkewSymmetricMatrix(GI);  // note: this has a '-', different to paper
            cv::Mat zeta = Rc2*Rcb*Jpba23*dt12 + Rc1*Rcb*Jvba12*dt12*dt23 - Rc1*Rcb*Jpba12*dt23;
            cv::Mat psi = (Rc1-Rc2)*pcb*dt23 + Rc1*Rcb*dp12*dt23 - (Rc2-Rc3)*pcb*dt12
                         - Rc2*Rcb*dp23*dt12 - Rc1*Rcb*dv12*dt23*dt12 - 0.5*Rwi*GI*(dt12*dt12*dt23 + dt12*dt23*dt23); // note:  - paper
            lambda.copyTo(C.rowRange(3*i+0,3*i+3).col(0));
            phi.colRange(0,2).copyTo(C.rowRange(3*i+0,3*i+3).colRange(1,3)); //only the first 2 columns, third term in dtheta is zero, here compute dthetaxy 2x1.
            zeta.copyTo(C.rowRange(3*i+0,3*i+3).colRange(3,6));
            psi.copyTo(D.rowRange(3*i+0,3*i+3));
    
            // Debug log
            //cout<<"iter "<<i<<endl;
        }
    
        // Use svd to compute C*x=D, x=[s,dthetaxy,ba] 6x1 vector
        // C = u*w*vt, u*w*vt*x=D
        // Then x = vt'*winv*u'*D
        cv::Mat w2,u2,vt2;
        // Note w2 is 6x1 vector by SVDecomp()
        // C is changed in SVDecomp() with cv::SVD::MODIFY_A for speed
        cv::SVDecomp(C,w2,u2,vt2,cv::SVD::MODIFY_A);
        // Debug log
        //cout<<"u2:"<<endl<<u2<<endl;
        //cout<<"vt2:"<<endl<<vt2<<endl;
        //cout<<"w2:"<<endl<<w2<<endl;
    
        // Compute winv
        // Invert singular values, nudging near-zero ones to avoid division by zero.
        cv::Mat w2inv=cv::Mat::eye(6,6,CV_32F);
        for(int i=0;i<6;i++)
        {
            if(fabs(w2.at<float>(i))<1e-10)
            {
                w2.at<float>(i) += 1e-10;
                // Test log
                cerr<<"w2(i) < 1e-10, w="<<endl<<w2<<endl;
            }
    
            w2inv.at<float>(i,i) = 1./w2.at<float>(i);
        }
        // Then y = vt'*winv*u'*D
        cv::Mat y = vt2.t()*w2inv*u2.t()*D;
    
        // Unpack x=[s, dthetaxy, ba].
        double s_ = y.at<float>(0);
        cv::Mat dthetaxy = y.rowRange(1,3);
        cv::Mat dbiasa_ = y.rowRange(3,6);
        Vector3d dbiasa_eig = Converter::toVector3d(dbiasa_);
    
        // dtheta = [dx;dy;0]
        // The z component of the gravity-direction correction is unobservable
        // (rotation about gravity), so only x/y are estimated; z stays 0.
        cv::Mat dtheta = cv::Mat::zeros(3,1,CV_32F);
        dthetaxy.copyTo(dtheta.rowRange(0,2));
        Eigen::Vector3d dthetaeig = Converter::toVector3d(dtheta);
        // Rwi_ = Rwi*exp(dtheta)
        Eigen::Matrix3d Rwieig_ = RWIeig*Sophus::SO3::exp(dthetaeig).matrix();
        cv::Mat Rwi_ = Converter::toCvMat(Rwieig_);
    View Code
  • 相关阅读:
    LeetCode 297. 二叉树的序列化与反序列化
    LeetCode 14. 最长公共前缀
    LeetCode 1300. 转变数组后最接近目标值的数组和
    bigo一面凉经
    LeetCode 128.最长连续序列
    LeetCode中二分查找算法的变种
    LeetCode 93. 复原IP地址
    LeetCode 1004. 最大连续1的个数 III
    LeetCode 1282. 用户分组
    多线程理解
  • 原文地址:https://www.cnblogs.com/mybrave/p/9564899.html
Copyright © 2011-2022 走看看