  • PL-SVO Formula Derivation and Code Analysis: Map Point Reprojection and Feature Alignment

    Perform map point reprojection and feature alignment on the current frame:

      // map reprojection & feature alignment
      SVO_START_TIMER("reproject");
      reprojector_.reprojectMap(new_frame_, overlap_kfs_);
      SVO_STOP_TIMER("reproject");

    In the processFrame function, after the initial sparse image alignment, map reprojection and feature alignment are performed to add feature points to the new frame; the entry point is reprojectMap.

    Reprojector reprojector_; // projects points from other keyframes into the current frame

    FramePtr new_frame_; // the current frame
    vector< pair<FramePtr,size_t> > overlap_kfs_;  // all keyframes with an overlapping field of view; the paired number counts the common map points observed

    void Reprojector::reprojectMap(
        FramePtr frame,
        std::vector< std::pair<FramePtr,std::size_t> >& overlap_kfs)
    {
      resetReprojGrid(); // reset the reprojection grid
      // Identify those Keyframes which share a common field of view.
      SVO_START_TIMER("reproject_kfs");
      list< pair<FramePtr,double> > close_kfs;
      map_.getCloseKeyframes(frame, close_kfs);
    
    

    void Map::getCloseKeyframes(
    const FramePtr& frame,
    std::list< std::pair<FramePtr,double> >& close_kfs);

    const FramePtr& frame;    // the current image frame

    std::list< std::pair<FramePtr,double> >& close_kfs;  // stores pointers to the keyframes close to the current frame, together with their distances to it
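
    getCloseKeyframes itself is not listed in this post. For reference, here is a minimal sketch following the upstream SVO implementation (which PL-SVO inherits; member names such as key_pts_, isVisible and feat3D are assumptions taken from that codebase):

    void Map::getCloseKeyframes(
        const FramePtr& frame,
        std::list< std::pair<FramePtr,double> >& close_kfs) const
    {
      for(auto kf : keyframes_)
      {
        // check for an overlapping field of view via the five key points of the keyframe
        for(auto keypoint : kf->key_pts_)
        {
          if(keypoint == nullptr)
            continue;
          if(frame->isVisible(keypoint->feat3D->pos_))
          {
            // store the keyframe together with its translational distance to the frame
            close_kfs.push_back(std::make_pair(
                kf, (frame->T_f_w_.translation() - kf->T_f_w_.translation()).norm()));
            break; // one visible key point is enough to declare overlap
          }
        }
      }
    }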


    Sort the overlapping KFs by how close they are (the second value of each pair in the list):
      close_kfs.sort(boost::bind(&std::pair<FramePtr, double>::second, _1) <
                     boost::bind(&std::pair<FramePtr, double>::second, _2));
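
    The boost::bind expression simply compares the second member of each pair; with C++11 or later, an equivalent (and arguably clearer) lambda would be:

      close_kfs.sort([](const pair<FramePtr,double>& a,
                        const pair<FramePtr,double>& b)
                     { return a.second < b.second; });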
    
    

    Reproject all map features of the N closest KFs with overlap:

      size_t n_kfs = 0;
      overlap_kfs.reserve(options_.max_n_kfs);
      for(auto it_frame=close_kfs.begin(), ite_frame=close_kfs.end();
          it_frame!=ite_frame && n_kfs<options_.max_n_kfs; ++it_frame, ++n_kfs)
      {
        FramePtr ref_frame = it_frame->first;
        // add the current frame to the (output) list of keyframes with overlap
        // initialize the counter of candidates from this frame (2nd element in pair) to zero
        overlap_kfs.push_back(pair<FramePtr,size_t>(ref_frame,0));
        // Consider for candidate each mappoint in the ref KF that the current (input) KF observes
        // We only store in which grid cell the points fall.
        // Add each corresponding valid new Candidate to its cell in the grid.
        int num_pt_success = setKfCandidates( frame, ref_frame->pt_fts_ );
        overlap_kfs.back().second += num_pt_success;
        // Add each line segment in the ref KF that the current (input) KF observes
        int num_seg_success = setKfCandidates( frame, ref_frame->seg_fts_ );
        overlap_kfs.back().second += num_seg_success;
      }
      SVO_STOP_TIMER("reproject_kfs");
    Consider as a candidate each map point in the reference KF that the current (input) frame observes.

    We only store which grid cell each point falls into.

    Each corresponding valid new Candidate is added to its cell in the grid.

    template<class FeatureT>
    int Reprojector::setKfCandidates(FramePtr frame, list<FeatureT*> fts)
    {
      int candidate_counter = 0;
      for(auto it=fts.begin(), ite_ftr=fts.end(); it!=ite_ftr; ++it)
      {
        // check if the feature has a 3D object assigned
        if((*it)->feat3D == NULL)
          continue;
        // make sure we project a point only once
        if((*it)->feat3D->last_projected_kf_id_ == frame->id_)
          continue;
        (*it)->feat3D->last_projected_kf_id_ = frame->id_;
        if(reproject(frame, (*it)->feat3D))
          // increment the number of candidates taken successfully
          candidate_counter++;
      }
      return candidate_counter;
    }

    int Reprojector::setKfCandidates(FramePtr frame, list<FeatureT*> fts)

    stores each matched pair of a map point and the pixel it projects to in the current frame into grid_.cells.

     
    bool Reprojector::reproject(FramePtr frame, Point* point)
    {
      // get position in current frame image of the world 3D point
      Vector2d cur_px(frame->w2c(point->pos_));
      if(frame->cam_->isInFrame(cur_px.cast<int>(), 8)) // 8px is the patch size in the matcher
      {
        // get linear index (wrt row-wise vectorized grid matrix)
        // of the image grid cell in which the point px lies
        const int k = static_cast<int>(cur_px[1]/grid_.cell_size)*grid_.grid_n_cols
                    + static_cast<int>(cur_px[0]/grid_.cell_size);
        grid_.cells.at(k)->push_back(PointCandidate(point, cur_px));
        return true;
      }
      return false;
    }
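
    For intuition, a quick worked example of the linear cell index k (the grid parameters here are illustrative, not taken from the code):

      // illustrative values, not from the code
      const int cell_size = 30, grid_n_cols = 25;
      const double px_x = 100.4, px_y = 200.7;         // projected pixel
      const int k = static_cast<int>(px_y/cell_size)*grid_n_cols
                  + static_cast<int>(px_x/cell_size);  // row 6, col 3 -> k = 6*25 + 3 = 153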
    
    

    Definitions of the data types involved:

      struct PointCandidate {
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW
        Point* pt;       //!< 3D point.
        Vector2d px;     //!< projected 2D pixel location.
        PointCandidate(Point* pt, Vector2d& px) : pt(pt), px(px) {}
      };
      // A cell is just a list of Reprojector::Candidate
      typedef std::list<PointCandidate, aligned_allocator<PointCandidate> > Cell;
      typedef std::vector<Cell*> CandidateGrid;
    
      struct LineCandidate {
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW
        LineSeg* ls;       //!< 3D line segment.
        Vector2d spx;      //!< projected 2D start point.
        Vector2d epx;      //!< projected 2D end point.
        LineCandidate(LineSeg* ls, Vector2d& spx, Vector2d& epx) : ls(ls), spx(spx), epx(epx) {}
      };
      /// The candidate segments are collected in a single list for the whole image.
      /// There is no clear heuristic about how to discretize the image space for the segment case.
      typedef std::list<LineCandidate, aligned_allocator<LineCandidate> > LineCandidates;
      typedef std::vector<LineCandidates*> LineCandidateGrid;
    
      /// The grid stores a set of candidate matches. For every grid cell we try to find one match.
      struct Grid
      {
        CandidateGrid cells;
        vector<int> cell_order;
        int cell_size;
        int grid_n_cols;
        int grid_n_rows;
      };
    
      struct GridLs
      {
        LineCandidateGrid cells;
        vector<int> cell_order;
        int cell_size;
        int grid_n_cols;
        int grid_n_rows;
      };
    
      Grid    grid_;
      GridLs  gridls_;
      Matcher matcher_;
      Map&    map_;
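
    resetReprojGrid, called at the top of reprojectMap, is not shown in the post. Based on SVO's resetGrid, a plausible sketch (the exact body in PL-SVO may differ slightly):

      void Reprojector::resetReprojGrid()
      {
        n_matches_ = 0;
        n_trials_  = 0;
        // empty every point cell and every segment cell left over from the previous frame
        for(Cell* c : grid_.cells)
          c->clear();
        for(LineCandidates* c : gridls_.cells)
          c->clear();
      }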

    Now project all map candidates.

    Point candidates:

      // Now project all map candidates
      // (candidates in the map are created from converged seeds)
      SVO_START_TIMER("reproject_candidates");
      // Point candidates
      // (same logic as above to populate the cell grid but taking candidate points from the map object)
      setMapCandidates(frame, map_.point_candidates_);

    Segment candidates:

      // Segment candidates
      setMapCandidates(frame, map_.segment_candidates_);
      SVO_STOP_TIMER("reproject_candidates");

      Map&    map_;  // the map

     MapPointCandidates point_candidates_;   // container of converged 3D points that have not yet been assigned to two keyframes

    MapSegmentCandidates segment_candidates_;   // container of converged 3D segments that have not yet been assigned to two keyframes

    class MapPointCandidates
    {
    public:
      typedef pair<Point*, PointFeat*> PointCandidate;
      typedef list<PointCandidate> PointCandidateList;
    
      /// The depth-filter is running in a parallel thread and fills the candidate list.
      /// This mutex controls concurrent access to point_candidates.
      boost::mutex mut_;
    
      /// Candidate points are created from converged seeds.
      /// Until the next keyframe, these points can be used for reprojection and pose optimization.
      PointCandidateList candidates_;
      list< Point* > trash_points_;
    
      MapPointCandidates();
      ~MapPointCandidates();
    
      /// Add a candidate point.
      void newCandidatePoint(Point* point, double depth_sigma2);
    
      /// Adds the feature to the frame and deletes candidate from list.
      void addCandidatePointToFrame(FramePtr frame);
    
      /// Remove a candidate point from the list of candidates.
      bool deleteCandidatePoint(Point* point);
    
      /// Remove all candidates that belong to a frame.
      void removeFrameCandidates(FramePtr frame);
    
      /// Reset the candidate list, remove and delete all points.
      void reset();
    
      void deleteCandidate(PointCandidate& c);
    
      void emptyTrash();
    };
    void MapPointCandidates::deleteCandidate(PointCandidate& c)
    {
      // camera-rig: another frame might still be pointing to the candidate point
      // therefore, we can't delete it right now.
      delete c.second; c.second=NULL;
      c.first->type_ = Point::TYPE_DELETED;
      trash_points_.push_back(c.first);
    }
    
    
    template<class MapCandidatesT>
    void Reprojector::setMapCandidates(FramePtr frame, MapCandidatesT &map_candidates)
    {
      boost::unique_lock<boost::mutex> lock(map_candidates.mut_); // the mutex will be unlocked when out of scope
      auto it=map_candidates.candidates_.begin();
      while(it!=map_candidates.candidates_.end())
      {
        if(!reproject(frame, it->first))
        {
          // if the reprojection of the map candidate point failed,
          // increment the counter of failed reprojections (assess the point quality)
          it->first->n_failed_reproj_ += 3;
          if(it->first->n_failed_reproj_ > 30)
          {
            // if the reprojection failed too many times, remove the map candidate point
            map_candidates.deleteCandidate(*it);
            it = map_candidates.candidates_.erase(it);
            continue;
          }
        }
        ++it;
      } // end-while-loop
    }
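
    Note the arithmetic of this quality check: each failed reprojection adds 3 to n_failed_reproj_ and the candidate is removed once the counter exceeds 30, i.e. after roughly 10 failed frames. Regular map points handled in refine() below use a finer step of +1 per failure, with thresholds of 15 (TYPE_UNKNOWN) and 30 (TYPE_CANDIDATE).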
    
    

    Now we go through each grid cell and select at most one matching point, so that in the end every cell contains at most one reprojected feature.


      SVO_START_TIMER("feature_align");
      for(size_t i=0; i<grid_.cells.size(); ++i)
      {
        // we prefer good quality points over unknown quality (more likely to match)
        // and unknown quality over candidates (position not optimized)
        // we use the random cell order to visit cells uniformly on the grid
        if(refineBestCandidate(*grid_.cells.at(grid_.cell_order[i]), frame))
          ++n_matches_;
        if(n_matches_ > (size_t) Config::maxFts())
          break; // the random visit order of cells assures uniform distribution
                 // of the features even if we break early (maxFts reached soon)
      }

    We prefer points of good quality over points of unknown quality (more likely to match), and unknown quality over candidates (whose position has not been optimized).

    We use a random cell order so that cells are visited uniformly over the grid.
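
    The random order is stored in cell_order at grid initialization (upstream SVO uses std::random_shuffle there). A minimal modern-C++ sketch of producing such an order, using the members of the Grid struct above:

      #include <algorithm>
      #include <numeric>
      #include <random>

      // fill cell_order with 0..N-1 and shuffle it once, so that iterating over
      // cell_order visits the grid cells in a random but fixed order
      grid_.cell_order.resize(grid_.grid_n_cols * grid_.grid_n_rows);
      std::iota(grid_.cell_order.begin(), grid_.cell_order.end(), 0);
      std::shuffle(grid_.cell_order.begin(), grid_.cell_order.end(),
                   std::mt19937(std::random_device{}()));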

    
    
    bool Reprojector::refineBestCandidate(Cell& cell, FramePtr frame)
    {
      // sort the candidates inside the cell according to their quality
      cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2));
      Cell::iterator it=cell.begin();
      // in principle, iterate through the whole list of features in the cell
      // in reality, there is maximum one point per cell, so the loop returns if successful
      while(it!=cell.end())
      {
        ++n_trials_;
        // Try to refine the point feature in frame from current initial estimate
        bool success = refine( it->pt, it->px, frame );
        // Failed or not, this candidate was finally being erased in original code
        it = cell.erase(it); // it takes next position in the list as output of .erase
        if(success)
          // Maximum one point per cell.
          return true;
      }
      return false;
    }
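
    The comparator used by the sort above orders candidates by the type of their 3D point. In upstream SVO it boils down to comparing the type_ enum, whose numeric order encodes the preference stated earlier (a sketch; PL-SVO may differ in naming):

      bool Reprojector::pointQualityComparator(PointCandidate& lhs, PointCandidate& rhs)
      {
        // Point::type_ is an enum ordered by quality:
        // TYPE_GOOD > TYPE_UNKNOWN > TYPE_CANDIDATE > TYPE_DELETED
        if(lhs.pt->type_ > rhs.pt->type_)
          return true;
        return false;
      }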
    
    

    Try to refine the point feature in the frame starting from the current initial estimate:

    bool Reprojector::refine(Point* pt, Vector2d& px_est, FramePtr frame)
    {
      if(pt->type_ == Point::TYPE_DELETED)
        return false;
    
      bool found_match = true;
      if(options_.find_match_direct)
        // refine px position in the candidate by directly applying subpix refinement
        // internally, it is optimizing photometric error
        // of the candidate px patch wrt the closest-view reference feature patch
        found_match = matcher_.findMatchDirect(*pt, *frame, px_est);
      // TODO: What happens if options_.find_match_direct is false??? Shouldn't findEpipolarMatchDirect be here?
    
      // apply quality logic
      {
        if(!found_match)
        {
          // if there is no match found for this point, decrease quality
          pt->n_failed_reproj_++;
          // consider removing the point from map depending on point type and quality
          if(pt->type_ == Point::TYPE_UNKNOWN && pt->n_failed_reproj_ > 15)
            map_.safeDeletePoint(pt);
          if(pt->type_ == Point::TYPE_CANDIDATE  && pt->n_failed_reproj_ > 30)
            map_.point_candidates_.deleteCandidatePoint(pt);
          return false;
        }
        // if there is successful match found for this point, increase quality
        pt->n_succeeded_reproj_++;
        if(pt->type_ == Point::TYPE_UNKNOWN && pt->n_succeeded_reproj_ > 10)
          pt->type_ = Point::TYPE_GOOD;
      }
    
      // create new point feature for this frame with the refined (aligned) candidate position in this image
      PointFeat* new_feature = new PointFeat(frame.get(), px_est, matcher_.search_level_);
      frame->addFeature(new_feature);
    
      // Here we add a reference in the feature to the 3D point, the other way
      // round is only done if this frame is selected as keyframe.
      // TODO: why not give it directly to the constructor PointFeat(frame.get(), pt, it->px, matcher_.search_level_)
      new_feature->feat3D = pt;
    
      PointFeat* pt_ftr = static_cast<PointFeat*>( matcher_.ref_ftr_ );
      if(pt_ftr != NULL)
      {
        if(pt_ftr->type == PointFeat::EDGELET)
        {
          new_feature->type = PointFeat::EDGELET;
          new_feature->grad = matcher_.A_cur_ref_*pt_ftr->grad;
          new_feature->grad.normalize();
        }
      }
    
      // If the keyframe is selected and we reproject the rest, we don't have to
      // check this point anymore.
    //  it = cell.erase(it);
    
      // Maximum one point per cell.
      return true;
    }

    Refine the pixel position of the candidate by directly applying subpixel refinement.

    Internally, this optimizes the photometric error of the candidate pixel patch with respect to the reference feature patch from the closest view.

    bool Matcher::findMatchDirect(
        const Point& pt,
        const Frame& cur_frame,
        Vector2d& px_cur)
    {
      // get reference feature in closest view (frame)
      if(!pt.getCloseViewObs(cur_frame.pos(), ref_ftr_))
        return false;
    
      if(!ref_ftr_->frame->cam_->isInFrame(
          ref_ftr_->px.cast<int>()/(1<<ref_ftr_->level), halfpatch_size_+2, ref_ftr_->level))
        return false;
    
      // warp affine
      warp::getWarpMatrixAffine(
          *ref_ftr_->frame->cam_, *cur_frame.cam_, ref_ftr_->px, ref_ftr_->f,
          (ref_ftr_->frame->pos() - pt.pos_).norm(),
          cur_frame.T_f_w_ * ref_ftr_->frame->T_f_w_.inverse(), ref_ftr_->level, A_cur_ref_);
      search_level_ = warp::getBestSearchLevel(A_cur_ref_, Config::nPyrLevels()-1);
      // is img of ref_frame fully available at any time? that means keeping stored previous images, for how long?
      warp::warpAffine(A_cur_ref_, ref_ftr_->frame->img_pyr_[ref_ftr_->level], ref_ftr_->px,
                       ref_ftr_->level, search_level_, halfpatch_size_+1, patch_with_border_);
      // patch_with_border_ stores the square patch (of pixel intensities) around the reference feature
      // once the affine transformation is applied to the original reference image
      // the border is necessary for gradient operations (intensities at the border must be precomputed by interpolation too!)
      createPatchFromPatchWithBorder();
    
      // px_cur should be set
      Vector2d px_scaled(px_cur/(1<<search_level_));
    
      bool success = false;
      PointFeat* pt_ftr = static_cast<PointFeat*>(ref_ftr_);
      if(pt_ftr->type == PointFeat::EDGELET)
      {
        Vector2d dir_cur(A_cur_ref_*pt_ftr->grad);
        dir_cur.normalize();
        success = feature_alignment::align1D(
              cur_frame.img_pyr_[search_level_], dir_cur.cast<float>(),
              patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
      }
      else
      {
        success = feature_alignment::align2D(
          cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
          options_.align_max_iter, px_scaled);
      }
      px_cur = px_scaled * (1<<search_level_);
      return success;
    }
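
    warp::getWarpMatrixAffine estimates the affine warp between the reference and the current patch. For reference, here is a sketch following the upstream SVO implementation (PL-SVO inherits it; exact parameter types may differ): the 3D point and two pixel offsets along u and v are back-projected at the feature depth, reprojected into the current frame, and the finite differences form the columns of A_cur_ref.

      void getWarpMatrixAffine(
          const vk::AbstractCamera& cam_ref, const vk::AbstractCamera& cam_cur,
          const Vector2d& px_ref, const Vector3d& f_ref, const double depth_ref,
          const SE3& T_cur_ref, const int level_ref, Matrix2d& A_cur_ref)
      {
        const int halfpatch_size = 5;
        const Vector3d xyz_ref(f_ref*depth_ref);
        // back-project two pixel offsets at the same depth as the feature
        Vector3d xyz_du_ref(cam_ref.cam2world(px_ref + Vector2d(halfpatch_size,0)*(1<<level_ref)));
        Vector3d xyz_dv_ref(cam_ref.cam2world(px_ref + Vector2d(0,halfpatch_size)*(1<<level_ref)));
        xyz_du_ref *= xyz_ref[2]/xyz_du_ref[2];
        xyz_dv_ref *= xyz_ref[2]/xyz_dv_ref[2];
        // project the three points into the current frame
        const Vector2d px_cur(cam_cur.world2cam(T_cur_ref*xyz_ref));
        const Vector2d px_du(cam_cur.world2cam(T_cur_ref*xyz_du_ref));
        const Vector2d px_dv(cam_cur.world2cam(T_cur_ref*xyz_dv_ref));
        // finite differences give the affine warp
        A_cur_ref.col(0) = (px_du - px_cur)/halfpatch_size;
        A_cur_ref.col(1) = (px_dv - px_cur)/halfpatch_size;
      }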

    Feature alignment (subpixel refinement):

    bool align2D(
        const cv::Mat& cur_img,
        uint8_t* ref_patch_with_border,
        uint8_t* ref_patch,
        const int n_iter,
        Vector2d& cur_px_estimate,
        bool no_simd)
    {
    #ifdef __ARM_NEON__
      if(!no_simd)
        return align2D_NEON(cur_img, ref_patch_with_border, ref_patch, n_iter, cur_px_estimate);
    #endif
    
      const int patch_size_ = 8;
      Patch patch( patch_size_, cur_img );
      bool converged=false;
    
      /* Precomputation step: derivatives in reference patch */
      // compute derivative of template and prepare inverse compositional
      float __attribute__((__aligned__(16))) ref_patch_dx[patch.area];
      float __attribute__((__aligned__(16))) ref_patch_dy[patch.area];
      Matrix3f H; H.setZero();
    
      // compute gradient and hessian
      const int ref_step = patch_size_+2; // assumes ref_patch_with_border comes from a specific Mat object with certain size!!! Bad way to do it?
      float* it_dx = ref_patch_dx;
      float* it_dy = ref_patch_dy;
      for(int y=0; y<patch_size_; ++y)
      {
        uint8_t* it = ref_patch_with_border + (y+1)*ref_step + 1;
        for(int x=0; x<patch_size_; ++x, ++it, ++it_dx, ++it_dy)
        {
          Vector3f J;
          J[0] = 0.5 * (it[1] - it[-1]);
          J[1] = 0.5 * (it[ref_step] - it[-ref_step]);
          J[2] = 1;
          *it_dx = J[0];
          *it_dy = J[1];
          H += J*J.transpose();
        }
      }
      Matrix3f Hinv = H.inverse();
      float mean_diff = 0;
    
      /* Iterative loop: residues and updates with patch in current image */
      // Compute pixel location in new image:
      float u = cur_px_estimate.x();
      float v = cur_px_estimate.y();
    
      // termination condition
      const float min_update_squared = 0.03*0.03;
      const int cur_step = cur_img.step.p[0];
    //  float chi2 = 0;
      Vector3f update; update.setZero();
      for(int iter = 0; iter<n_iter; ++iter)
      {
        // TODO very rarely this can happen, maybe H is singular? should not be at corner.. check
    //    if(isnan(cur_px_estimate[0]) || isnan(cur_px_estimate[1]))
    //      return false;
    
        // set patch position for current feature location
        patch.setPosition( cur_px_estimate );
        // abort the optimization if the patch does not fully lie within the image
        if(!patch.isInFrame(patch.halfsize))
          break;
        // compute interpolation weights
        patch.computeInterpWeights();
    
        // set ROI in the current image to traverse
        patch.setRoi();
        // loop through search_patch, interpolate
        uint8_t* it_ref = ref_patch;
        float* it_ref_dx = ref_patch_dx;
        float* it_ref_dy = ref_patch_dy;
        uint8_t* ptr; // pointer that will point to memory locations of the ROI (same memory as for the original full cur_img)
    //    float new_chi2 = 0.0;
        Vector3f Jres; Jres.setZero();
        for(int y=0; y<patch.size; ++y)
        {
          // get the pointer to first element in row y of the patch ROI
          ptr = patch.roi.ptr(y);
          for(int x=0; x<patch.size; ++x, ++ptr, ++it_ref, ++it_ref_dx, ++it_ref_dy)
          {
            float search_pixel = patch.wTL*ptr[0] + patch.wTR*ptr[1] + patch.wBL*ptr[cur_step] + patch.wBR*ptr[cur_step+1];
            float res = search_pixel - *it_ref + mean_diff;
            Jres[0] -= res*(*it_ref_dx);
            Jres[1] -= res*(*it_ref_dy);
            Jres[2] -= res;
    //        new_chi2 += res*res;
          }
        }
    
    
    /*
        if(iter > 0 && new_chi2 > chi2)
        {
    #if SUBPIX_VERBOSE
          cout << "error increased." << endl;
    #endif
          u -= update[0];
          v -= update[1];
          break;
        }
        chi2 = new_chi2;
    */
        update = Hinv * Jres;
        u += update[0];
        v += update[1];
        cur_px_estimate = Vector2d(u,v);
        mean_diff += update[2];
    
    #if SUBPIX_VERBOSE
        cout << "Iter " << iter << ":"
             << "\t u=" << u << ", v=" << v
             << "\t update = " << update[0] << ", " << update[1]
    //         << "\t new chi2 = " << new_chi2 << endl;
    #endif
    
        if(update[0]*update[0]+update[1]*update[1] < min_update_squared)
        {
    #if SUBPIX_VERBOSE
          cout << "converged." << endl;
    #endif
          converged=true;
          break;
        }
      }
    
      cur_px_estimate << u, v;
      return converged;
    }
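
    To summarize the loop in formulas (my reading of the code above): align2D runs inverse compositional Gauss-Newton over the patch translation $(u, v)$ plus a mean intensity offset $\beta$ that absorbs illumination changes,

    $$ \min_{u,v,\beta} \sum_{(x,y)\in\text{patch}} \big( I_{\text{cur}}(x+u,\,y+v) - I_{\text{ref}}(x,y) + \beta \big)^2 $$

    The Jacobian $J = (\partial I_{\text{ref}}/\partial x,\ \partial I_{\text{ref}}/\partial y,\ 1)^T$ and the Hessian $H = \sum J J^T$ are precomputed once on the reference patch; each iteration accumulates $J_{res} = -\sum J\,r$, applies the update $\delta = H^{-1} J_{res}$ directly to $(u, v, \beta)$, and terminates once $\delta_u^2 + \delta_v^2 < 0.03^2$.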

    // Create a new point feature for the current frame, using the refined (aligned) position in this image:

      PointFeat* new_feature = new PointFeat(frame.get(), px_est, matcher_.search_level_);
      frame->addFeature(new_feature);

    Here we add a reference from the feature to the 3D point; the reverse reference (point to feature) is only added if this frame is later selected as a keyframe.

    
    
      if(pt_ftr != NULL)
      {
        if(pt_ftr->type == PointFeat::EDGELET)
        {
          new_feature->type = PointFeat::EDGELET;
          new_feature->grad = matcher_.A_cur_ref_*pt_ftr->grad;
          new_feature->grad.normalize();
        }
      }
    
    

    Now try to refine each line segment candidate in the same way.

    The random visit order of the cells ensures a uniform distribution of features even if we break early (once maxFtsSegs is reached).

      for(size_t i=0; i<gridls_.cells.size(); ++i)
      {
        if(refineBestCandidate(*gridls_.cells.at(gridls_.cell_order[i]), frame))
          ++n_ls_matches_;
        if(n_ls_matches_ > (size_t) Config::maxFtsSegs())
          break; // the random visit order of cells assures uniform distribution
                 // of the features even if we break early (maxFts reached soon)
      }
      /*for(auto it = lines_.begin(), ite = lines_.end(); it!=ite; ++it)
      {
        if(refine(it->ls,it->spx,it->epx,frame))
          ++n_ls_matches_;
        if(n_ls_matches_ > (size_t) Config::maxFtsSegs())
          break;
      }*/
      SVO_STOP_TIMER("feature_align");
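
    The segment overload of refine() is not listed in the post; only its signature is visible in the commented-out loop above, refine(it->ls, it->spx, it->epx, frame). A heavily hedged sketch of the expected structure, by analogy with the point version (the matcher entry point and the LineFeat constructor are assumptions, not confirmed PL-SVO API):

      bool Reprojector::refine(LineSeg* ls, Vector2d& spx, Vector2d& epx, FramePtr frame)
      {
        if(ls->type_ == LineSeg::TYPE_DELETED)
          return false;
        // align both endpoints against their closest-view reference patches
        // (hypothetical matcher call; the actual segment matching may differ)
        bool found_match = matcher_.findMatchDirectSegment(*ls, *frame, spx, epx);
        if(!found_match)
        {
          ls->n_failed_reproj_++;
          return false;
        }
        ls->n_succeeded_reproj_++;
        // create a new segment feature with the refined endpoints, mirroring the point case
        LineFeat* new_feature = new LineFeat(frame.get(), spx, epx, matcher_.search_level_);
        frame->addFeature(new_feature);
        new_feature->feat3D = ls;
        return true;
      }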

    This concludes feature alignment.


     

     
  • Original article: https://www.cnblogs.com/feifanrensheng/p/10720712.html