  • Using linear, feather and multiband blending to fuse already-stitched data

          所谓"blend",英文解释为“vt. 混合vi. 混合;协调n. 混合;掺合物”这里应该理解为是图像数据的融合。这是“识别->对准->融合”的最后一步。融合是决定拼接质量的关键一步,一方面它决定于图像对准的质量,一方面它本身的也直接对拼接的最终结果负责。

        The simplest and easiest-to-understand blending is linear blending, and it is a convenient example for explaining what blending is: simply put, within the blending region ... (cite related material here). Linear blending is not implemented in OpenCV, but it is simple and effective in its own right and can be used when the requirements are not very high. The functions are given here (further explanation to follow).
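        As a quick illustration of the idea, here is a minimal, hedged sketch of a column-wise linear blend: inside an overlap band the output is a weighted average of the two images, with the weight ramping linearly from one image to the other. The function name, x0 and blendWidth are illustrative and assume two already-aligned images of the same size and type.

    // A minimal sketch of linear (alpha-ramp) blending over a band of columns.
    // Assumes left and right are already aligned, same size and type (e.g. CV_8UC3).
    #include <opencv2/opencv.hpp>
    using namespace cv;

    void linearBlendColumns(const Mat& left, const Mat& right, Mat& dst, int x0, int blendWidth)
    {
        left.copyTo(dst);                               // left of the band: take the left image
        for (int i = 0; i < blendWidth; i++)
        {
            double alpha = (double)i / blendWidth;      // ramps from 0 to 1 across the band
            dst.col(x0 + i) = left.col(x0 + i) * (1.0 - alpha) + right.col(x0 + i) * alpha;
        }
        for (int x = x0 + blendWidth; x < dst.cols; x++)
            right.col(x).copyTo(dst.col(x));            // right of the band: take the right image
    }

        The MulitBlend function below applies the same idea row by row over a fixed band of 100 rows.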
    #pragma region mulitStitch
    /*----------------------------
     * Function : multi-image matching
     *----------------------------
     * Name     : MulitMatch
     * Access   : private
     * Returns  : void
     *
     * Param    : matinput      [in]   vector holding all images to be matched
     * Param    : matloc1       [out]  match points corresponding to the first image of each pair
     * Param    : matloc2       [out]  match points corresponding to the second image of each pair
     * Param    : match_method  [in]   template-matching method
     */
    void MulitMatch(deque<Mat>& matinput, deque<Point>& matloc1, deque<Point>& matloc2, int match_method)
    {
        Mat img_display1; Mat img_display2;
        Point matchLoc1; Point matchLoc2;
        for (int i = 0; i < matinput.size() - 1; i++)
        {
            // working references to the current pair
            img_display1 = matinput[i];
            img_display2 = matinput[i+1];
            // take a small region of the first image as the template (ROI)
            // Mat imagetmp (img_display1, Rect(img_display1.rows/2, img_display1.cols/2, 10, 10) );
            Mat imagetmp (img_display1, Rect(960, 240, 10, 10) );
            int result_cols = img_display1.cols - imagetmp.cols + 1;
            int result_rows = img_display1.rows - imagetmp.rows + 1;
            Mat imagematch;
            imagematch.create( result_rows, result_cols, CV_32FC1 ); // rows first, then cols
            /// match and normalize
            // match against the first image
            matchTemplate( img_display1, imagetmp, imagematch, match_method );
            normalize( imagematch, imagematch, 0, 1, NORM_MINMAX, -1, Mat() );
            double minVal; double maxVal;
            Point minLoc; Point maxLoc;
            minMaxLoc( imagematch, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
            // depending on the method, matchLoc is the best match point
            if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
            { matchLoc1 = minLoc; }
            else
            { matchLoc1 = maxLoc; }
            matloc1.push_back(matchLoc1); // append to the result sequence
            // match against the second image
            matchTemplate( img_display2, imagetmp, imagematch, match_method );
            normalize( imagematch, imagematch, 0, 1, NORM_MINMAX, -1, Mat() );
            minMaxLoc( imagematch, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
            // depending on the method, matchLoc is the best match point
            if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
            { matchLoc2 = minLoc; }
            else
            { matchLoc2 = maxLoc; }
            matloc2.push_back(matchLoc2); // append to the result sequence
        }
    }
    /*----------------------------
     * Function : multi-image alignment
     *----------------------------
     * Name     : MulitAlign
     * Access   : private
     * Returns  : Mat
     *
     * Param    : matinput      [in]   vector holding all images to be matched
     * Param    : matloc1       [in]   match points corresponding to the first image of each pair
     * Param    : matloc2       [in]   match points corresponding to the second image of each pair
     */
    Mat MulitAlign(deque<Mat>& matinput, deque<Point>& matloc1, deque<Point>& matloc2)
    {
        Mat outImage; // output image
        // image dimensions
        int nr = matinput[0].rows;
        int nl = matinput[0].cols * matinput[0].channels();
        int ioffset = 0; int ioffsetdetail = 0;
        // accumulate the total row offset
        for (int i = 0; i < matloc1.size() - 1; i++)
        {
            ioffset = ioffset + matloc1[i].y - matloc2[i].y;
        }
        outImage.create( matinput[0].rows + ioffset, matinput[0].cols, matinput[0].type());
        for (int i = 0; i < matloc1.size() - 1; i++)
        {
            if (i == 0) // the first pair
            {
                for (int a = 0; a < nr; a++) // first image
                {
                    const uchar* inData = matinput[0].ptr<uchar>(a);
                    uchar* outData = outImage.ptr<uchar>(a);
                    for (int j = 0; j < nl; j++)
                    {
                        outData[j] = inData[j];
                    }
                }
                for (int b = 0; b < nr; b++) // second image
                {
                    const uchar* inData = matinput[1].ptr<uchar>(b);
                    uchar* outData = outImage.ptr<uchar>(b + matloc1[0].y - matloc2[0].y);
                    for (int j = 0; j < nl; j++)
                    {
                        outData[j] = inData[j];
                    }
                }
                ioffsetdetail += matloc1[0].y - matloc2[0].y;
            }
            else // not the first pair
            {
                for (int b = 0; b < nr; b++)
                {
                    const uchar* inData = matinput[i+1].ptr<uchar>(b);
                    uchar* outData = outImage.ptr<uchar>(b + ioffsetdetail + matloc1[i].y - matloc2[i].y);
                    for (int j = 0; j < nl; j++)
                    {
                        outData[j] = inData[j];
                    }
                }
                ioffsetdetail += matloc1[i+1].y - matloc2[i].y;
            }
        }
        return outImage;
    }
    /*----------------------------
     * Function : multi-image blending
     *----------------------------
     * Name     : MulitBlend
     * Access   : private
     * Returns  : Mat
     *
     * Param    : matinput      [in]   input image sequence
     * Param    : imagesrc      [in]   the already-aligned image
     * Param    : matloc1       [in]   match position in the first image of each pair
     * Param    : matloc2       [in]   match position in the second image of each pair
     */
    Mat MulitBlend(deque<Mat>& matinput, const Mat& imagesrc, deque<Point>& matloc1, deque<Point>& matloc2)
    {
        Mat outImage; // output image
        imagesrc.copyTo(outImage); // copy the aligned image
        int ioffsetdetail = 0;
        double dblend = 0.0;
        for (int i = 0; i < matloc1.size() - 1; i++)
        {
            dblend = 0.0;
            int ioffset = matloc1[i].y - matloc2[i].y; // row offset between the two images
            for (int j = 0; j < 100; j++) // blend over a fixed band of 100 rows (reusing i and j here is not ideal)
            {
                outImage.row(ioffsetdetail + ioffset + j) = matinput[i].row(ioffset + j) * (1 - dblend) + matinput[i+1].row(j) * dblend;
                dblend = dblend + 0.01;
            }
            ioffsetdetail += ioffset;
        }
        return outImage;
    }
    #pragma endregion mulitStitch
     
    Recent versions of OpenCV (at least since 2.4.5) provide multiband and feather blending. In jsxyhelu's view, multiband is, on the whole, the best blending algorithm currently available (paper); it is cited in the classic (2007) image-stitching paper. It is worth mentioning that OpenCV's stitching function is implemented mainly on the basis of that 2007 paper; the very first reference of its algorithm implementation is that paper. Of course, a good algorithm can be more cumbersome to use, and simpler algorithms also have their place.
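    As an aside, the high-level stitching API built on that paper can be called directly. A minimal, hedged sketch follows (the include path is the 2.4.x one, the helper name stitchAll is made up, and imgs is assumed to already hold the input frames):

    // Minimal use of the high-level Stitcher (OpenCV 2.4.x style API).
    #include <opencv2/stitching/stitcher.hpp>
    #include <vector>
    using namespace cv;

    int stitchAll(const std::vector<Mat>& imgs, Mat& pano)
    {
        Stitcher stitcher = Stitcher::createDefault(false /* try_use_gpu */);
        Stitcher::Status status = stitcher.stitch(imgs, pano);
        return (status == Stitcher::OK) ? 0 : -1;   // non-zero means stitching failed
    }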
    ...... pipeline
    The main idea of multiband blending is that low-frequency content is blended over large spatial regions while high-frequency content is blended over small spatial regions; see the paper for the details. Feather blending is the familiar "feathering"; here we mainly care about the engineering implementation.
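    To make the multiband idea concrete, here is a hedged, self-contained sketch of a two-image Laplacian-pyramid blend. It only illustrates the principle and is not OpenCV's implementation; the names pyramidBlend, weight1 and numBands are made up for this example, and the inputs are assumed to be 8-bit 3-channel images with an 8-bit single-channel weight mask (255 where image 1 should dominate).

    // Illustrative two-image Laplacian-pyramid ("multiband") blend, not OpenCV's implementation.
    #include <opencv2/opencv.hpp>
    #include <vector>
    using namespace cv;

    Mat pyramidBlend(const Mat& img1, const Mat& img2, const Mat& weight1, int numBands = 5)
    {
        CV_Assert(img1.size() == img2.size() && img1.type() == CV_8UC3 && img2.type() == CV_8UC3);
        Mat f1, f2, w;
        img1.convertTo(f1, CV_32F);
        img2.convertTo(f2, CV_32F);
        weight1.convertTo(w, CV_32F, 1.0 / 255.0);      // weight of img1, in [0,1], single channel

        // Gaussian pyramids of both images and of the weight map.
        std::vector<Mat> g1, g2, gw;
        buildPyramid(f1, g1, numBands);
        buildPyramid(f2, g2, numBands);
        buildPyramid(w,  gw, numBands);

        // Blend from the coarsest level upwards, one Laplacian band per level:
        // low-frequency bands are mixed over wide areas, high-frequency bands over narrow ones.
        Mat blended;
        for (int lvl = numBands; lvl >= 0; lvl--)
        {
            Mat l1, l2;
            if (lvl == numBands) { l1 = g1[lvl]; l2 = g2[lvl]; }   // coarsest level: Gaussian residual
            else
            {
                Mat up1, up2;
                pyrUp(g1[lvl + 1], up1, g1[lvl].size());
                pyrUp(g2[lvl + 1], up2, g2[lvl].size());
                l1 = g1[lvl] - up1;                                // Laplacian band of img1
                l2 = g2[lvl] - up2;                                // Laplacian band of img2
            }
            Mat w3, w3inv;
            cvtColor(gw[lvl], w3, COLOR_GRAY2BGR);                 // replicate the weight per channel
            w3inv = Scalar::all(1.0) - w3;
            Mat band = l1.mul(w3) + l2.mul(w3inv);                 // weighted combination of this band

            if (lvl == numBands) { blended = band; }
            else
            {
                Mat up;
                pyrUp(blended, up, band.size());
                blended = up + band;                               // reconstruct the next finer level
            }
        }
        Mat out;
        blended.convertTo(out, CV_8UC3);
        return out;
    }

    OpenCV's detail::MultiBandBlender follows the same principle, generalized to many images and driven by the masks fed to it.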
     
    First, look at the image blender class:
    detail::Blender
    class detail::Blender
    Base class for all blenders.
    class CV_EXPORTS Blender
    {
    public:
        virtual ~Blender() {}
        enum { NO, FEATHER, MULTI_BAND };
        static Ptr<Blender> createDefault(int type, bool try_gpu = false);
        void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
        virtual void prepare(Rect dst_roi);
        virtual void feed(const Mat &img, const Mat &mask, Point tl);
        virtual void blend(Mat &dst, Mat &dst_mask);
    protected:
        Mat dst_, dst_mask_;
        Rect dst_roi_;
    };
     
    The Blender class in the detail namespace is the base class for all blenders. The implementations available are feather and multiband blending.
     
    blender = Blender::createDefault(blend_type, try_gpu); is the creation call; its two parameters determine which blender method is used and whether the GPU is tried.
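    For example, a small hedged sketch of the creation step (the helper name makeBlender is made up; the enum values are the ones in the listing above):

    // Picking the blending method at creation time.
    #include <opencv2/stitching/detail/blenders.hpp>
    using namespace cv;
    using namespace cv::detail;

    Ptr<Blender> makeBlender(bool useMultiBand, bool try_gpu = false)
    {
        int blend_type = useMultiBand ? Blender::MULTI_BAND : Blender::FEATHER;  // Blender::NO is also possible
        return Blender::createDefault(blend_type, try_gpu);
    }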
     
     
    detail::Blender::prepare
    Prepares the blender with the relevant data.
    C++: void detail::Blender::prepare(const std::vector<Point>& corners, const std::vector<Size>& sizes)
    Parameters
    corners – top-left corner points of the source images
    sizes – sizes of the source images
    Note that both arguments are vectors.
     
    detail::Blender::feed
    Feeds (processes) one image.
    C++: void detail::Blender::feed(const Mat& img, const Mat& mask, Point tl)
    Parameters
    img – source image
    mask – mask of the source image
    tl – top-left point of the image in the panorama
    Note that the images are fed one at a time.
     
    detail::Blender::blend
    Performs the blend; this is the final output step, which returns the full pano (panorama) from the blender.
    C++: void detail::Blender::blend(Mat& dst, Mat& dst_mask)
    Parameters
    dst – Final pano
    dst_mask – Final pano mask
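    Putting the three calls together, here is a hedged sketch of the whole sequence. The names blendAll, images, masks and corners are illustrative and assumed to come from the warping step; the blenders in this API generally expect 16-bit signed 3-channel (CV_16SC3) images.

    // Sketch: prepare -> feed each image -> blend into the final panorama.
    #include <opencv2/stitching/detail/blenders.hpp>
    #include <vector>
    using namespace cv;
    using namespace cv::detail;

    Mat blendAll(const std::vector<Mat>& images,       // warped images (CV_16SC3)
                 const std::vector<Mat>& masks,        // 8-bit masks of valid pixels
                 const std::vector<Point>& corners)    // top-left corner of each image in the pano
    {
        std::vector<Size> sizes;
        for (size_t i = 0; i < images.size(); ++i)
            sizes.push_back(images[i].size());

        Ptr<Blender> blender = Blender::createDefault(Blender::MULTI_BAND, false);
        blender->prepare(corners, sizes);              // allocate the destination ROI

        for (size_t i = 0; i < images.size(); ++i)
            blender->feed(images[i], masks[i], corners[i]);   // one image at a time

        Mat pano, pano_mask;
        blender->blend(pano, pano_mask);               // final pano and its mask
        return pano;
    }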
     
    detail::MultiBandBlender
    class detail::MultiBandBlender : public detail::Blender
    Blender which uses the multi-band blending algorithm (see [BA83], the paper mentioned above).
    class CV_EXPORTS MultiBandBlender : public Blender
    {
    public:
        MultiBandBlender(int try_gpu = false, int num_bands = 5);
        int numBands() const { return actual_num_bands_; }
        void setNumBands(int val) { actual_num_bands_ = val; }
        void prepare(Rect dst_roi);
        void feed(const Mat &img, const Mat &mask, Point tl);
        void blend(Mat &dst, Mat &dst_mask);
    private:
        /* hidden */
    };
    See also:
    detail::Blender
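    As a small note, the number of bands is the main tuning knob; a hedged sketch (the values and the function name are only examples):

    #include <opencv2/stitching/detail/blenders.hpp>
    using namespace cv;
    using namespace cv::detail;

    void multiBandExample()
    {
        // More bands spread the low-frequency transition over a wider area.
        MultiBandBlender mb(false /* try_gpu */, 5 /* num_bands */);
        mb.setNumBands(7);   // can also be adjusted after construction
    }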
     
    detail::FeatherBlender
    class detail::FeatherBlender : public detail::Blender
    Simple blender which mixes images at its borders.
    class CV_EXPORTS FeatherBlender : public Blender
    {
    public:
        FeatherBlender(float sharpness = 0.02f) { setSharpness(sharpness); }
        float sharpness() const { return sharpness_; }
        void setSharpness(float val) { sharpness_ = val; }
        void prepare(Rect dst_roi);
        void feed(const Mat &img, const Mat &mask, Point tl);
        void blend(Mat &dst, Mat &dst_mask);
        // Creates weight maps for fixed set of source images by their masks and top-left corners.
        // Final image can be obtained by simple weighting of the source images.
        Rect createWeightMaps(const std::vector<Mat> &masks, const std::vector<Point> &corners,
                              std::vector<Mat> &weight_maps);
    private:
        /* hidden */
    };
    See also:
    detail::Blender
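    For FeatherBlender the knob is sharpness, which controls how quickly the weight ramps up near the mask border; a hedged sketch (values and the function name are only examples):

    #include <opencv2/stitching/detail/blenders.hpp>
    using namespace cv;
    using namespace cv::detail;

    void featherExample()
    {
        // A larger sharpness saturates the weight faster, i.e. a narrower feathering band.
        FeatherBlender fb(0.02f /* sharpness */);
        fb.setSharpness(0.05f);   // can also be adjusted after construction
    }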
    Neither class is explained in any detail in the documentation, so some supplementary notes follow.
    We continue from estimateH.cpp in Chapter 9 of the OpenCV cookbook, changing its image loading to
    cv::Mat image1= cv::imread( "parliament1.bmp",1);
    cv::Mat image2= cv::imread( "parliament2.bmp",1);
    The result comes out fine and the images are stitched, but the seam is also very obvious.
     
    Comment out the code that is not needed. Note that the code in the book assumes the pictures move from right to left.
    // Note: the images in the original files move from right to left.
    cv::Point* p1 = new cv::Point(image1.cols,1);
    cv::Point* p2 = new cv::Point(image1.cols,image2.rows-1);
    cv::line(result,*p1,*p2,cv::Scalar(255,255,255),2);
    cv::namedWindow( "After warping0");
    cv::imshow( "After warping0",result);
    This draws a white line to roughly mark the seam position.
     
     
    cv::Mat result;
    cv::warpPerspective(image1,                 // input image
        result,                                 // output image
        homography,                             // homography
        cv::Size(2*image1.cols,image1.rows));   // size of output image
    cv::Mat resultback;
    result.copyTo(resultback);

    // Copy image 2 onto the first half of the full (warped) image
    cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
    image2.copyTo(half);
    // Display the warped image
    cv::namedWindow( "After warping");
    cv::imshow( "After warping",result);
    // Note: the images in the original files move from right to left.
    // cv::Point* p1 = new cv::Point(image1.cols,1);
    // cv::Point* p2 = new cv::Point(image1.cols,image2.rows-1);
    // cv::line(result,*p1,*p2,cv::Scalar(255,255,255),2);
    // cv::namedWindow("After warping0");
    // cv::imshow("After warping0",result);
    // linear blending
    Mat outImage; // output image
    result.copyTo(outImage); // copy the image
    double dblend = 0.0;
    int ioffset = image2.cols-100; // starting column of the blend region
    for (int i = 0; i < 100; i++)
    {
        outImage.col(ioffset+i) = image2.col(ioffset+i)*(1-dblend) + resultback.col(ioffset+i)*dblend;
        dblend = dblend + 0.01;
    }
    Note that what is blended here is not image1 with image2 directly, but image2 with the backed-up original warped result (resultback).
    Because the background is fairly uniform and the image resolution is not very high, the result of this blend is quite ...
     
  • Original article: https://www.cnblogs.com/jsxyhelu/p/3847377.html