zoukankan      html  css  js  c++  java
  • 【20160924】GOCVHelper 图像增强部分(3)

    //顶帽去光差,radius为模板半径
        Mat moveLightDiff(Mat src,int radius){
            Mat dst;
            Mat srcclone = src.clone();
            Mat mask = Mat::zeros(radius*2,radius*2,CV_8U);
            circle(mask,Point(radius,radius),radius,Scalar(255),-1);
            //顶帽
            erode(srcclone,srcclone,mask);
            dilate(srcclone,srcclone,mask);
            dst =  src - srcclone;
            return dst;

        }

    算法来自于冈萨雷斯《数字图像处理教程》形态学篇章。完全按照教程实现,具备一定作用。
     
        //将 DEPTH_8U型二值图像进行细化  经典的Zhang并行快速细化算法
        //细化算法
        void thin(const Mat &srcMat &dstconst int iterations){
            const int height =src.rows -1;
            const int width  =src.cols -1;
            //拷贝一个数组给另一个数组
            if(src.data != dst.data)
                src.copyTo(dst);
            int n = 0,i = 0,j = 0;
            Mat tmpImg;
            uchar *pU, *pC, *pD;
            bool isFinished =FALSE;
            for(n=0; n<iterationsn++){
                dst.copyTo(tmpImg); 
                isFinished =FALSE;   //一次 先行后列扫描 开始
                //扫描过程一 开始
                for(i=1; i<height;  i++) {
                    pU = tmpImg.ptr<uchar>(i-1);
                    pC = tmpImg.ptr<uchar>(i);
                    pD = tmpImg.ptr<uchar>(i+1);
                    for(int j=1; j<widthj++){
                        if(pC[j] > 0){
                            int ap=0;
                            int p2 = (pU[j] >0);
                            int p3 = (pU[j+1] >0);
                            if (p2==0 && p3==1)
                                ap++;
                            int p4 = (pC[j+1] >0);
                            if(p3==0 && p4==1)
                                ap++;
                            int p5 = (pD[j+1] >0);
                            if(p4==0 && p5==1)
                                ap++;
                            int p6 = (pD[j] >0);
                            if(p5==0 && p6==1)
                                ap++;
                            int p7 = (pD[j-1] >0);
                            if(p6==0 && p7==1)
                                ap++;
                            int p8 = (pC[j-1] >0);
                            if(p7==0 && p8==1)
                                ap++;
                            int p9 = (pU[j-1] >0);
                            if(p8==0 && p9==1)
                                ap++;
                            if(p9==0 && p2==1)
                                ap++;
                            if((p2+p3+p4+p5+p6+p7+p8+p9)>1 && (p2+p3+p4+p5+p6+p7+p8+p9)<7){
                                if(ap==1){
                                    if((p2*p4*p6==0)&&(p4*p6*p8==0)){                           
                                        dst.ptr<uchar>(i)[j]=0;
                                        isFinished =TRUE;                            
                                    }
                                }
                            }                    
                        }
     
                    } //扫描过程一 结束
                    dst.copyTo(tmpImg); 
                    //扫描过程二 开始
                    for(i=1; i<height;  i++){
                        pU = tmpImg.ptr<uchar>(i-1);
                        pC = tmpImg.ptr<uchar>(i);
                        pD = tmpImg.ptr<uchar>(i+1);
                        for(int j=1; j<widthj++){
                            if(pC[j] > 0){
                                int ap=0;
                                int p2 = (pU[j] >0);
                                int p3 = (pU[j+1] >0);
                                if (p2==0 && p3==1)
                                    ap++;
                                int p4 = (pC[j+1] >0);
                                if(p3==0 && p4==1)
                                    ap++;
                                int p5 = (pD[j+1] >0);
                                if(p4==0 && p5==1)
                                    ap++;
                                int p6 = (pD[j] >0);
                                if(p5==0 && p6==1)
                                    ap++;
                                int p7 = (pD[j-1] >0);
                                if(p6==0 && p7==1)
                                    ap++;
                                int p8 = (pC[j-1] >0);
                                if(p7==0 && p8==1)
                                    ap++;
                                int p9 = (pU[j-1] >0);
                                if(p8==0 && p9==1)
                                    ap++;
                                if(p9==0 && p2==1)
                                    ap++;
                                if((p2+p3+p4+p5+p6+p7+p8+p9)>1 && (p2+p3+p4+p5+p6+p7+p8+p9)<7){
                                    if(ap==1){
                                        if((p2*p4*p8==0)&&(p2*p6*p8==0)){                           
                                            dst.ptr<uchar>(i)[j]=0;
                                            isFinished =TRUE;                            
                                        }
                                    }
                                }                    
                            }
                        }
                    } //一次 先行后列扫描完成          
                    //如果在扫描过程中没有删除点,则提前退出
                    if(isFinished ==FALSE)
                        break
                }
            }
        }
    //end of thin
    细化算法,在处理毛笔字一类的时候效果很好。使用的过程中,注意需要保留的部分要处理为白色,也就是scalar(255)
     





  • 相关阅读:
    Java多线程编程模式实战指南(一):Active Object模式--转载
    Improving Lock Performance in Java--reference
    The 10 Most Important Security Controls Missing in JavaEE--reference
    ES索引文件和数据文件大小对比——splunk索引文件大小远小于ES,数据文件的压缩比也较ES更低,有趣的现象:ES数据文件zip压缩后大小和splunk的数据文件相当!词典文件tim/tip+倒排doc/pos和cfs文件是索引的大头
    Lucene4.2源码解析之fdt和fdx文件的读写(续)——fdx文件存储一个个的Block,每个Block管理着一批Chunk,通过docID读取到document需要完成Segment、Block、Chunk、document四级查询,引入了LZ4算法对fdt的chunk docs进行了实时压缩/解压
    Lucene4.2源码解析之fdt和fdx文件的读写——fdx文件存储一个个的Block,每个Block管理着一批Chunk,通过docID读取到document需要完成Segment、Block、Chunk、document四级查询,引入了LZ4算法对fdt的chunk docs进行了实时压缩/解压
    lucene反向索引——倒排表无论是文档号及词频,还是位置信息,都是以跳跃表的结构存在的
    lucene正向索引(续)——一个文档的所有filed+value都在fdt文件中!!!
    Choosing a fast unique identifier (UUID) for Lucene——有时间再看下
    Lucene核心数据结构——FST存词典,跳表存倒排或者roarning bitmap 见另外一个文章
  • 原文地址:https://www.cnblogs.com/jsxyhelu/p/5907544.html
Copyright © 2011-2022 走看看