  Computer Vision: Image Descriptors (Image Classification)

    1. Definitions: image descriptor, feature descriptor, and feature vector

    Feature vector: a list of numbers used to represent and quantify an image; in simple terms, the image converted into a numeric list. The entries of the vector describe the various attributes of the image.

    Image descriptor: an algorithm (or method) that governs how an entire image is converted into a feature vector, quantifying the image's shape, color, texture, or any combination of the three. Given one image, an image descriptor returns one feature vector. It is mainly used for image classification, and it lacks the ability to distinguish the different objects within an image.

    Feature descriptor: an algorithm (or method) that operates on local regions of an image and returns one feature vector per region. Given one image, it returns many feature vectors (it mainly deals with local parts of the image). It is mainly used for image matching (visual detection), i.e., matching objects across images.
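
    To make the distinction concrete, here is a minimal sketch (assuming OpenCV 3+ and a hypothetical input file example.png): a global image descriptor such as per-channel statistics returns exactly one feature vector for the whole image, while a feature descriptor such as ORB returns one vector per detected keypoint.

    import cv2
    import numpy as np

    image = cv2.imread("example.png")  # hypothetical input image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # image descriptor: the whole image -> ONE feature vector (6 numbers here)
    (means, stds) = cv2.meanStdDev(image)
    globalFeatures = np.concatenate([means, stds]).flatten()
    print("image descriptor -> 1 vector of shape {}".format(globalFeatures.shape))

    # feature descriptor: local regions -> MANY feature vectors (one per keypoint)
    orb = cv2.ORB_create()
    (kps, localFeatures) = orb.detectAndCompute(gray, None)
    print("feature descriptor -> {} vectors".format(len(kps)))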

    2. Color channel statistics

    Run: python color_channel_stats.py

    Principle: compute statistics for each color channel of the image (i.e., the R, G, and B channels). The channel means and standard deviations quantify and represent the image's color distribution, which can then be used to classify the image. (This mainly exploits color differences, e.g., separating photos of different scenes.)

    from scipy.spatial import distance as dist
    from imutils import paths
    import numpy as np
    import cv2
     
    imagePaths = sorted(list(paths.list_images("dinos")))
    index = {}
     
    for imagePath in imagePaths:
        image = cv2.imread(imagePath)
        filename = imagePath[imagePath.rfind("/") + 1:]
    
        (means, stds) = cv2.meanStdDev(image)  # compute the mean and standard deviation of each color channel
        features = np.concatenate([means, stds]).flatten()  # concatenate the per-channel means and standard deviations to form our feature vector
        index[filename] = features
    query = cv2.imread(imagePaths[0])
    cv2.imshow("Query (trex_01.png)", query)
    keys = sorted(index.keys())
     
    for (i, k) in enumerate(keys):
        if k == "trex_01.png":
            continue
    
        image = cv2.imread(imagePaths[i])
        d = dist.euclidean(index["trex_01.png"], index[k])  # Euclidean distance between the query image's feature vector and each feature vector in the dataset; the smaller d is, the more similar the color channels, and hence the images
     
        cv2.putText(image, "%.2f" % (d), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
        cv2.imshow(k, image)
    
    cv2.waitKey(0)

    3. Color histograms

    Principle: apply k-means clustering from machine learning to the color histograms, grouping images by their color distributions without any labeled reference. (The key is understanding color histograms.)

    File structure:

    |--- pyimagesearch

    |    |--- __init__.py
    |    |--- descriptors
    |    |    |---- __init__.py
    |    |    |--- labhistogram.py
    |--- cluster_histograms.py
     
    Question: 1. What is imutils.is_cv2() for? (It returns True when OpenCV 2.4 is installed; these scripts use it to branch between the OpenCV 2 and OpenCV 3+ APIs, whose call signatures differ, e.g., cv2.normalize and cv2.findContours.)
    labhistogram.py
    import cv2
    import imutils
     
    class LabHistogram:
        def __init__(self, bins):
            self.bins = bins
     
        def describe(self, image, mask=None):
            lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
            hist = cv2.calcHist([lab], [0, 1, 2], mask, self.bins,
                [0, 256, 0, 256, 0, 256])  # compute a 3D histogram in the LAB color space
            # normalize the histogram so that the image size does not affect it
            if imutils.is_cv2():
                hist = cv2.normalize(hist).flatten()
     
            else:
                hist = cv2.normalize(hist,hist).flatten()
     
            return hist    

    cluster_histograms.py

    from pyimagesearch.descriptors.labhistogram import LabHistogram
    from sklearn.cluster import KMeans
    from imutils import paths
    import numpy as np
    import argparse
    import cv2
    
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--dataset", required=True,
        help="path to the input dataset directory")
    ap.add_argument("-k", "--clusters", type=int, default=2,
        help="# of clusters to generate")#默认设置k值为2,及将图片分为俩类。
    args = vars(ap.parse_args())
    
    desc = LabHistogram([8, 8, 8])
    data = []
     
    imagePaths = list(paths.list_images(args["dataset"]))
    imagePaths = np.array(sorted(imagePaths))
     
    for imagePath in imagePaths:
        image = cv2.imread(imagePath)
        hist = desc.describe(image)
        data.append(hist)  # add the histogram to our data list
     
    # cluster the color histograms
    clt = KMeans(n_clusters=args["clusters"])
    labels = clt.fit_predict(data)
    
    for label in np.unique(labels):
        # grab the image paths assigned to the current cluster ID
        labelPaths = imagePaths[np.where(labels == label)]
        # display all images belonging to this cluster
        for (i, path) in enumerate(labelPaths):
            image = cv2.imread(path)
            cv2.imshow("Cluster {}, Image #{}".format(label + 1, i + 1), image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    4. Hu Moments

    Purpose: object shape detection; operates on binary images and extracts the shapes of the objects they contain.

    Run: python extract_hu_moments.py

    extract_hu_moments.py

    import cv2
    import imutils
     
    image = cv2.imread("planes.png")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    moments = cv2.HuMoments(cv2.moments(image)).flatten()
    print("ORIGINAL MOMENTS: {}".format(moments))
    cv2.imshow("Image", image)
    cv2.waitKey(0)
    
    # find the external contours of each object in the image
    cnts = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
     
    for (i, c) in enumerate(cnts):
        (x, y, w, h) = cv2.boundingRect(c)
        roi = image[y:y + h, x:x + w]
        # extract the Hu Moments of each shape's ROI
        moments = cv2.HuMoments(cv2.moments(roi)).flatten()
    
        print("MOMENTS FOR PLANE #{}: {}".format(i + 1, moments))
        cv2.imshow("ROI #{}".format(i + 1), roi)
        cv2.waitKey(0)

    Purpose: randomly generate a dataset (distractor circles plus one rectangle).

    Run: python generate_images.py --output output

    generate_images.py

    import numpy as np
    import argparse
    import uuid
    import cv2
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output", required=True,
        help="Path to the output directory")
    ap.add_argument("-n", "--num-images", type=int, default=500,
        help="# of disctrator images to generate")
    args = vars(ap.parse_args())
     
    for i in range(0, args["num_images"]):
        image = np.zeros((500, 500, 3), dtype="uint8")
        (x, y) = np.random.uniform(low=105, high=405, size=(2,)).astype("int0")
        r = np.random.uniform(low=25, high=100, size=(1,)).astype("int0")[0]
    
        color = np.random.uniform(low=0, high=255, size=(3,)).astype("int0")
        color = tuple(map(int, color))
        cv2.circle(image, (x, y), r, color, -1)
        cv2.imwrite("{}/{}.jpg".format(args["output"], uuid.uuid4()), image)
    image = np.zeros((500, 500, 3), dtype="uint8")
    topLeft = np.random.uniform(low=25, high=225, size=(2,)).astype("int0")
    botRight = np.random.uniform(low=250, high=400, size=(2,)).astype("int0")
     
    color = np.random.uniform(low=0, high=255, size=(3,)).astype("int0")
    color = tuple(map(int, color))
    cv2.rectangle(image, tuple(topLeft), tuple(botRight), color, -1)
    cv2.imwrite("{}/{}.jpg".format(args["output"], uuid.uuid4()), image)

    Purpose: find the outlier shape in the dataset.

    Run: python find_rectangle.py --dataset output

    find_rectangle.py

    from sklearn.metrics.pairwise import pairwise_distances
    import numpy as np
    import argparse
    import glob
    import cv2
    import imutils
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--dataset", required=True, help="Path to the dataset directory")
    args = vars(ap.parse_args())
     
    imagePaths = sorted(glob.glob(args["dataset"] + "/*.jpg"))
    data = []
     
    for imagePath in imagePaths:
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY)[1]
     
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        c = max(cnts, key=cv2.contourArea)
        (x, y, w, h) = cv2.boundingRect(c)
        roi = cv2.resize(thresh[y:y + h, x:x + w], (50, 50))  # resize to a fixed size so that image dimensions do not affect the moments
        moments = cv2.HuMoments(cv2.moments(roi)).flatten()
        data.append(moments)
    D = pairwise_distances(data).sum(axis=1)
    i = np.argmax(D)  # grab the shape with the largest summed distance; the circles are all close to one another, while the rectangle is far from everything else
     
    image = cv2.imread(imagePaths[i])
    print("Found square: {}".format(imagePaths[i]))
    cv2.imshow("Outlier", image)
    cv2.waitKey(0)

    5. Zernike Moments

    Quantify the shapes in an image with Zernike moments, then search an image for a particular target shape.

    from scipy.spatial import distance as dist
    import numpy as np
    import mahotas
    import cv2
    import imutils
    
    def describe_shapes(image):
        shapeFeatures = []
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (13, 13), 0)
        cv2.imshow("2", blurred)
        thresh = cv2.threshold(blurred, 120, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=4)
        thresh = cv2.erode(thresh, None, iterations=2)
        cv2.imshow("1", thresh)
        cv2.waitKey(0)
        
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    
        for c in cnts:
            mask = np.zeros(image.shape[:2], dtype = "uint8")
            cv2.drawContours(mask, [c], -1, 255, -1)
            (x, y, w, h) = cv2.boundingRect(c)
            roi = mask[y:y + h, x:x + w]
            cv2.imshow("roi", roi)
            cv2.waitKey(0)
            features = mahotas.features.zernike_moments(roi, cv2.minEnclosingCircle(c)[1], degree = 8)
            shapeFeatures.append(features)
    
        return(cnts, shapeFeatures)
    
    refImage = cv2.imread("2.jpg")
    (_, gameFeatures)  = describe_shapes(refImage)
    shapesImage = cv2.imread("1.jpg")
    (cnts, shapeFeatures) = describe_shapes(shapesImage)
    D = dist.cdist(gameFeatures, shapeFeatures)
    i = np.argmin(D)  # index of the smallest distance (argmin over the flattened matrix, which works here because the reference image contributes a single feature row)
    
    for (j, c) in enumerate(cnts):
        if i != j:
            box = cv2.minAreaRect(c)
            box = np.int0(cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box))
            cv2.drawContours(shapesImage, [box], -1, (0, 0, 255), 2)
    
    box = cv2.minAreaRect(cnts[i])
    box = np.int0(cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box))
    cv2.drawContours(shapesImage, [box], -1, (0, 255, 0), 2)
    (x, y, w, h) = cv2.boundingRect(cnts[i])
    cv2.putText(shapesImage, "FOUND!", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 3)
    cv2.imshow("Input Image", refImage)
    cv2.imshow("Detected Shapes", shapesImage)
    cv2.waitKey(0)

    6. Haralick texture features

    Learn from a training set of textures, then classify the test images by texture:

    from sklearn.svm import LinearSVC
    import argparse
    import glob
    import mahotas
    import cv2
    
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--training", required = True, help = "Path to the dataset of textures")
    ap.add_argument("-t", "--test", required = True,help = "Path to the test images" )
    args = vars(ap.parse_args())
    
    print("[INFO] extracting features...")
    data = []
    labels = []
    for imagePath in glob.glob(args["training"] + "/*.jpg"):
        image = cv2.imread(imagePath)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    
        texture = imagePath[imagePath.rfind("/") + 1:].split("_")[0]
        features = mahotas.features.haralick(image).mean(axis = 0)
        data.append(features)
        labels.append(texture)
    
    print("[INFO] training model...")
    model = LinearSVC(C = 10.0, random_state = 42)
    model.fit(data, labels)
    print("[INFO] classifying...")
    
    for imagePath in glob.glob(args["test"] + "/*.jpg"):
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        features = mahotas.features.haralick(gray).mean(axis = 0)
        print(features)
        pred = model.predict(features.reshape(1, -1))[0]
        print(pred)
        cv2.putText(image, pred, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3)
        cv2.imshow("Image", image)
        cv2.waitKey(0)

    7. Local Binary Patterns (LBP)

    Principle: LBP processes pixels locally instead of using a gray-level co-occurrence matrix. It quantifies small local regions into a feature vector for the image, which is then used for classification.

    Note: it is important to keep in mind the effect of the radius r and the number of points p. The more points p you sample, the more patterns you can encode, but you also increase the computational cost. Increasing the radius r lets you capture larger texture details in the image; however, increasing r without also increasing p loses the local discriminative power of the LBP descriptor. (A small check of the dimensionality side of this tradeoff follows.)
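
    A minimal sketch of that dimensionality tradeoff (assuming scikit-image is installed; the random array stands in for a real grayscale image). With the "uniform" LBP method, p sampled points yield p + 2 histogram bins, so the feature vector grows with p:

    from skimage import feature
    import numpy as np

    # a random grayscale "image", purely for illustration
    image = np.random.randint(0, 256, size=(128, 128)).astype("uint8")

    for (p, r) in [(8, 1), (16, 2), (24, 3)]:
        lbp = feature.local_binary_pattern(image, p, r, method="uniform")
        # the uniform method produces p + 2 possible pattern labels
        (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, p + 3))
        print("p={}, r={} -> {} bins".format(p, r, hist.shape[0]))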

    Example: fashion search (finding similar shirts)

    File structure:
    |--- pyimagesearch
    |    |--- __init__.py
    |    |--- descriptors
    |    |    |---- __init__.py
    |    |    |--- localbinarypatterns.py
    |--- search_shirts.py

    Purpose: define the Local Binary Patterns descriptor.

    localbinarypatterns.py

    from skimage import feature
    import numpy as np
    
    class LocalBinaryPatterns:
        def __init__(self, numPoints, radius):  # the number of points sampled around the central pixel and the pattern radius; together these determine the computational cost
    
            self.numPoints = numPoints
            self.radius = radius
    
        def describe(self, image, eps=1e-7):
            lbp = feature.local_binary_pattern(image, self.numPoints, self.radius, method="uniform")
            (hist, _) = np.histogram(lbp.ravel(), bins=range(0, self.numPoints + 3),
                range=(0, self.numPoints + 2))
    
            # normalize the histogram
            hist = hist.astype("float")
            hist /= (hist.sum() + eps)
    
            # return the histogram of Local Binary Patterns
            return hist

    Purpose: test how well the LBP descriptor captures texture.

    Run: python search_shirts.py --dataset shirts --query queries/query_01.jpg

    search_shirts.py:

    from __future__ import print_function
    from pyimagesearch import LocalBinaryPatterns
    from imutils import paths
    import numpy as np
    import argparse
    import cv2
    
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--dataset", required=True, help="path to the dataset of shirt images")
    ap.add_argument("-q", "--query", required=True, help="path to the query image")
    args = vars(ap.parse_args())
    
    desc = LocalBinaryPatterns(24, 8)
    index = {}
    
    for imagePath in paths.list_images(args["dataset"]):
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        hist = desc.describe(gray)
    
        filename = imagePath[imagePath.rfind("/") + 1:]
        index[filename] = hist
    
    query = cv2.imread(args["query"])
    queryFeatures = desc.describe(cv2.cvtColor(query, cv2.COLOR_BGR2GRAY))
    
    cv2.imshow("Query", query)
    results = {}
    
    for (k, features) in index.items():
    
        # chi-squared distance between the two LBP histograms; the epsilon prevents division by zero
        d = 0.5 * np.sum(((features - queryFeatures) ** 2) / (features + queryFeatures + 1e-10))
        results[k] = d
    
    
    results = sorted([(v, k) for (k, v) in results.items()])[:3]  # sort by distance (smaller = more similar) and keep the top 3 results
    
    for (i, (score, filename)) in enumerate(results):  # display the top 3 results
        print("#%d. %s: %.4f" % (i + 1, filename, score))
        image = cv2.imread(args["dataset"] + "/" + filename)
        cv2.imshow("Result #{}".format(i + 1), image)
        cv2.waitKey(0)

    8. Histogram of Oriented Gradients (HOG)

    Principle: the HOG descriptor mainly describes the structural shape and appearance of objects in an image, which makes it an excellent descriptor for object classification. Because HOG captures local intensity gradients and edge directions, it also makes a good texture descriptor.

    Note: the more pixels per cell in pixels_per_cell, the coarser the representation; smaller values of pixels_per_cell produce a finer-grained representation (more pronounced contours). The sketch below shows the effect on feature dimensionality.
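
    A minimal sketch of that effect (assuming scikit-image; the random array is purely illustrative). Shrinking pixels_per_cell multiplies the number of cells, and hence the length of the HOG feature vector:

    from skimage import feature
    import numpy as np

    logo = np.random.randint(0, 256, size=(100, 200)).astype("uint8")

    for pps in [(4, 4), (10, 10), (20, 20)]:
        H = feature.hog(logo, orientations=9, pixels_per_cell=pps,
            cells_per_block=(2, 2), transform_sqrt=True, block_norm="L1")
        print("pixels_per_cell={} -> {} features".format(pps, H.shape[0]))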

    Question: 1. What does sklearn.neighbors.KNeighborsClassifier do?

    Explanation: http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html (a toy example follows)
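
    A toy sketch of the classifier's fit/predict interface (the data below is made up purely for illustration). With n_neighbors=1, each test point simply receives the label of its single nearest training point:

    from sklearn.neighbors import KNeighborsClassifier

    # tiny made-up training set: 1-D points with two labels
    X = [[0.0], [0.2], [2.0], [2.2]]
    y = ["audi", "audi", "ford", "ford"]

    model = KNeighborsClassifier(n_neighbors=1)
    model.fit(X, y)
    print(model.predict([[0.1], [2.1]]))  # -> ['audi' 'ford']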

    Run: python recognize_car_logos.py --training car_logos --test test_images

    recognize_car_logos.py

    from sklearn.neighbors import KNeighborsClassifier
    from skimage import exposure
    from skimage import feature
    from imutils import paths
    import argparse
    import imutils
    import cv2
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--training", required=True, help="Path to the logos training dataset")
    ap.add_argument("-t", "--test", required=True, help="Path to the test dataset")
    args = vars(ap.parse_args())
     
    print "[INFO] extracting features..."
    data = []
    labels = []
    
    for imagePath in paths.list_images(args["training"]):  #提取每个标志的hog特征向量
        make = imagePath.split("/")[-2]
     
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        edged = imutils.auto_canny(gray)
     
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        c = max(cnts, key=cv2.contourArea)
     
        (x, y, w, h) = cv2.boundingRect(c)
        logo = gray[y:y + h, x:x + w]
        logo = cv2.resize(logo, (200, 100))
     
        H = feature.hog(logo, orientations=9, pixels_per_cell=(10, 10),
            cells_per_block=(2, 2), transform_sqrt=True, block_norm="L1")
     
        data.append(H)
        labels.append(make)
    
    print("[INFO] training classifier...") 
    model = KNeighborsClassifier(n_neighbors=1)  # train a k-NN classifier (k=1) on the HOG features
    model.fit(data, labels)
    print("[INFO] evaluating...")
    
    for (i, imagePath) in enumerate(paths.list_images(args["test"])):
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        logo = cv2.resize(gray, (200, 100))
    
        (H, hogImage) = feature.hog(logo, orientations=9, pixels_per_cell=(10, 10),
            cells_per_block=(2, 2), transform_sqrt=True, block_norm="L1", visualise=True)
        pred = model.predict(H.reshape(1, -1))[0]
     
        hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255))
        hogImage = hogImage.astype("uint8")
        cv2.imshow("HOG Image #{}".format(i + 1), hogImage)
     
        cv2.putText(image, pred.title(), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
            (0, 255, 0), 3)
        cv2.imshow("Test Image #{}".format(i + 1), image)
        cv2.waitKey(0)

    9. Keypoint detection

    9.1 FAST keypoint detection

    Principle: there must be at least n contiguous pixels along a circular perimeter of radius r that are all brighter or darker than the center pixel by some threshold t.

    Question: can the parameters, the radius r and the count n, be modified? (See the sketch below for what OpenCV exposes.)
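
    A partial answer, as a hedged sketch of the OpenCV 3+ API: the intensity threshold t is directly tunable, while the circle geometry is not free-form; you choose among fixed variants (e.g. TYPE_9_16 requires 9 contiguous pixels out of a 16-pixel circle):

    import cv2

    # threshold: the intensity difference t; type: the circle variant, which
    # fixes both the perimeter length and the required contiguous run n
    detector = cv2.FastFeatureDetector_create(
        threshold=25,
        nonmaxSuppression=True,
        type=cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)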

    Run: python fast_keypoint.py

    fast_keypoint.py

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("FAST")
        kps = detector.detect(gray)
     
    else:
        detector = cv2.FastFeatureDetector_create()
        kps = detector.detect(gray, None)
     
    print("# of keypoints: {}".format(len(kps)))
     
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.2 Harris keypoint detection

    Principle: sum the squared gradient values of a region separately in the x and y directions:

    sum(G_x)^2 and sum(G_y)^2

    If both of these values are sufficiently "large", the region can be declared a corner. This process is run for every pixel of the input image. The approach works because a corner region contains large amounts of both horizontal and vertical gradient; when both are present, we know we have found a corner.

    Questions: 1. What do the parameters of the harris function mean?

      • img - the input image, of dtype float32.
      • blockSize - the neighborhood size considered for corner detection.
      • ksize - the aperture (window) size used for the Sobel derivatives.
      • k - the free parameter of the Harris detector equation; typical values lie in [0.04, 0.06].

    2. When does harris() get called? (In the else branch below, i.e., under OpenCV 3+, where cv2.FeatureDetector_create no longer exists.)

    harris_keypoint.py

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    def harris(gray, blockSize=2, apertureSize=3, k=0.1, T=0.02):
        gray = np.float32(gray)
        H = cv2.cornerHarris(gray, blockSize, apertureSize, k)
     
        kps = np.argwhere(H > T * H.max())
        kps = [cv2.KeyPoint(pt[1], pt[0], 3) for pt in kps]
     
        return kps
    
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("HARRIS")
        kps = detector.detect(gray)
     
    
    else:
        kps = harris(gray)
     
    print("# of keypoints: {}".format(len(kps)))
    
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    # show the image
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.3 GFTT keypoint detection

    Principle: GFTT computes the following score R to decide whether a region is a corner: R = min(lambda_1, lambda_2), i.e., simply the minimum of the two eigenvalues. If R is greater than our threshold T (i.e., R >= T), the region is marked as a corner. (This is an improvement on the Harris detector.)

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    def gftt(gray, maxCorners=0, qualityLevel=0.01, minDistance=1,
        mask=None, blockSize=3, useHarrisDetector=False, k=0.04):
    
        kps = cv2.goodFeaturesToTrack(gray, maxCorners, qualityLevel,
            minDistance, mask=mask, blockSize=blockSize,
            useHarrisDetector=useHarrisDetector, k=k)
     
        return [cv2.KeyPoint(pt[0][0], pt[0][1], 3) for pt in kps]
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("GFTT")
        kps = detector.detect(gray)
    
    else:
        kps = gftt(gray)
     
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    print("# of keypoints: {}".format(len(kps)))
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.4 DoG keypoint detector

    DoG finds keypoints across changes in image scale, so the circles drawn for the detected keypoints vary in size.

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("SIFT")
        kps = detector.detect(gray)
     
    else:
        detector = cv2.xfeatures2d.SIFT_create()
        (kps, _) = detector.detectAndCompute(gray, None)
     
    print("# of keypoints: {}".format(len(kps)))
    
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.5 Fast Hessian keypoint detection

    Purpose: like Difference of Gaussians, the Fast Hessian keypoint detector locates repeatable "blob"-like regions in an image. These regions may be edges, corners, or both.

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
    
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("SURF")
        kps = detector.detect(gray)
     
    else:
        detector = cv2.xfeatures2d.SURF_create()
        (kps, _) = detector.detectAndCompute(gray, None)
     
    print("# of keypoints: {}".format(len(kps)))
    
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.6 STAR keypoint detection

    Use case: detecting "blob"-like regions in an image.

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("STAR")
        kps = detector.detect(gray)
     
    else:
        detector = cv2.xfeatures2d.StarDetector_create()
        kps = detector.detect(gray)
     
    print("# of keypoints: {}".format(len(kps)))
     
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.7 MSER keypoint detection

    Purpose: the MSER detector finds "blob"-like structures in an image. These regions are assumed to be small, of relatively uniform pixel intensity, and surrounded by contrasting pixels.

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    # detect MSER keypoints in the image
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("MSER")
        kps = detector.detect(gray)
     
    else:
        detector = cv2.MSER_create()
        kps = detector.detect(gray, None)
     
    print("# of keypoints: {}".format(len(kps)))
     
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.8 Dense keypoint detection

    Purpose: the Dense detector marks every k-th pixel of the image as a keypoint.

    from __future__ import print_function
    import numpy as np
    import argparse
    import cv2
    import imutils
     
    def dense(image, step, radius):
        kps = []
     
        for x in range(0, image.shape[1], step):
            for y in range(0, image.shape[0], step):
                kps.append(cv2.KeyPoint(x, y, radius * 2))
     
        return kps
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-s", "--step", type=int, default=28, help="step (in pixels) of the dense detector")
    args = vars(ap.parse_args())
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    kps = []
    radii = (4, 8, 12)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("Dense")
        detector.setInt("initXyStep", args["step"])
        rawKps = detector.detect(gray)
     
    else:
        rawKps = dense(gray, args["step"], 1)
     
    for rawKp in rawKps:
        for r in radii:
            kp = cv2.KeyPoint(x=rawKp.pt[0], y=rawKp.pt[1], _size=r * 2)
            kps.append(kp)
     
    print("# dense keypoints: {}".format(len(rawKps)))
    print("# dense + multi radii keypoints: {}".format(len(kps)))
     
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 1)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.9 BRISK keypoint detector

    Use case: a multi-scale detector (a FAST-based detector run over a scale pyramid).

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("BRISK")
        kps = detector.detect(gray)
     
    else:
        detector = cv2.BRISK_create()
        kps = detector.detect(gray, None)
     
    print("# of keypoints: {}".format(len(kps)))
    
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    9.10 The ORB keypoint detector

    Purpose: ORB is used to detect corners in an image.

    from __future__ import print_function
    import numpy as np
    import cv2
    import imutils
     
    image = cv2.imread("next.png")
    orig = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("ORB")
        kps = detector.detect(gray)
     
    else:
        detector = cv2.ORB_create()
        kps = detector.detect(gray, None)
     
    print("# of keypoints: {}".format(len(kps)))
     
    for kp in kps:
        r = int(0.5 * kp.size)
        (x, y) = np.int0(kp.pt)
        cv2.circle(image, (x, y), r, (0, 255, 255), 2)
     
    cv2.imshow("Images", np.hstack([orig, image]))
    cv2.waitKey(0)

    10. Local invariant descriptors

    10.1 SIFT

    Purpose: detect keypoints and extract SIFT feature vectors from an image.

    from __future__ import print_function
    import argparse
    import cv2
    import imutils
    
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
     
    image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("SIFT")
        extractor = cv2.DescriptorExtractor_create("SIFT")
     
    
        kps = detector.detect(gray)
        (kps, descs) = extractor.compute(gray, kps)
     
    else:
    
        detector = cv2.xfeatures2d.SIFT_create()
     
        (kps, descs) = detector.detectAndCompute(gray, None)
     
    
    print("[INFO] # of keypoints detected: {}".format(len(kps)))
    print("[INFO] feature vector shape: {}".format(descs.shape))

    10.2 RootSIFT

    Purpose: define the RootSIFT extractor and compute keypoint features. (RootSIFT L1-normalizes each SIFT vector and takes its element-wise square root, so comparing RootSIFT vectors with Euclidean distance is equivalent to comparing the original SIFT vectors with the Hellinger kernel.)

    rootsift.py

    import numpy as np
    import cv2
    import imutils
     
    class RootSIFT:
        def __init__(self):
            if imutils.is_cv2():
                self.extractor = cv2.DescriptorExtractor_create("SIFT")
     
            else:
                self.extractor = cv2.xfeatures2d.SIFT_create()
     
        def compute(self, image, kps, eps=1e-7):
            if imutils.is_cv2():
                (kps, descs) = self.extractor.compute(image, kps)
     
            else:
                (kps, descs) = self.extractor.detectAndCompute(image, None)
     
            if len(kps) == 0:
                return ([], None)
    
            # L1-normalize the SIFT vectors, then take the element-wise square root (the Hellinger mapping)
            descs /= (descs.sum(axis=1, keepdims=True) + eps)
            descs = np.sqrt(descs)
    
            return (kps, descs)

    Purpose: extract RootSIFT descriptors from an image.

    extract_rootsift.py

    from __future__ import print_function
    from pyimagesearch.descriptors import RootSIFT
    import argparse
    import cv2
    import imutils
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
     
    image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("SIFT")
        extractor = RootSIFT()
     
        kps = detector.detect(gray)
     
    else:
        detector = cv2.xfeatures2d.SIFT_create()
        extractor = RootSIFT()
     
        (kps, _) = detector.detectAndCompute(gray, None)
    
    (kps, descs) = extractor.compute(gray, kps)
     
    print("[INFO] # of keypoints detected: {}".format(len(kps)))
    print("[INFO] feature vector shape: {}".format(descs.shape))

    10.3 SURF

    Purpose: the first step of SURF is to select a rectangular image region around each keypoint; the exact size of the region is determined during the keypoint detection phase.

    extract_surf.py

    from __future__ import print_function
    import argparse
    import cv2
    import imutils
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
     
    image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("SURF")
        extractor = cv2.DescriptorExtractor_create("SURF")
     
        kps = detector.detect(gray)
        (kps, descs) = extractor.compute(gray, kps)
     
    else:
        detector = cv2.xfeatures2d.SURF_create()
     
        (kps, descs) = detector.detectAndCompute(gray, None)
     
    print("[INFO] # of keypoints detected: {}".format(len(kps)))
    print("[INFO] feature vector shape: {}".format(descs.shape))

    10.4 Real-valued feature matching

    Tip: a keypoint's image coordinates are available through its pt attribute, e.g. kps[0].pt (see the short illustration below).
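
    A tiny illustration of the cv2.KeyPoint attributes used throughout these scripts (the keypoint here is constructed by hand purely for demonstration):

    import cv2

    kp = cv2.KeyPoint(10.0, 20.0, 3.0)  # x, y, diameter
    print(kp.pt)     # (10.0, 20.0): the image coordinates
    print(kp.size)   # 3.0: the diameter of the keypoint neighborhood
    print(kp.angle)  # the orientation (-1 if not computed)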

    Run:

    1. python draw_matches.py --first jp_01.png --second jp_02.png --detector SURF --extractor SIFT

    2. python draw_matches.py --first jp_01.png --second jp_02.png --detector SURF --extractor RootSIFT

    3. python draw_matches.py --first jp_01.png --second jp_02.png --detector SURF --extractor SURF

    #coding=utf-8
    from __future__ import print_function
    import numpy as np
    import argparse
    import cv2
    from imutils.feature.factories import FeatureDetector_create, DescriptorExtractor_create, DescriptorMatcher_create
    ap = argparse.ArgumentParser()
    ap.add_argument("-f", "--first", required = True, help = "Path to first image") #提取关键点和特征向量的第一幅图像的路经
    ap.add_argument("-s", "--second", required = True, help = "Path to second image")#提取关键点和特征向量的第二幅图像的路经
    ap.add_argument("-d", "--detector", type = str, default="SURF", help = "Kepyoint detector to use.Options ['BRISK', 'DENSE', 'DOG', 'SIFT', 'FAST', 'FASTHESSIAN', 'SURF', 'GFTT','HARRIS',  'MSER',  'ORB',  'STAR']")#用于在俩个图像执行关键点检测的关键点的检测器
    ap.add_argument("-e", "--extractor", type = str, default = "SIFT", help = "Keypoint detector to use.Options['RootSIFT', 'SIFT', 'SURF']")#关键点区域提取局部不变描述符
    ap.add_argument("-m", "--matcher", type = str, default ="BruteForce", help = "Feature matcher to use. Options ['BruteForce', 'BruteForce-SL2', 'BruteForce-L1','FlannBased']")#寻找没对描述符最小距离方法
    #ap.add_argument("-v", "--visualize", type = str, default = "Yes", help="Whether the visualiz image should be shown. Options ['Yes', 'No', 'Each']")
    ap.add_argument("-v", "--visualize", type = str, default = "Yes", help="Whether the visualiztion image should be shown. Options ['Yes',  'No',  'Each']")#绘制对关键点和描述符之间的匹配
    args = vars(ap.parse_args())
    
    if args["detector"] == "DOG":
        detector = FeatureDetector_create("SIFT")
    elif args["detector"] == "FASTHESSIAN":
        detector = FeatureDetector_create("SURF")
    else:
        detector = FeatureDetector_create(args["detector"])
    extractor = DescriptorExtractor_create(args["extractor"])  # extracts the feature descriptor from each keypoint region
    
    matcher = DescriptorMatcher_create(args["matcher"])
    imageA = cv2.imread(args["first"])
    imageB = cv2.imread(args["second"])
    grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
    # detect keypoints in both images
    kpsA = detector.detect(grayA)
    kpsB = detector.detect(grayB)
    # extract local invariant descriptors from the keypoints
    (kpsA, featuresA) = extractor.compute(grayA, kpsA)
    (kpsB, featuresB)  = extractor.compute(grayB, kpsB)
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []
    if rawMatches is not None:
        for m in rawMatches:
            # apply David Lowe's ratio test to filter ambiguous matches
            if len(m) == 2 and m[0].distance < m[1].distance*0.8:
                matches.append((m[0].trainIdx, m[0].queryIdx))
    
        print("# of keypoints from first image:{}".format(len(kpsA)))

    11. Binary descriptors

    11.1 ORB

    from __future__ import print_function
    import argparse
    import cv2
    import imutils
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
     
    image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
    
        detector = cv2.FeatureDetector_create("ORB")
        extractor = cv2.DescriptorExtractor_create("ORB")
     
        kps = detector.detect(gray)
        (kps, descs) = extractor.compute(gray, kps)
    
    else:
        detector = cv2.ORB_create()
    
        (kps, descs) = detector.detectAndCompute(gray, None)
     
    # show the shape of the keypoints and local invariant descriptors array
    print("[INFO] # of keypoints detected: {}".format(len(kps)))
    print("[INFO] feature vector shape: {}".format(descs.shape))

    11.2 BRISK

    from __future__ import print_function
    import argparse
    import cv2
    import imutils
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
    
    image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("BRISK")
        extractor = cv2.DescriptorExtractor_create("BRISK")
     
        kps = detector.detect(gray)
        (kps, descs) = extractor.compute(gray, kps)
     
    else:
        detector = cv2.BRISK_create()
     
    
        (kps, descs) = detector.detectAndCompute(gray, None)
     
    print("[INFO] # of keypoints detected: {}".format(len(kps)))
    print("[INFO] feature vector shape: {}".format(descs.shape))

    11.3 BRIEF

    from __future__ import print_function
    import argparse
    import cv2
    import imutils
     
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
    
    image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("FAST")
        extractor = cv2.DescriptorExtractor_create("BRIEF")
     
    else:
        detector = cv2.FastFeatureDetector_create()
        extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
     
    
    kps = detector.detect(gray)
    (kps, descs) = extractor.compute(gray, kps)
     
    print("[INFO] # of keypoints detected: {}".format(len(kps)))
    print("[INFO] feature vector shape: {}".format(descs.shape))

    11.4 FREAK

    from __future__ import print_function
    import argparse
    import cv2
    import imutils
     
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
     
    image = cv2.imread(args["image"])
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     
    if imutils.is_cv2():
        detector = cv2.FeatureDetector_create("FAST")
        extractor = cv2.DescriptorExtractor_create("FREAK")
        kps = detector.detect(gray)
        (kps, descs) = extractor.compute(gray, kps)
     
    else:
    
        detector = cv2.FastFeatureDetector_create()
        extractor = cv2.xfeatures2d.FREAK_create()
     
        kps = detector.detect(gray, None)
        (kps, descs) = extractor.compute(gray, kps)
     
    print("[INFO] # of keypoints detected: {}".format(len(kps)))
    print("[INFO] feature vector shape: {}".format(descs.shape))
    Original article: https://www.cnblogs.com/w-x-me/p/8392395.html