  • EAST Text Detection Model in Practice, Part 2: Loading the pb File with OpenCV

    Below is the code for loading the pb file with OpenCV; it is no different from loading any other model.
    # import the necessary packages
    from imutils.object_detection import non_max_suppression
    import numpy as np
    import argparse
    import time
    import cv2

    def main(argv=None):
        # construct the argument parser and parse the arguments
        ap = argparse.ArgumentParser()
        ap.add_argument("-i", "--image", type=str,
            default="D:/work/video/EAST/tmp/frame_74.jpg",
            help="path to the input image")
        ap.add_argument("-east", "--east", type=str,
            default="D:/work/video/hand_tracking_no_op/hand_tracking/EAST/east_icdar2015_resnet_v1_50_rbox/out.pb",
            help="path to the frozen EAST text detector (.pb)")
        ap.add_argument("-c", "--min-confidence", type=float, default=0.5,
            help="minimum probability required to inspect a region")
        ap.add_argument("-w", "--width", type=int, default=320,
            help="resized image width (should be multiple of 32)")
        ap.add_argument("-e", "--height", type=int, default=320,
            help="resized image height (should be multiple of 32)")
        args = vars(ap.parse_args())

        # the two output layers we need: sigmoid score map and box geometry
        layerNames = [
            "feature_fusion/Conv_7/Sigmoid",
            "feature_fusion/concat_3"]

        # load the pre-trained EAST text detector
        print("[INFO] loading EAST text detector...")
        # net = cv2.dnn.readNet(args["east"])
        pbtxt_path = 'D:/work/video/EAST/protobuf.pbtxt'  # optional graph description, not used here
        net = cv2.dnn.readNetFromTensorflow(args["east"])

        # load the input image and grab the image dimensions;
        # run the detection several times to get a stable timing measurement
        for i in range(15):
            image = cv2.imread(args["image"])
            orig = image.copy()
            (H, W) = image.shape[:2]

            # construct a blob from the image and then perform a forward pass of
            # the model to obtain the two output layer sets
            # (EAST expects the input width and height to be multiples of 32)
            blob = cv2.dnn.blobFromImage(image, 1.0, (W, H), swapRB=True, crop=False)
            net.setInput(blob)
            start = time.time()
            (scores, geometry) = net.forward(layerNames)
            end = time.time()

            # show timing information on text prediction
            print("[INFO] text detection took {:.6f} seconds".format(end - start))

            # grab the number of rows and columns from the scores volume, then
            # initialize our set of bounding box rectangles and corresponding
            # confidence scores
            (numRows, numCols) = scores.shape[2:4]
            print('numRows, numCols:', numRows, numCols)
            rects = []
            confidences = []

            # loop over the number of rows
            for y in range(0, numRows):
                # extract the scores (probabilities), followed by the geometrical
                # data used to derive potential bounding box coordinates that
                # surround text
                scoresData = scores[0, 0, y]
                xData0 = geometry[0, 0, y]
                xData1 = geometry[0, 1, y]
                xData2 = geometry[0, 2, y]
                xData3 = geometry[0, 3, y]
                anglesData = geometry[0, 4, y]

                # loop over the number of columns
                for x in range(0, numCols):
                    # if our score does not have sufficient probability, ignore it
                    if scoresData[x] < args["min_confidence"]:
                        continue

                    # the score/geometry maps are 4x smaller than the input image
                    (offsetX, offsetY) = (x * 4.0, y * 4.0)

                    # extract the rotation angle for the prediction and then
                    # compute the sin and cosine
                    angle = anglesData[x]
                    cos = np.cos(angle)
                    sin = np.sin(angle)

                    # use the geometry volume to derive the width and height of
                    # the bounding box
                    h = xData0[x] + xData2[x]
                    w = xData1[x] + xData3[x]

                    # compute both the starting and ending (x, y)-coordinates for
                    # the text prediction bounding box
                    endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
                    endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
                    startX = int(endX - w)
                    startY = int(endY - h)

                    # add the bounding box coordinates and probability score to
                    # our respective lists
                    rects.append((startX, startY, endX, endY))
                    confidences.append(scoresData[x])

            # apply non-maxima suppression to suppress weak, overlapping bounding
            # boxes
            boxes = non_max_suppression(np.array(rects), probs=confidences)
            print(confidences, boxes)

            # loop over the bounding boxes
            for (startX, startY, endX, endY) in boxes:
                # no rescaling is needed here because the blob was built at the
                # original image size
                print(startX, startY, endX, endY)
                # draw the bounding box on the image
                cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 1)

        # show the output image
        cv2.imwrite('D:/work/video/EAST/tmp/0424.jpg', orig)
        cv2.imshow("Text Detection", orig)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    if __name__ == "__main__":
        main()
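
    Note that the --width and --height arguments are parsed but never used: the blob above is built at the original image size, which only works when the frame dimensions happen to be multiples of 32 (a requirement of the EAST network). Below is a minimal sketch of the more common alternative, resizing to a fixed multiple of 32 first and scaling the boxes back afterwards; it reuses the variable names from the script above and the ImageNet mean subtraction used in the reference OpenCV EAST example, so treat it as an assumption rather than the author's exact setup.

    # resize the input to a fixed multiple of 32 before building the blob,
    # then scale the detected boxes back to the original resolution
    (newW, newH) = (args["width"], args["height"])        # e.g. 320 x 320
    (rW, rH) = (W / float(newW), H / float(newH))         # ratios for mapping boxes back
    resized = cv2.resize(image, (newW, newH))
    blob = cv2.dnn.blobFromImage(resized, 1.0, (newW, newH),
        (123.68, 116.78, 103.94), swapRB=True, crop=False)
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    # ... decode scores/geometry and run NMS exactly as above ...
    for (startX, startY, endX, endY) in boxes:
        # map each box back to the original image coordinates before drawing
        cv2.rectangle(orig, (int(startX * rW), int(startY * rH)),
            (int(endX * rW), int(endY * rH)), (0, 255, 0), 1)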

    Because the outputs produced when loading with OpenCV differ from those produced when loading with TensorFlow, the post-processing after prediction also differs, but in both cases NMS is used to find the final boxes.
    Loading with OpenCV does not bring any obvious speedup; my guess is that OpenCV DNN does not provide acceleration for this network.
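
    For reference, the imutils dependency is not strictly required: OpenCV ships its own NMS in the dnn module. A minimal sketch, assuming the rects and confidences lists built in the loop above (NMSBoxes expects boxes as (x, y, w, h) and returns the indices of the boxes to keep):

    # convert (startX, startY, endX, endY) rectangles to (x, y, w, h)
    bboxes = [[x1, y1, x2 - x1, y2 - y1] for (x1, y1, x2, y2) in rects]
    scores = [float(c) for c in confidences]
    # arguments: boxes, scores, score threshold, NMS (IoU) threshold
    indices = cv2.dnn.NMSBoxes(bboxes, scores, 0.5, 0.4)
    boxes = [rects[i] for i in np.array(indices).flatten()]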