  • caffe faster rcnn demo

    https://download.csdn.net/download/zefan7564/10148990

    https://blog.csdn.net/qq_37124237/article/details/81087505

    Object detection: running Faster R-CNN and testing a real-time demo

#!/usr/bin/env python

# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

"""
Demo script showing detections in sample images.

See README.md for installation instructions before running.
"""

import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse

CLASSES = ('__background__',
           'ship')

NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
               'ZF_faster_rcnn_final.caffemodel'),
        'wyx': ('wyx', 'vgg_cnn_m_1024_faster_rcnn_iter_1000.caffemodel')}


def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes with matplotlib."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]  # BGR -> RGB for matplotlib
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()


def vis_detections_video(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes directly on a video frame with OpenCV."""
    global lastColor, frameRate
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return im

    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        cv2.rectangle(im, (int(bbox[0]), int(bbox[1])),
                      (int(bbox[2]), int(bbox[3])), (0, 0, 255), 2)
        cv2.rectangle(im, (int(bbox[0]), int(bbox[1] - 20)),
                      (int(bbox[0] + 200), int(bbox[1])), (10, 10, 10), -1)
        cv2.putText(im, '{:s} {:.3f}'.format(class_name, score),
                    (int(bbox[0]), int(bbox[1] - 2)),
                    cv2.FONT_HERSHEY_SIMPLEX, .75, (255, 255, 255))

    return im


def demo(net, im):
    """Detect object classes in an image using pre-computed object proposals."""
    global frameRate
    # Load the demo image (single-image version, kept for reference)
    #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    #im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])
    frameRate = 1.0 / timer.total_time
    print "fps: " + str(frameRate)

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections_video(im, cls, dets, thresh=CONF_THRESH)
        cv2.putText(im, '{:s} {:.2f}'.format("FPS:", frameRate), (1750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
        cv2.imshow(videoFilePath.split('/')[-1], im)
        cv2.waitKey(20)


def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
                        choices=NETS.keys(), default='vgg16')

    args = parser.parse_args()

    return args


if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    # prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
    #                         'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    prototxt = '/home/yexin/py-faster-rcnn/models/pascal_voc/VGG_CNN_M_1024/faster_rcnn_end2end/test.prototxt'

    # caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
    #                           NETS[args.demo_net][1])
    caffemodel = '/home/yexin/py-faster-rcnn/output/faster_rcnn_end2end/voc_2007_trainval/vgg_cnn_m_1024_faster_rcnn_iter_100.caffemodel'

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found. Did you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))
    print ' ok'

    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print ' Loaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _ = im_detect(net, im)

    videoFilePath = '/home/yexin/py-faster-rcnn/data/demo/test_1-3.mp4'
    videoCapture = cv2.VideoCapture(videoFilePath)
    while True:
        success, im = videoCapture.read()
        if not success:  # end of video or read failure
            break
        demo(net, im)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    videoCapture.release()
    cv2.destroyAllWindows()
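    For reference, the per-class post-processing inside demo() — slicing out one class's boxes and scores, running NMS, then applying the confidence threshold — can be reproduced with plain NumPy. The sketch below is illustrative only: py_nms is a simple greedy IoU-based NMS standing in for fast_rcnn.nms_wrapper.nms, and the dummy scores/boxes arrays merely imitate the shapes returned by im_detect for the two-class (__background__, ship) setup above.

    import numpy as np

    def py_nms(dets, thresh):
        """Greedy non-maximum suppression on an (N, 5) array of [x1, y1, x2, y2, score]."""
        x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]      # proposal indices, highest score first
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # Intersection of the current top-scoring box with all remaining boxes
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            iou = inter / (areas[i] + areas[order[1:]] - inter)
            # Drop boxes that overlap the kept box more than the threshold
            order = order[np.where(iou <= thresh)[0] + 1]
        return keep

    # Dummy outputs shaped like im_detect(net, im) for 2 classes (background + ship):
    # scores is (num_proposals, num_classes), boxes is (num_proposals, 4 * num_classes).
    scores = np.array([[0.1, 0.9],
                       [0.2, 0.8],
                       [0.7, 0.3]], dtype=np.float32)
    boxes = np.array([[0, 0, 10, 10,  10,  10,  60,  60],
                      [0, 0, 10, 10,  12,  12,  62,  62],
                      [0, 0, 10, 10, 200, 200, 250, 250]], dtype=np.float32)

    cls_ind = 1                               # 'ship'
    cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
    cls_scores = scores[:, cls_ind]
    dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
    keep = py_nms(dets, 0.3)                  # NMS_THRESH
    dets = dets[keep, :]
    dets = dets[dets[:, -1] >= 0.8]           # CONF_THRESH
    print(dets)                               # the two overlapping ship boxes collapse to one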
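    The script defines a matplotlib-based vis_detections() but only exercises the OpenCV video path. To sanity-check the model on a single image instead of a video stream, a minimal variant along the lines of the upstream py-faster-rcnn demo could look like the sketch below; the image path is just a placeholder, and net, CLASSES, im_detect and nms are assumed to be set up exactly as in the script above.

    # Minimal single-image sketch: reuse the already-loaded net and helpers.
    im = cv2.imread('/home/yexin/py-faster-rcnn/data/demo/000001.jpg')  # example path

    scores, boxes = im_detect(net, im)

    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # skip '__background__'
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
    plt.show()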
  • Original post: https://www.cnblogs.com/shuimuqingyang/p/10171300.html