  • Reading the camshift.py OpenCV sample

    The source code is below:

    #!/usr/bin/env python
    
    '''
    Camshift tracker
    ================
    
    This is a demo that shows mean-shift based tracking.
    You select a colored object, such as your face, and it tracks it.
    This reads from the video camera (0 by default, or the camera number the user enters).
    
    http://www.robinhewitt.com/research/track/camshift.html
    
    Usage:
    ------
        camshift.py [<video source>]
    
        To initialize tracking, select the object with mouse
    
    Keys:
    -----
        ESC   - exit
        b     - toggle back-projected probability visualization
    '''
    
    import numpy as np
    import cv2
    import video
    
    
    class App(object):
        def __init__(self, video_src):
            self.cam = video.create_capture(video_src)      # open the video source (camera index or file)
            ret, self.frame = self.cam.read()                # read one frame
            cv2.namedWindow('camshift')                      # create a window named 'camshift'
            cv2.setMouseCallback('camshift', self.onmouse)   # register the mouse callback on that window
    
            self.selection = None
            self.drag_start = None
            self.tracking_state = 0
            self.show_backproj = False
    
        def onmouse(self, event, x, y, flags, param):
            x, y = np.int16([x, y]) # BUG
            if event == cv2.EVENT_LBUTTONDOWN:
                self.drag_start = (x, y)
                self.tracking_state = 0
                return
            if self.drag_start:
                if flags & cv2.EVENT_FLAG_LBUTTON:
                    h, w = self.frame.shape[:2]
                    xo, yo = self.drag_start
                    x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
                    x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
                    self.selection = None
                    if x1-x0 > 0 and y1-y0 > 0:
                        self.selection = (x0, y0, x1, y1)
                else:
                    self.drag_start = None
                    if self.selection is not None:
                        self.tracking_state = 1
    
        def show_hist(self):
            bin_count = self.hist.shape[0]
            bin_w = 24
            img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
            for i in xrange(bin_count):
                h = int(self.hist[i])
                cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
            img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
            cv2.imshow('hist', img)
    
        def run(self):
            while True:
                ret, self.frame = self.cam.read()                   # read the next frame
                vis = self.frame.copy()                             # copy for drawing
                hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)   # convert from BGR to HSV color space
                mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))  # keep pixels between (0, 60, 32) and (180, 255, 255), i.e. drop very dark or unsaturated pixels
    
                if self.selection:
                    x0, y0, x1, y1 = self.selection
                    self.track_window = (x0, y0, x1-x0, y1-y0)
                    hsv_roi = hsv[y0:y1, x0:x1]
                    mask_roi = mask[y0:y1, x0:x1]
                    hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
                    self.hist = hist.reshape(-1)
                    self.show_hist()
    
                    vis_roi = vis[y0:y1, x0:x1]
                    cv2.bitwise_not(vis_roi, vis_roi)
                    vis[mask == 0] = 0
    
                if self.tracking_state == 1:
                    self.selection = None
                    prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                    prob &= mask
                    term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
                    track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
    
                    if self.show_backproj:
                        vis[:] = prob[...,np.newaxis]
                    try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
                    except: print track_box
    
                cv2.imshow('camshift', vis)
    
                ch = 0xFF & cv2.waitKey(5)
                if ch == 27:
                    break
                if ch == ord('b'):
                    self.show_backproj = not self.show_backproj
            cv2.destroyAllWindows()
    
    
    if __name__ == '__main__':
        import sys
        try: video_src = sys.argv[1]
        except: video_src = 0
        print __doc__
        App(video_src).run()
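
    To make the flow of run() easier to follow, here is a distilled sketch of the same pipeline with the mouse selection replaced by a hard-coded initial window. The window coordinates and camera index 0 are illustration-only assumptions, not part of the sample:

    # camshift_minimal.py -- minimal sketch of the pipeline above (assumed fixed ROI, camera 0)
    import numpy as np
    import cv2

    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()

    # hypothetical initial tracking window (x, y, w, h) instead of a mouse selection
    x, y, w, h = 200, 150, 100, 100
    track_window = (x, y, w, h)

    # 1. build a hue histogram of the region of interest
    hsv_roi = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
    mask_roi = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
    hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)

    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # 2. back-project the histogram: each pixel becomes the probability of matching the ROI hues
        prob = cv2.calcBackProject([hsv], [0], hist, [0, 180], 1)
        # 3. CamShift moves and resizes the window toward the peak of that probability image
        track_box, track_window = cv2.CamShift(prob, track_window, term_crit)
        cv2.ellipse(frame, track_box, (0, 0, 255), 2)
        cv2.imshow('tracking', frame)
        if cv2.waitKey(5) & 0xFF == 27:    # ESC to quit
            break

    cap.release()
    cv2.destroyAllWindows()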

    About the sys.argv usage in the if __name__ == '__main__': block: sys.argv holds the command-line arguments, and sys.argv[0] is the path of the script itself, which is why the real arguments are read starting from index 1. Below is the example copied from the official tutorial; run it once and the behaviour is obvious.

    # jack.py
    #!/usr/bin/python
    # Filename: using_sys.py

    import sys

    print 'The command line arguments are:'
    for i in sys.argv:
        print i

    print '\nThe PYTHONPATH is', sys.path, '\n'

    Run it in a terminal:

    python jack.py ba la ba la 

    The output is:

    The command line arguments are:
    jack.py
    ba
    la
    ba
    la
    
    
    The PYTHONPATH is ['/home/x-power/OpenCV', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/home/x-power/.local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages/PILcompat', '/usr/lib/python2.7/dist-packages/gtk-2.0'] 
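
    Back in camshift.py, this same mechanism is what lets the demo accept an optional video source. Below is a minimal sketch of that argument-handling pattern; it uses cv2.VideoCapture directly rather than the sample's video.create_capture helper, and the digit-string-to-camera-index conversion is an assumption for illustration:

    # choose_source.py -- sketch of the video-source argument handling (assumed behaviour)
    import sys
    import cv2

    try:
        video_src = sys.argv[1]      # camera index or path to a video file
    except IndexError:
        video_src = 0                # no argument given: default to the first camera

    # a bare digit string such as "1" is treated as a camera index
    if isinstance(video_src, str) and video_src.isdigit():
        video_src = int(video_src)

    cap = cv2.VideoCapture(video_src)
    print 'capture opened:', cap.isOpened()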



  • Original post: https://www.cnblogs.com/A-FM/p/6640352.html