  • Liveness detection via eye blinks

    1. Capture a frame from the camera and convert it to a grayscale image, which reduces the amount of data that has to be processed.
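    A minimal capture-and-convert sketch (the full script below does the same thing inside its loop):

    import cv2

    cap = cv2.VideoCapture(0)                           # open the default webcam
    ret, frame = cap.read()                             # grab a single frame
    if ret:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # single-channel image for detection
    cap.release()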

    2. Run the face detector on the grayscale image, upsampling the image once so the detector can pick up finer detail. (The detector may return several faces, but we normally process only one.)

    detector = dlib.get_frontal_face_detector()  # frontal face detector
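    The detector is then called with an upsample count of 1, exactly as in the full script further down:

    rects = detector(gray, 1)  # second argument: number of times to upsample before detecting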

    3. Use the facial-landmark predictor to locate the 68 feature points on the detected face.

    predictor = dlib.shape_predictor(r'libs/shape_predictor_68_face_landmarks.dat')  # landmark predictor
    shape = predictor(gray, rects[0])       # predict the 68 landmarks for the first face
    points = face_utils.shape_to_np(shape)  # convert them to an array of (x, y) coordinates

    4. From those 68 landmarks, pick out the points belonging to the left eye and the right eye, as in the sketch below.
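    In the 68-point layout used by the scripts below, the right eye occupies indices 36-41 and the left eye 42-47 (0-based), so the eye points can be sliced straight out of the landmark array (a sketch, assuming the points array from step 3):

    rightEye = points[36:42]  # landmarks 36-41
    leftEye = points[42:48]   # landmarks 42-47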

    5. Use scipy.spatial.distance to compute Euclidean distances between those eye points.

    Distance between two points (x1, y1) and (x2, y2): d = sqrt((x1 - x2)^2 + (y1 - y2)^2)
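    A quick sanity check of the SciPy helper used below:

    from scipy.spatial import distance

    distance.euclidean((0, 0), (3, 4))  # -> 5.0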

    6. Use those distances to compute the eye aspect ratio (EAR). When the EAR drops below a threshold of roughly 0.3, we treat the eye as closed for a blink; the formula is given below.
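    With the six landmarks of one eye labelled p1..p6 in the order they are returned (p1 and p4 are the horizontal corners, p2/p3 the upper lid, p6/p5 the lower lid), the ratio computed by eye_aspect_ratio() in the scripts below is:

    EAR = (|p2 - p6| + |p3 - p5|) / (2 * |p1 - p4|)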

    7. cv2 provides a contour-drawing function; the scripts below first wrap the eye points with cv2.convexHull and then draw the resulting outline:

    cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    from scipy.spatial import distance
    import dlib
    import cv2
    from imutils import face_utils
    import numpy as np
    
    
    def eye_aspect_ratio(eye):
        """
        计算 EAR,
        欧式距离。 euclidean:欧式距离
        :param eye: 眼部特征点数组
        :return: EAR值
        """
        A = distance.euclidean(eye[1], eye[5])
        B = distance.euclidean(eye[2], eye[4])
        C = distance.euclidean(eye[0], eye[3])
        return (A + B) / (2.0 * C)
    
    
    detector = dlib.get_frontal_face_detector()  # frontal face detector
    predictor = dlib.shape_predictor(r'libs/shape_predictor_68_face_landmarks.dat')  # facial-landmark predictor
    # Eye-aspect-ratio threshold below which the eye is considered closed
    EAR_THRESH = 0.3
    # The EAR must stay below the threshold for at least this many consecutive frames to count as a blink
    EAR_CONSEC_FRAMES = 3
    
    # Indices of the eye landmarks within the 68-point model (0-based): right eye 36-41, left eye 42-47
    RIGHT_EYE_START = 36
    RIGHT_EYE_END = 41
    LEFT_EYE_START = 42
    LEFT_EYE_END = 47
    
    frame_counter = 0  # consecutive frames with EAR below the threshold
    blink_counter = 0  # number of blinks detected
    
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    while True:
        ret, frame = cap.read()
        if not ret:  # stop if the camera did not return a frame
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 1)  # upsample once so the detector sees finer detail
        # print(rects)
    
        if len(rects) > 0:
            shape = predictor(gray, rects[0])  # predict the landmarks for the first face
            # Convert the landmarks to an array of (x, y) coordinates
            points = face_utils.shape_to_np(shape)
            leftEye = points[LEFT_EYE_START:LEFT_EYE_END + 1]  # left-eye landmarks
            rightEye = points[RIGHT_EYE_START:RIGHT_EYE_END + 1]  # right-eye landmarks
    
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
    
            # Average the EAR of both eyes
            ear = (leftEAR + rightEAR) / 2.0
            print(ear)
    
            # Outline the left and right eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
    
            # If the EAR is below the threshold, count consecutive low-EAR frames; otherwise check whether a blink just completed
            if ear < EAR_THRESH:
                frame_counter += 1
            else:
                if frame_counter >= EAR_CONSEC_FRAMES:
                    print("Blink detection succeeded, please proceed")
                    blink_counter += 1
                    break
                frame_counter = 0
            # cv2.putText(frame, "blink:{}".format(blink_counter), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    
        cv2.imshow("window", frame)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
    
    cap.release()
    cv2.destroyAllWindows()
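    The same check can also be written with the face_recognition library, which wraps dlib and returns the landmarks as a dict keyed by region name ('left_eye', 'right_eye'), so no index bookkeeping is needed: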
    

      

    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    import cv2
    import face_recognition
    from imutils import face_utils
    from scipy.spatial import distance
    import numpy as np
    
    EAR_THRESH = 0.28   # eye-aspect-ratio threshold
    CONTINUE_FRAME = 3  # consecutive low-EAR frames required for a blink
    blink_count = 0     # consecutive-frame counter
    
    
    def eye_aspect_ratio(eye):
        A = distance.euclidean(eye[1], eye[5])
        B = distance.euclidean(eye[2], eye[4])
        C = distance.euclidean(eye[0], eye[3])
        return (A + B) / (2.0 * C)
    
    
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    while True:
        ret, frame = cap.read()
        if not ret:  # stop if the camera did not return a frame
            break
    
        face_marks = face_recognition.face_landmarks(frame)
        # print(face_marks)
        if len(face_marks) > 0:
            left_eye = face_marks[0]['left_eye']
            # print(left_eye)
            left_eye_np = np.array(left_eye)
            # print(left_eye_np)
            right_eye = face_marks[0]['right_eye']
            right_eye_np = np.array(right_eye)
    
            left_EAR = eye_aspect_ratio(left_eye)
            right_EAR = eye_aspect_ratio(right_eye)
            eye_EAR_mean = (left_EAR + right_EAR) / 2.0
            print(eye_EAR_mean)
            leftEyeHull = cv2.convexHull(left_eye_np)
            rightEyeHull = cv2.convexHull(right_eye_np)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            if eye_EAR_mean < EAR_THRESH:
                blink_count += 1
                if blink_count > CONTINUE_FRAME:
                    print("Blink test passed")
                    blink_count = 0
                    break
            else:
                blink_count = 0  # reset when the eye reopens so only consecutive frames count
    
        cv2.imshow("video", frame)
        key = cv2.waitKey(5)
        if key & 0xFF == ord('q'):
            break
    
    cap.release()
    cv2.destroyAllWindows()
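    Note that the two scripts use slightly different thresholds (0.3 vs 0.28) and consecutive-frame counts; in practice these values generally need tuning for the camera, lighting, and the person's eye shape.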
  • Original post: https://www.cnblogs.com/xuwenwei/p/14558420.html