  • Swift: reading each frame from the camera and running face detection on it

    I recently helped someone with a project whose core feature is face detection through the camera.

    GitHub repository: https://github.com/qugang/AVCaptureVideoTemplate

    To use the camera on iOS you need the AVFoundation framework; I won't walk through everything it contains here.

    Starting the camera requires the AVCaptureSession class.

    To receive every frame the camera delivers, implement the AVCaptureVideoDataOutputSampleBufferDelegate protocol.

    First, in viewDidLoad, add code that searches for a camera device; once the front camera is found, start the session:

    // Use a low-resolution preset; face detection does not need full-size frames.
    captureSession.sessionPreset = AVCaptureSessionPresetLow
    // Enumerate the capture devices and pick the front-facing camera.
    let devices = AVCaptureDevice.devices()
    for device in devices {
      if (device.hasMediaType(AVMediaTypeVideo)) {
        if (device.position == AVCaptureDevicePosition.Front) {
          captureDevice = device as? AVCaptureDevice
          if captureDevice != nil {
            println("Capture Device found")
            beginSession()
          }
        }
      }
    }

    beginSession, which configures and starts the camera session:

    func beginSession() {
      var err : NSError? = nil
      // Attach the front camera as the session input.
      captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
      // Deliver BGRA sample buffers to this controller on a background serial queue.
      let output = AVCaptureVideoDataOutput()
      let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
      output.setSampleBufferDelegate(self, queue: cameraQueue)
      output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
      captureSession.addOutput(output)
      if err != nil {
        println("error: \(err?.localizedDescription)")
      }
      // Show a live preview layer that fills the view, then start capturing.
      previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
      previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
      previewLayer?.frame = self.view.bounds
      self.view.layer.addSublayer(previewLayer!)
      captureSession.startRunning()
    }

    Once the session is running, implement the captureOutput delegate method:

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
      if(self.isStart)
      {
        // Convert the sample buffer to a UIImage, then run a face detector on it.
        let resultImage = sampleBufferToImage(sampleBuffer)
        let context = CIContext(options:[kCIContextUseSoftwareRenderer:true])
        let detector = CIDetector(ofType:CIDetectorTypeFace, context:context, options:[CIDetectorAccuracy: CIDetectorAccuracyHigh])
        let ciImage = CIImage(image: resultImage)
        let results:NSArray = detector.featuresInImage(ciImage, options: [CIDetectorImageOrientation : 6])
        for r in results {
          let face:CIFaceFeature = r as! CIFaceFeature
          // Crop the detected face region out of the frame.
          let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds), scale: 1.0, orientation: .Right)
          NSLog("Face found at (%f,%f) of dimensions %fx%f", face.bounds.origin.x, face.bounds.origin.y, face.bounds.size.width, face.bounds.size.height)
          dispatch_async(dispatch_get_main_queue()) {
            if (self.isStart)
            {
              // Dismiss the capture screen, stop the session, and hand the face image back.
              self.dismissViewControllerAnimated(true, completion: nil)
              self.didReceiveMemoryWarning()
              self.callBack!(face: faceImage!)
            }
            self.isStart = false
          }
        }
      }
    }

    CIDetector is run on each frame to find faces. It can also report eye blinks and smiles; see Apple's official API documentation for the full set of options.
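
    As a rough sketch of those extra options (reusing the context / ciImage names from the snippet above; this is illustrative, not code from the repo), you can pass CIDetectorSmile and CIDetectorEyeBlink when requesting features and then read the per-face flags:

    let detector = CIDetector(ofType: CIDetectorTypeFace, context: context,
                              options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
    // Ask the detector to also evaluate smiles and closed eyes for each face.
    let features = detector.featuresInImage(ciImage,
        options: [CIDetectorImageOrientation: 6, CIDetectorSmile: true, CIDetectorEyeBlink: true])
    for f in features {
      if let face = f as? CIFaceFeature {
        println("smiling: \(face.hasSmile), left eye closed: \(face.leftEyeClosed), right eye closed: \(face.rightEyeClosed)")
      }
    }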

    That is the key code. A two-second delay is set up, and face detection only starts once those two seconds have passed.

    Full source:

    //
    //  ViewController.swift
    //  AVSessionTest
    //
    //  Created by qugang on 15/7/8.
    //  Copyright (c) 2015 qugang. All rights reserved.
    //
     
    import UIKit
    import AVFoundation
    class AVCaptireVideoPicController: UIViewController,AVCaptureVideoDataOutputSampleBufferDelegate {
      var callBack :((face: UIImage) ->())?
      let captureSession = AVCaptureSession()
      var captureDevice : AVCaptureDevice?
      var previewLayer : AVCaptureVideoPreviewLayer?
      var pickUIImager : UIImageView = UIImageView(image: UIImage(named: "pick_bg"))
      var line : UIImageView = UIImageView(image: UIImage(named: "line"))
      var timer : NSTimer!
      var upOrdown = true
      var isStart = false
      override func viewDidLoad() {
        super.viewDidLoad()
        captureSession.sessionPreset = AVCaptureSessionPresetLow
        let devices = AVCaptureDevice.devices()
        for device in devices {
          if (device.hasMediaType(AVMediaTypeVideo)) {
            if (device.position == AVCaptureDevicePosition.Front) {
          captureDevice = device as? AVCaptureDevice
              if captureDevice != nil {
                println("Capture Device found")
                beginSession()
              }
            }
          }
        }
    pickUIImager.frame = CGRect(x: self.view.bounds.width / 2 - 100, y: self.view.bounds.height / 2 - 100, width: 200, height: 200)
    line.frame = CGRect(x: self.view.bounds.width / 2 - 100, y: self.view.bounds.height / 2 - 100, width: 200, height: 2)
        self.view.addSubview(pickUIImager)
        self.view.addSubview(line)
    // Drive the scan-line animation; after 2 seconds, allow face detection to start.
    timer = NSTimer.scheduledTimerWithTimeInterval(0.01, target: self, selector: "animationState", userInfo: nil, repeats: true)

    NSTimer.scheduledTimerWithTimeInterval(2, target: self, selector: "isStartTrue", userInfo: nil, repeats: false)
      }
      func isStartTrue(){
        self.isStart = true
      }
  // Note: also invoked manually from captureOutput to stop the capture session.
  override func didReceiveMemoryWarning(){
        super.didReceiveMemoryWarning()
        captureSession.stopRunning()
      }
       
  // Move the scan line up and down inside the pick frame.
  func animationState(){
        if upOrdown {
          if (line.frame.origin.y >= pickUIImager.frame.origin.y + 200)
          {
            upOrdown = false
          }
          else
          {
            line.frame.origin.y += 2
          }
        } else {
          if (line.frame.origin.y <= pickUIImager.frame.origin.y)
          {
            upOrdown = true
          }
          else
          {
            line.frame.origin.y -= 2
          }
        }
      }
      func beginSession() {
        var err : NSError? = nil
        captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
        let output = AVCaptureVideoDataOutput()
        let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
        output.setSampleBufferDelegate(self, queue: cameraQueue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
        captureSession.addOutput(output)
    if err != nil {
      println("error: \(err?.localizedDescription)")
    }
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
    previewLayer?.frame = self.view.bounds
    self.view.layer.addSublayer(previewLayer!)
        captureSession.startRunning()
      }
      func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        if(self.isStart)
        {
          let resultImage = sampleBufferToImage(sampleBuffer)
          let context = CIContext(options:[kCIContextUseSoftwareRenderer:true])
      let detector = CIDetector(ofType:CIDetectorTypeFace, context:context, options:[CIDetectorAccuracy: CIDetectorAccuracyHigh])
      let ciImage = CIImage(image: resultImage)
      let results:NSArray = detector.featuresInImage(ciImage, options: [CIDetectorImageOrientation : 6])
      for r in results {
        let face:CIFaceFeature = r as! CIFaceFeature
        let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds), scale: 1.0, orientation: .Right)
        NSLog("Face found at (%f,%f) of dimensions %fx%f", face.bounds.origin.x, face.bounds.origin.y, face.bounds.size.width, face.bounds.size.height)
            dispatch_async(dispatch_get_main_queue()) {
              if (self.isStart)
              {
                self.dismissViewControllerAnimated(true, completion: nil)
                self.didReceiveMemoryWarning()
                self.callBack!(face: faceImage!)
              }
              self.isStart = false
            }
          }
        }
      }
  // Wrap the sample buffer's BGRA pixel data in a bitmap context and return it as a UIImage.
  private func sampleBufferToImage(sampleBuffer: CMSampleBuffer!) -> UIImage {
        let imageBuffer: CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer)
        CVPixelBufferLockBaseAddress(imageBuffer, 0)
        let baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)
        let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()
    let bitsPerComponent = 8
    let bitmapInfo = CGBitmapInfo(CGBitmapInfo.ByteOrder32Little.rawValue | CGImageAlphaInfo.PremultipliedFirst.rawValue)
    let newContext = CGBitmapContextCreate(baseAddress, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo)
        let imageRef: CGImageRef = CGBitmapContextCreateImage(newContext)
        let resultImage = UIImage(CGImage: imageRef, scale: 1.0, orientation: UIImageOrientation.Right)!
        return resultImage
      }
      func imageResize (imageObj:UIImage, sizeChange:CGSize)-> UIImage{
        let hasAlpha = false
        let scale: CGFloat = 0.0
         
        UIGraphicsBeginImageContextWithOptions(sizeChange, !hasAlpha, scale)
        imageObj.drawInRect(CGRect(origin: CGPointZero, size: sizeChange))
    let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return scaledImage
      }
    }
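
    For completeness, here is a minimal sketch of how a presenting screen could use this controller. The presenting class and method below are hypothetical, made up purely for illustration; only callBack and AVCaptireVideoPicController come from the code above.

    // Hypothetical presenting controller - illustration only, not part of the repository.
    class FaceScanDemoViewController: UIViewController {
      func startFaceScan() {
        let picker = AVCaptireVideoPicController()
        // The callback is invoked on the main queue once a face has been detected and cropped.
        picker.callBack = { (face: UIImage) in
          println("got a face image of size \(face.size.width) x \(face.size.height)")
          // ...display the face, upload it, etc.
        }
        self.presentViewController(picker, animated: true, completion: nil)
      }
    }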
  Original post: https://www.cnblogs.com/Free-Thinker/p/5106330.html