iOS: Reading Each Camera Frame and Running Face Detection (Swift)

I recently helped someone with a project whose core feature is face detection through the camera.

GitHub repository: https://github.com/qugang/AVCaptureVideoTemplate

To use the camera on iOS you need the AVFoundation framework; I won't walk through everything it contains here.

Starting the camera requires the AVCaptureSession class.

To receive each frame the camera delivers, you then implement the AVCaptureVideoDataOutputSampleBufferDelegate protocol.

First, add code in viewDidLoad that locates the camera device; once the front camera is found, start the session:

        captureSession.sessionPreset = AVCaptureSessionPresetLow
        let devices = AVCaptureDevice.devices()
        // Walk the capture devices and pick the front-facing camera
        for device in devices {
            if (device.hasMediaType(AVMediaTypeVideo)) {
                if (device.position == AVCaptureDevicePosition.Front) {
                    captureDevice = device as? AVCaptureDevice
                    if captureDevice != nil {
                        println("Capture Device found")
                        beginSession()
                    }
                }
            }
        }
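
A note in passing: if the default camera (usually the back one) were enough, AVCaptureDevice offers a one-liner; the loop above remains the way to pick the front camera on this SDK. A minimal sketch:

        // Sketch: grab the system default video device instead of looping.
        if let device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo) {
            captureDevice = device
            beginSession()
        }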

beginSession starts the camera:

func beginSession() {
        var err: NSError? = nil
        captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
        if err != nil {
            println("error: \(err?.localizedDescription)")
        }

        // Deliver 32BGRA frames to the delegate on a private serial queue
        let output = AVCaptureVideoDataOutput()
        let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
        output.setSampleBufferDelegate(self, queue: cameraQueue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
        captureSession.addOutput(output)

        // Full-screen live preview
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
        previewLayer?.frame = self.view.bounds
        self.view.layer.addSublayer(previewLayer!)

        captureSession.startRunning()
    }
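
A side note not in the original project: AVCaptureSession also exposes canAddInput and canAddOutput, and production code usually checks them before wiring the pipeline. A minimal sketch of the same hookup with those guards:

        // Sketch: guard the connections instead of attaching unconditionally.
        let input = AVCaptureDeviceInput(device: captureDevice, error: &err)
        if captureSession.canAddInput(input) {
            captureSession.addInput(input)
        }
        if captureSession.canAddOutput(output) {
            captureSession.addOutput(output)
        }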

Once the session is running, implement the captureOutput delegate method:

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {

        if (self.isStart)
        {
            // Convert the raw sample buffer into a UIImage for Core Image
            let resultImage = sampleBufferToImage(sampleBuffer)

            let context = CIContext(options: [kCIContextUseSoftwareRenderer: true])
            let detector = CIDetector(ofType: CIDetectorTypeFace, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])

            let ciImage = CIImage(image: resultImage)

            // Orientation 6 (right, top) matches the portrait front camera
            let results: NSArray = detector.featuresInImage(ciImage, options: [CIDetectorImageOrientation: 6])

            for r in results {
                let face: CIFaceFeature = r as! CIFaceFeature
                let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds), scale: 1.0, orientation: .Right)

                NSLog("Face found at (%f,%f) of dimensions %fx%f", face.bounds.origin.x, face.bounds.origin.y, face.bounds.size.width, face.bounds.size.height)

                dispatch_async(dispatch_get_main_queue()) {
                    if (self.isStart)
                    {
                        self.dismissViewControllerAnimated(true, completion: nil)
                        // didReceiveMemoryWarning is overridden to stop the capture session
                        self.didReceiveMemoryWarning()

                        self.callBack!(face: faceImage!)
                    }
                    self.isStart = false
                }
            }
        }
    }

CIDetector is run on each frame to find faces. It can also report eye blinks and smiles; see Apple's official API documentation for the full set of options.
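
For example, passing the CIDetectorSmile and CIDetectorEyeBlink options when extracting features exposes smile and blink state on each CIFaceFeature. A minimal sketch (the helper name checkExpressions is hypothetical, not part of the project above):

    // Hypothetical helper: report smile/blink state for each detected face.
    func checkExpressions(ciImage: CIImage, detector: CIDetector) {
        let options: [NSObject : AnyObject] = [
            CIDetectorImageOrientation: 6,
            CIDetectorSmile: true,    // evaluate smiles
            CIDetectorEyeBlink: true  // evaluate closed eyes
        ]
        for r in detector.featuresInImage(ciImage, options: options) {
            if let face = r as? CIFaceFeature {
                println("smiling: \(face.hasSmile)")
                println("eyes closed: left \(face.leftEyeClosed), right \(face.rightEyeClosed)")
            }
        }
    }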

That is the key code. A two-second delay is configured, so face detection begins two seconds after the view loads (via the one-shot NSTimer in the full source below).

The full source:

//
//  ViewController.swift
//  AVSessionTest
//
//  Created by qugang on 15/7/8.
//  Copyright (c) 2015 qugang. All rights reserved.
//

import UIKit
import AVFoundation

class AVCaptireVideoPicController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    var callBack :((face: UIImage) ->())?
    let captureSession = AVCaptureSession()
    var captureDevice : AVCaptureDevice?
    var previewLayer : AVCaptureVideoPreviewLayer?
    var pickUIImager : UIImageView = UIImageView(image: UIImage(named: "pick_bg"))
    var line : UIImageView = UIImageView(image: UIImage(named: "line"))
    var timer : NSTimer!
    var upOrdown = true
    var isStart = false

    override func viewDidLoad() {
        super.viewDidLoad()

        captureSession.sessionPreset = AVCaptureSessionPresetLow
        let devices = AVCaptureDevice.devices()
        // Walk the capture devices and pick the front-facing camera
        for device in devices {
            if (device.hasMediaType(AVMediaTypeVideo)) {
                if (device.position == AVCaptureDevicePosition.Front) {
                    captureDevice = device as? AVCaptureDevice
                    if captureDevice != nil {
                        println("Capture Device found")
                        beginSession()
                    }
                }
            }
        }
        pickUIImager.frame = CGRect(x: self.view.bounds.width / 2 - 100, y: self.view.bounds.height / 2 - 100,width: 200,height: 200)
        line.frame = CGRect(x: self.view.bounds.width / 2 - 100, y: self.view.bounds.height / 2 - 100, width: 200, height: 2)
        self.view.addSubview(pickUIImager)
        self.view.addSubview(line)
        timer = NSTimer.scheduledTimerWithTimeInterval(0.01, target: self, selector: "animationState", userInfo: nil, repeats: true)

        NSTimer.scheduledTimerWithTimeInterval(2, target: self, selector: "isStartTrue", userInfo: nil, repeats: false)
    }

    func isStartTrue(){
        self.isStart = true
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Also invoked manually from captureOutput to stop the camera session
        captureSession.stopRunning()
    }

    func animationState() {
        if upOrdown {
            if (line.frame.origin.y >= pickUIImager.frame.origin.y + 200)
            {
                upOrdown = false
            }
            else
            {
                line.frame.origin.y += 2
            }
        } else {
            if (line.frame.origin.y <= pickUIImager.frame.origin.y)
            {
                upOrdown = true
            }
            else
            {
                line.frame.origin.y -= 2
            }
        }
    }

    func beginSession() {
        var err: NSError? = nil
        captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
        if err != nil {
            println("error: \(err?.localizedDescription)")
        }

        // Deliver 32BGRA frames to the delegate on a private serial queue
        let output = AVCaptureVideoDataOutput()
        let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
        output.setSampleBufferDelegate(self, queue: cameraQueue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
        captureSession.addOutput(output)

        // Full-screen live preview
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
        previewLayer?.frame = self.view.bounds
        self.view.layer.addSublayer(previewLayer!)

        captureSession.startRunning()
    }
    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {

        if (self.isStart)
        {
            // Convert the raw sample buffer into a UIImage for Core Image
            let resultImage = sampleBufferToImage(sampleBuffer)

            let context = CIContext(options: [kCIContextUseSoftwareRenderer: true])
            let detector = CIDetector(ofType: CIDetectorTypeFace, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])

            let ciImage = CIImage(image: resultImage)

            // Orientation 6 (right, top) matches the portrait front camera
            let results: NSArray = detector.featuresInImage(ciImage, options: [CIDetectorImageOrientation: 6])

            for r in results {
                let face: CIFaceFeature = r as! CIFaceFeature
                let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds), scale: 1.0, orientation: .Right)

                NSLog("Face found at (%f,%f) of dimensions %fx%f", face.bounds.origin.x, face.bounds.origin.y, face.bounds.size.width, face.bounds.size.height)

                dispatch_async(dispatch_get_main_queue()) {
                    if (self.isStart)
                    {
                        self.dismissViewControllerAnimated(true, completion: nil)
                        // didReceiveMemoryWarning is overridden to stop the capture session
                        self.didReceiveMemoryWarning()

                        self.callBack!(face: faceImage!)
                    }
                    self.isStart = false
                }
            }
        }
    }
    private func sampleBufferToImage(sampleBuffer: CMSampleBuffer!) -> UIImage {
        let imageBuffer: CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer)
        CVPixelBufferLockBaseAddress(imageBuffer, 0)
        let baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0)

        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)

        let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()

        let bitsPerComponent = 8
        let bitmapInfo = CGBitmapInfo((CGBitmapInfo.ByteOrder32Little.rawValue | CGImageAlphaInfo.PremultipliedFirst.rawValue) as UInt32)

        // Wrap the raw BGRA pixel data in a bitmap context, then snapshot it
        let newContext = CGBitmapContextCreate(baseAddress, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo) as CGContextRef
        let imageRef: CGImageRef = CGBitmapContextCreateImage(newContext)

        // Balance the lock taken above
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0)

        let resultImage = UIImage(CGImage: imageRef, scale: 1.0, orientation: UIImageOrientation.Right)!

        return resultImage
    }

    func imageResize(imageObj: UIImage, sizeChange: CGSize) -> UIImage {

        let hasAlpha = false
        let scale: CGFloat = 0.0 // 0.0 means use the device's main screen scale

        UIGraphicsBeginImageContextWithOptions(sizeChange, !hasAlpha, scale)
        imageObj.drawInRect(CGRect(origin: CGPointZero, size: sizeChange))

        let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext() // balance UIGraphicsBeginImageContextWithOptions
        return scaledImage
    }
}
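
Finally, a hedged sketch of how a caller might present this controller and consume the callback (the presenting code below is illustrative, not taken from the repository):

    // Illustrative caller: present the capture screen and receive the face crop.
    let picker = AVCaptireVideoPicController()
    picker.callBack = { (face: UIImage) in
        // Delivered on the main queue once a face has been detected
        println("received face image of size \(face.size)")
    }
    self.presentViewController(picker, animated: true, completion: nil)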