Using AVCaptureSession to get each frame's sampleBuffer

Define the instance variables and properties:

/// Capture session that coordinates the inputs and outputs
AVCaptureSession *_captureSession;
/// Capture device
AVCaptureDevice *_captureDevice;
/// Input sources
AVCaptureDeviceInput *_videoCaptureDeviceInput;
AVCaptureDeviceInput *_audioCaptureDeviceInput;
/// Video output
AVCaptureVideoDataOutput *_captureVideoDataOutput;
/// Audio output
AVCaptureAudioDataOutput *_captureAudioDataOutput;
/// Serial queue for the sample buffer callbacks
dispatch_queue_t my_Queue;
/// Video connection
AVCaptureConnection *_videoConnection;
/// Audio connection
AVCaptureConnection *_audioConnection;
/// Image view that displays each frame
UIImageView *bufferImageView;

/// Output file path
@property (nonatomic, copy) NSString *path;
/// Writer
@property (nonatomic, strong) AVAssetWriter *assetWriter;
@property (nonatomic, strong) AVAssetWriterInputPixelBufferAdaptor *adaptor;
/// Video writer input
@property (nonatomic, strong) AVAssetWriterInput *videoInput;
/// Audio writer input
@property (nonatomic, strong) AVAssetWriterInput *audioInput;

- (void)initDevice {
    bufferImageView = [[UIImageView alloc] initWithFrame:CGRectMake(0, 64, 375, 375)];
    [self.view addSubview:bufferImageView];

    _captureSession = [[AVCaptureSession alloc] init];
    if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset640x480]) {
        [_captureSession setSessionPreset:AVCaptureSessionPreset640x480];
    }

    // Get the back camera
    _captureDevice = [self backCamera];

    // Audio input
    AVCaptureDevice *audioCaptureDevice = [[AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio] firstObject];
    _audioCaptureDeviceInput = [[AVCaptureDeviceInput alloc] initWithDevice:audioCaptureDevice error:nil];

    // Video input
    _videoCaptureDeviceInput = [AVCaptureDeviceInput deviceInputWithDevice:_captureDevice error:nil];

    if ([_captureSession canAddInput:_videoCaptureDeviceInput]) {
        [_captureSession addInput:_videoCaptureDeviceInput];
    }
    if ([_captureSession canAddInput:_audioCaptureDeviceInput]) {
        [_captureSession addInput:_audioCaptureDeviceInput];
    }

    // Lock the device and pin the frame rate to 15 fps (frame duration = 1/15 s)
    if ([_captureDevice lockForConfiguration:nil]) {
        [_captureDevice setActiveVideoMaxFrameDuration:CMTimeMake(1, 15)];
        [_captureDevice setActiveVideoMinFrameDuration:CMTimeMake(1, 15)];
        [_captureDevice unlockForConfiguration];
    }

    // Video output; request BGRA so each frame can be fed straight into a CGBitmapContext
    _captureVideoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
    _captureVideoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA]
                                                                        forKey:(id)kCVPixelBufferPixelFormatTypeKey];
    [_captureSession addOutput:_captureVideoDataOutput];
    my_Queue = dispatch_queue_create("myqueue", NULL);
    [_captureVideoDataOutput setSampleBufferDelegate:self queue:my_Queue];
    _captureVideoDataOutput.alwaysDiscardsLateVideoFrames = YES;

    // Audio output
    _captureAudioDataOutput = [[AVCaptureAudioDataOutput alloc] init];
    [_captureAudioDataOutput setSampleBufferDelegate:self queue:my_Queue];
    [_captureSession addOutput:_captureAudioDataOutput];

    // Video connection
    _videoConnection = [_captureVideoDataOutput connectionWithMediaType:AVMediaTypeVideo];
    _videoConnection.videoOrientation = AVCaptureVideoOrientationPortrait;

    // Audio connection
    _audioConnection = [_captureAudioDataOutput connectionWithMediaType:AVMediaTypeAudio];

    [_captureSession startRunning];

    // Note: right after creation it helps to toggle the camera (front/back) once; otherwise
    // the UIImages converted from the sample buffers come in rotated 90° to the left. The
    // cause is unclear — apparently the sensor's native orientation is landscape — something
    // to revisit later.
}
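The `backCamera` helper called above isn't shown in the original post. A minimal sketch, using the same pre-iOS-10 `devicesWithMediaType:` API as the rest of the code, might look like this:

// Hypothetical helper — not from the original post. Returns the back-facing camera,
// falling back to the default video device if none is found.
- (AVCaptureDevice *)backCamera {
    for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
        if (device.position == AVCaptureDevicePositionBack) {
            return device;
        }
    }
    return [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
}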

Implement the delegate method:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    if (captureOutput == _captureVideoDataOutput) { // only process video frames here
        CFRetain(sampleBuffer);
        // Convert the sample buffer to an image; the method below also crops it
        UIImage *image = [self imageFromSampleBuffer:sampleBuffer];
        // At this point you can run any image-processing algorithm on the frame
        dispatch_async(dispatch_get_main_queue(), ^{
            bufferImageView.image = image;
        });
        CFRelease(sampleBuffer);
    }

#pragma mark - Then write the frames to a file

    if (self.isWriting == NO) { // "recording started" flag ("开始写入" in the original)
        return;
    }

    BOOL isVideo = YES;
    CFRetain(sampleBuffer);
    if (captureOutput != _captureVideoDataOutput) {
        isVideo = NO;
    }

    // Lazily create the writer once the first audio frame arrives, so the audio
    // format description (channel count, sample rate) is available
    if (writer == nil && !isVideo) {
        videoPath = [NSString stringWithFormat:@"%@/Documents/movie.mp4", NSHomeDirectory()];
        CMFormatDescriptionRef fmt = CMSampleBufferGetFormatDescription(sampleBuffer);
        const AudioStreamBasicDescription *asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt);
        [[NSFileManager defaultManager] removeItemAtPath:videoPath error:nil];
        [self initPath:videoPath videoWidth:480 videoHeight:480 channels:asbd->mChannelsPerFrame samples:asbd->mSampleRate];
    }

    if (CMSampleBufferDataIsReady(sampleBuffer)) {
        if (writer.assetWriter.status == AVAssetWriterStatusUnknown) {
            if (isVideo == YES) {
                // Start the writer session at the first video frame's timestamp
                [writer firstSamebuffer:sampleBuffer];
            }
        }
        if (writer.assetWriter.status == AVAssetWriterStatusFailed) {
            NSLog(@"write failed: %@", writer.assetWriter.error);
        }
        if (isVideo) {
            if (writer.videoInput.readyForMoreMediaData == YES) {
                CMTime startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
                // resultImage is the processed frame produced above
                [writer encodeImageDataToVideo:resultImage time:startTime];
            }
        } else {
            [writer encodeAudioFrame:sampleBuffer];
        }
    }
    CFRelease(sampleBuffer); // balance the CFRetain above even when the data wasn't ready
}
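`writer` above is a thin wrapper object from the author's project and its source isn't included in the post. As a rough, hypothetical sketch, `firstSamebuffer:` presumably starts the writer and opens the session at the first frame's timestamp:

// Hypothetical reconstruction — the original wrapper's implementation isn't shown.
- (void)firstSamebuffer:(CMSampleBufferRef)sampleBuffer {
    CMTime startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    if ([self.assetWriter startWriting]) {
        // All subsequent appends are timed relative to this source time
        [self.assetWriter startSessionAtSourceTime:startTime];
    } else {
        NSLog(@"startWriting failed: %@", self.assetWriter.error);
    }
}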

- (void)createWriter:(NSString *)path Width:(NSInteger)width Height:(NSInteger)height channels:(int)channels samples:(Float64)samples {
    // Create the writer; remove any leftover file first, since AVAssetWriter won't overwrite
    [[NSFileManager defaultManager] removeItemAtPath:path error:nil];
    NSURL *pathUrl = [NSURL fileURLWithPath:path];
    _assetWriter = [AVAssetWriter assetWriterWithURL:pathUrl fileType:AVFileTypeMPEG4 error:nil];
    _assetWriter.shouldOptimizeForNetworkUse = YES;
    [self initVideoInputHeight:height width:width];
    [self initAudioInputChannels:channels samples:samples];
}
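The post never shows how recording ends. A minimal sketch of a stop method, assuming the `isWriting` flag used in the delegate above (both the flag and this method are assumptions, not from the original):

// Hypothetical stop method — not part of the original post.
- (void)stopWriting {
    self.isWriting = NO;
    [_videoInput markAsFinished];
    [_audioInput markAsFinished];
    [_assetWriter finishWritingWithCompletionHandler:^{
        NSLog(@"finished writing: %@", self.path);
    }];
}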

// Initialize the video input
- (void)initVideoInputHeight:(NSInteger)cy width:(NSInteger)cx {
    // Recording configuration: codec, resolution, and so on
    NSDictionary *settings = [NSDictionary dictionaryWithObjectsAndKeys:
                              AVVideoCodecH264, AVVideoCodecKey,
                              [NSNumber numberWithInteger:cx], AVVideoWidthKey,
                              [NSNumber numberWithInteger:cy], AVVideoHeightKey,
                              nil];
    // Create the video writer input
    _videoInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:settings];
    // The input should tailor its processing for a real-time data source
    _videoInput.expectsMediaDataInRealTime = YES;
    // Create the pixel buffer adaptor
    [self initVideoInputAdaptor];
    NSParameterAssert(_videoInput);
    NSParameterAssert([_assetWriter canAddInput:_videoInput]);
    // Attach the video input to the writer
    [_assetWriter addInput:_videoInput];
}
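If you need finer control than codec and resolution, the same settings dictionary accepts compression properties. The keys below are standard AVFoundation constants; the numbers are illustrative assumptions, not values from the original post:

// Optional: cap the average bitrate and keyframe interval.
NSDictionary *compression = @{ AVVideoAverageBitRateKey : @(1000000),   // ~1 Mbps, illustrative
                               AVVideoMaxKeyFrameIntervalKey : @(30) }; // keyframe every 30 frames
NSDictionary *settings = @{ AVVideoCodecKey : AVVideoCodecH264,
                            AVVideoWidthKey : @(480),
                            AVVideoHeightKey : @(480),
                            AVVideoCompressionPropertiesKey : compression };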

// Initialize the pixel buffer adaptor used to append images to the video input
- (void)initVideoInputAdaptor {
    NSDictionary *sourcePixelBufferAttributesDictionary = [NSDictionary dictionaryWithObjectsAndKeys:
                                                           [NSNumber numberWithInt:kCVPixelFormatType_32BGRA], kCVPixelBufferPixelFormatTypeKey, nil];
    _adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:_videoInput
                                                                                sourcePixelBufferAttributes:sourcePixelBufferAttributesDictionary];
}
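`encodeImageDataToVideo:time:`, called from the delegate, isn't shown in the post either. A plausible sketch that combines the adaptor with `pixelBufferFromCGImage:size:` below (the original calls this on its writer wrapper; it's written here as if it lived alongside the adaptor):

// Hypothetical reconstruction: convert a processed UIImage back into a pixel
// buffer and append it at the frame's presentation timestamp.
- (void)encodeImageDataToVideo:(UIImage *)image time:(CMTime)time {
    if (!_adaptor.assetWriterInput.readyForMoreMediaData) {
        return; // drop the frame rather than block the capture queue
    }
    CVPixelBufferRef buffer = [self pixelBufferFromCGImage:image.CGImage size:image.size];
    if (buffer) {
        [_adaptor appendPixelBuffer:buffer withPresentationTime:time];
        CVPixelBufferRelease(buffer); // pixelBufferFromCGImage returns a +1 reference
    }
}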

// Initialize the audio input
- (void)initAudioInputChannels:(int)ch samples:(Float64)rate {
    // Audio configuration: format (AAC here), channel count, sample rate, and bitrate
    NSDictionary *settings = [NSDictionary dictionaryWithObjectsAndKeys:
                              [NSNumber numberWithInt:kAudioFormatMPEG4AAC], AVFormatIDKey,
                              [NSNumber numberWithInt:ch], AVNumberOfChannelsKey,
                              [NSNumber numberWithFloat:rate], AVSampleRateKey,
                              [NSNumber numberWithInt:128000], AVEncoderBitRateKey,
                              nil];
    // Create the audio writer input
    _audioInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio outputSettings:settings];
    // The input should tailor its processing for a real-time data source
    _audioInput.expectsMediaDataInRealTime = YES;
    // Attach the audio input to the writer
    [_assetWriter addInput:_audioInput];
}
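`encodeAudioFrame:`, also called from the delegate, is likewise not shown. Since the writer input re-encodes to AAC, the captured buffer can be appended directly; a minimal guess:

// Hypothetical reconstruction: append a captured audio sample buffer as-is.
- (void)encodeAudioFrame:(CMSampleBufferRef)sampleBuffer {
    if (_audioInput.readyForMoreMediaData &&
        _assetWriter.status == AVAssetWriterStatusWriting) {
        [_audioInput appendSampleBuffer:sampleBuffer];
    }
}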

// Convert a CGImage into a BGRA CVPixelBuffer. The caller owns (and must release)
// the returned buffer.
- (CVPixelBufferRef)pixelBufferFromCGImage:(CGImageRef)image size:(CGSize)size {
    NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey, nil];
    CVPixelBufferRef pxbuffer = NULL;
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width, size.height, kCVPixelFormatType_32BGRA, (__bridge CFDictionaryRef)options, &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
    // Draw the image into the pixel buffer's backing memory
    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    // Use the buffer's actual row stride — Core Video may pad rows beyond 4 * width
    CGContextRef context = CGBitmapContextCreate(pxdata, size.width, size.height, 8,
                                                 CVPixelBufferGetBytesPerRow(pxbuffer),
                                                 rgbColorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    NSParameterAssert(context);
    CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);
    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
    return pxbuffer;
}

// Create a UIImage from a sample buffer
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    // Get the Core Video image buffer that backs the sample buffer's media data
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Lock the pixel buffer's base address
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    // Base address, bytes per row, width and height of the pixel buffer
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    if (width == 0 || height == 0) {
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0); // don't leak the lock on early return
        return nil;
    }
    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Create a bitmap graphics context backed by the pixel buffer's data
    // (this works because the output was configured for kCVPixelFormatType_32BGRA)
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                 bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    // Create a Quartz image from the pixel data in the bitmap context
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);
    // Crop the image to a square (height x height)
    CGImageRef cgImage = CGImageCreateWithImageInRect(quartzImage, CGRectMake(0, 0, height, height));
    // Unlock the pixel buffer
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    // Release the context and color space
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    // Create a UIImage from the Quartz image
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    // Alternative that compensates for the sensor's landscape orientation:
    // UIImage *image = [UIImage imageWithCGImage:quartzImage scale:1.0 orientation:UIImageOrientationRight];
    // Release the Quartz images
    CGImageRelease(cgImage);
    CGImageRelease(quartzImage);
    return image;
}
