iOS Voice Capture

On iOS, before you can do speech recognition you first need to capture the audio from the microphone.
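One caveat before the listing: the code below never asks for microphone permission. On iOS 7 and later the system prompts on first use (and the recorder silently captures nothing if access is denied), and iOS 10 and later additionally require an NSMicrophoneUsageDescription entry in Info.plist. A minimal sketch of an explicit request, which is my addition and not part of the original code:

    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        if (!granted) {
            NSLog(@"microphone access was denied");
        }
    }];

With that out of the way, here is the full view controller.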

#import "GetAudioViewController.h"

#import <AVFoundation/AVFoundation.h>

#import <UIKit/UIKit.h>

#import <ImageIO/ImageIO.h>

#import <MobileCoreServices/MobileCoreServices.h>

#import <QuartzCore/QuartzCore.h>

@interface GetAudioViewController ()
{
    AVAudioPlayer   *_player;        // plays back the finished recording
    AVAudioRecorder *_audiorecord;   // captures audio from the microphone
    NSTimer         *_timerForPitch; // polls the meters to drive the UI
    CAShapeLayer    *_shapeLayer;    // draws the wave curve
    CADisplayLink   *_displayLink;   // redraws the wave once per frame

    __weak IBOutlet UIProgressView *_audioPower; // input-level meter
    __weak IBOutlet UIButton *_record;
    __weak IBOutlet UIButton *_pause;
    __weak IBOutlet UIButton *_resume;
    __weak IBOutlet UIButton *_stop;
    __weak IBOutlet UIView *_viewForWave;        // host view for the wave layer

    float Pitch;                    // current input level driving the wave and meter
    NSInteger _recordEncoding;      // selects the record format (see recordClick:)
    CFTimeInterval _firstTimestamp; // display-link start time
    NSInteger _loopCount;
}

@end

@implementation GetAudioViewController

- (void)viewDidLoad
{
    [super viewDidLoad];
}

// Helper: create a directory if it does not exist yet (unused in this listing).
- (void)createPath:(NSString *)path
{
    NSFileManager *filemanager = [NSFileManager defaultManager];
    if (![filemanager fileExistsAtPath:path]) {
        [filemanager createDirectoryAtPath:path
               withIntermediateDirectories:YES
                                attributes:nil
                                     error:nil];
    }
}

// Builds the wave curve: a cubic Bezier across the view whose control points
// swing up and down with the current Pitch value.
- (UIBezierPath *)pathAtInterval:(NSTimeInterval)interval
{
    UIBezierPath *path = [UIBezierPath bezierPath];
    [path moveToPoint:CGPointMake(0, _viewForWave.bounds.size.height / 2.0)];
    CGFloat fractionOfSecond = interval - floor(interval);
    CGFloat yOffset = _viewForWave.bounds.size.height * sin(fractionOfSecond * M_PI * Pitch * 8);
    [path addCurveToPoint:CGPointMake(_viewForWave.bounds.size.width, _viewForWave.bounds.size.height / 2.0)
            controlPoint1:CGPointMake(_viewForWave.bounds.size.width / 2.0, _viewForWave.bounds.size.height / 2.0 - yOffset)
            controlPoint2:CGPointMake(_viewForWave.bounds.size.width / 2.0, _viewForWave.bounds.size.height / 2.0 + yOffset)];
    return path;
}

- (void)addShapeLayer
{
    _shapeLayer = [CAShapeLayer layer];
    _shapeLayer.path = [[self pathAtInterval:2.0] CGPath];
    _shapeLayer.fillColor = [[UIColor redColor] CGColor];
    _shapeLayer.lineWidth = 1.0;
    _shapeLayer.strokeColor = [[UIColor whiteColor] CGColor];
    [_viewForWave.layer addSublayer:_shapeLayer];
}

// Called once per frame: rebuild the wave path from the elapsed time.
- (void)handleDisplayLink:(CADisplayLink *)displayLink
{
    if (!_firstTimestamp)
        _firstTimestamp = displayLink.timestamp;
    _loopCount++;
    NSTimeInterval elapsed = (displayLink.timestamp - _firstTimestamp);
    _shapeLayer.path = [[self pathAtInterval:elapsed] CGPath];
}

- (void)startDisplayLink
{
    _displayLink = [CADisplayLink displayLinkWithTarget:self selector:@selector(handleDisplayLink:)];
    // NSDefaultRunLoopMode pauses the wave while a scroll view is tracking;
    // NSRunLoopCommonModes would keep it animating in that case.
    [_displayLink addToRunLoop:[NSRunLoop currentRunLoop] forMode:NSDefaultRunLoopMode];
}

// Start recording. _recordEncoding selects the format:
// 1 = AAC, 2 = Apple Lossless, 3 = IMA4, 4 = iLBC, 5 = uLaw, 6 = linear PCM.
- (IBAction)recordClick:(id)sender
{
    _viewForWave.hidden = NO;
    [self addShapeLayer];
    [self startDisplayLink];
    NSLog(@"startRecording");

    _audiorecord = nil;
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    [audioSession setCategory:AVAudioSessionCategoryRecord error:nil];
    [audioSession setActive:YES error:nil]; // the session must be active before recording

    NSMutableDictionary *recordSettings = [[NSMutableDictionary alloc] initWithCapacity:10];
    if (_recordEncoding == 6) {
        // Uncompressed 16-bit little-endian linear PCM, stereo, 44.1 kHz.
        [recordSettings setObject:[NSNumber numberWithInt:kAudioFormatLinearPCM] forKey:AVFormatIDKey];
        [recordSettings setObject:[NSNumber numberWithFloat:44100.0] forKey:AVSampleRateKey];
        [recordSettings setObject:[NSNumber numberWithInt:2] forKey:AVNumberOfChannelsKey];
        [recordSettings setObject:[NSNumber numberWithInt:16] forKey:AVLinearPCMBitDepthKey];
        [recordSettings setObject:[NSNumber numberWithBool:NO] forKey:AVLinearPCMIsBigEndianKey];
        [recordSettings setObject:[NSNumber numberWithBool:NO] forKey:AVLinearPCMIsFloatKey];
    } else {
        NSNumber *formatObject;
        switch (_recordEncoding) {
            case 1:
                formatObject = [NSNumber numberWithInt:kAudioFormatMPEG4AAC];
                break;
            case 2:
                formatObject = [NSNumber numberWithInt:kAudioFormatAppleLossless];
                break;
            case 3:
                formatObject = [NSNumber numberWithInt:kAudioFormatAppleIMA4];
                break;
            case 4:
                formatObject = [NSNumber numberWithInt:kAudioFormatiLBC];
                break;
            case 5:
                formatObject = [NSNumber numberWithInt:kAudioFormatULaw];
                break;
            default:
                formatObject = [NSNumber numberWithInt:kAudioFormatAppleIMA4];
        }
        [recordSettings setObject:formatObject forKey:AVFormatIDKey];
        [recordSettings setObject:[NSNumber numberWithFloat:44100.0] forKey:AVSampleRateKey];
        [recordSettings setObject:[NSNumber numberWithInt:2] forKey:AVNumberOfChannelsKey];
        // 12800 bps is very low for stereo 44.1 kHz; 128000 was probably intended.
        [recordSettings setObject:[NSNumber numberWithInt:12800] forKey:AVEncoderBitRateKey];
        [recordSettings setObject:[NSNumber numberWithInt:16] forKey:AVLinearPCMBitDepthKey]; // ignored for compressed formats
        [recordSettings setObject:[NSNumber numberWithInt:AVAudioQualityHigh] forKey:AVEncoderAudioQualityKey];
    }

    // Record into Documents/recordTest.caf.
    NSArray *dirPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *docsDir = [dirPaths objectAtIndex:0];
    NSString *soundFilePath = [docsDir stringByAppendingPathComponent:@"recordTest.caf"];
    NSURL *url = [NSURL fileURLWithPath:soundFilePath];

    NSError *error = nil;
    _audiorecord = [[AVAudioRecorder alloc] initWithURL:url settings:recordSettings error:&error];
    _audiorecord.meteringEnabled = YES; // required before the meter calls below return real values

    if ([_audiorecord prepareToRecord]) {
        [_audiorecord record];
        // Poll the meters 100 times a second to drive the progress bar and the wave.
        _timerForPitch = [NSTimer scheduledTimerWithTimeInterval:0.01
                                                          target:self
                                                        selector:@selector(levelTimerCallback:)
                                                        userInfo:nil
                                                         repeats:YES];
    } else {
        NSLog(@"prepareToRecord failed: %@", error.localizedDescription);
    }
}
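// Aside (my sketch, not part of the original post): the core of the AAC branch
// above can be written much more compactly with dictionary literals. Bit rate
// and PCM keys are omitted here for brevity.
- (NSDictionary *)aacRecordSettings
{
    return @{ AVFormatIDKey            : @(kAudioFormatMPEG4AAC),
              AVSampleRateKey          : @44100.0f,
              AVNumberOfChannelsKey    : @2,
              AVEncoderAudioQualityKey : @(AVAudioQualityHigh) };
}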

// Timer callback: convert the average input power (in dB) to a 0..1 value
// for the progress bar; Pitch also feeds the wave path above.
- (void)levelTimerCallback:(NSTimer *)timer
{
    [_audiorecord updateMeters];
    float linear1 = pow(10, [_audiorecord averagePowerForChannel:0] / 20);
    if (linear1 > 0.03) {
        Pitch = linear1 + 0.20; // boost quiet input so the wave stays visible
    } else {
        Pitch = 0.0;            // treat anything below the threshold as silence
    }
    [_audioPower setProgress:Pitch];
}
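// Aside (my sketch, not part of the original post): the dB-to-linear conversion
// used above, in one place. averagePowerForChannel: reports decibels in the
// range -160 (silence) to 0 (full scale), and pow(10, dB / 20) maps that onto
// a linear 0..1 scale, e.g. -20 dB -> 0.1 and 0 dB -> 1.0.
- (float)normalizedPowerFromDecibels:(float)decibels
{
    return powf(10.0f, decibels / 20.0f);
}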

// "Pause" actually stops the recorder and tears down the wave animation.
- (IBAction)pauseClick:(id)sender
{
    NSLog(@"stopRecording");
    _viewForWave.hidden = YES;
    [_audiorecord stop];
    [self stopDisplayLink];
    _shapeLayer.path = [[self pathAtInterval:0] CGPath];
    [_timerForPitch invalidate];
    _timerForPitch = nil;
}

- (void)stopDisplayLink
{
    [_displayLink invalidate];
    _displayLink = nil;
}

// "Resume" actually plays back the file that was just recorded.
- (IBAction)resumeClick:(id)sender
{
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    [audioSession setCategory:AVAudioSessionCategoryPlayback error:nil];

    NSArray *dirPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *docsDir = [dirPaths objectAtIndex:0];
    NSString *soundFilePath = [docsDir stringByAppendingPathComponent:@"recordTest.caf"];
    NSURL *url = [NSURL fileURLWithPath:soundFilePath];

    NSError *error;
    _player = [[AVAudioPlayer alloc] initWithContentsOfURL:url error:&error];
    _player.numberOfLoops = 0; // play once
    [_player play];
}
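// Aside (my sketch, not part of the original post): to reset the buttons when
// playback finishes on its own, set _player.delegate = self after the init
// above and implement the AVAudioPlayerDelegate callback:
- (void)audioPlayerDidFinishPlaying:(AVAudioPlayer *)player successfully:(BOOL)flag
{
    NSLog(@"playback finished (success: %d)", flag);
}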

- (IBAction)stopClick:(id)sender
{
    [_player stop];
}

- (void)didReceiveMemoryWarning
{
    [super didReceiveMemoryWarning];
}

@end

That is all of the code. _viewForWave is the view that renders the reactive waveform. If you are interested, you can write and polish your own version; if you base it on mine, please leave me a comment.
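One thing the post leaves out is teardown: if the user leaves the screen while recording, the repeating timer and the display link both retain the controller, and the recorder keeps running. A minimal cleanup sketch I would add to the class above (my addition, using only the ivars already declared):

    - (void)viewWillDisappear:(BOOL)animated
    {
        [super viewWillDisappear:animated];
        [_audiorecord stop];
        [_timerForPitch invalidate];
        _timerForPitch = nil;
        [self stopDisplayLink];
    }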

