1. Import the dependent frameworks and libraries:
SystemConfiguration.framework
AudioToolbox.framework
UIKit.framework
AVFoundation.framework
Foundation.framework
libz.tbd
Security.framework
QuartzCore.framework
CoreText.framework
GLKit.framework
OpenGLES.framework
CoreLocation.framework
CFNetwork.framework
CoreGraphics.framework
Note: CoreTelephony.framework is also required.
2. SDK files to include, from the package downloaded from the open platform:
the Headers folder, the Third Part folder, the ..resources folders (tone and scheme), and the .a static library file.
3. Set the project's Enable Bitcode build setting to NO.
4. In Build Settings, set Other Linker Flags to -ObjC.
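If you keep build settings in an .xcconfig file (an assumption; otherwise change the same keys directly in the target's Build Settings), steps 3 and 4 correspond to this minimal sketch:
// Example .xcconfig fragment for steps 3 and 4
ENABLE_BITCODE = NO
OTHER_LDFLAGS = $(inherited) -ObjC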
Example demo:
#import "ViewController.h"
#import "BDVoiceRecognitionClient.h"
@interface ViewController ()<MVoiceRecognitionClientDelegate>
@end
@implementation ViewController
- (void)viewDidLoad {
    [super viewDidLoad];
}
- (void)viewWillDisappear:(BOOL)animated
{
    [super viewWillDisappear:animated];
    // Stop listening for the voice volume level
    [[BDVoiceRecognitionClient sharedInstance] cancelListenCurrentDBLevelMeter];
}
- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}
- (IBAction)inputBtnClick:(id)sender {
    BDVoiceRecognitionClient *client = [BDVoiceRecognitionClient sharedInstance];
    [client setApiKey:@"Au2wN2SaDOpYZHgGqrIymMkU" withSecretKey:@"a0212d1fa0f28699aa5d1162a1bcbf1c"];
    // Set the recognition property (vertical/domain)
    //[client setPropertyList:@[[NSNumber numberWithInt:EVoiceRecognitionPropertyVideo]]];
    // Set the recognition language to Mandarin Chinese
    [client setLanguage:EVoiceRecognitionLanguageChinese];
    // Disable punctuation in the results (not disabled by default)
    [client disablePuncs:YES];
    // Enable voice activity detection (VAD), i.e. the SDK decides automatically when speech has ended (enabled by default)
    [client setNeedVadFlag:YES];
    // Compress the audio before uploading (compressed by default)
    [client setNeedCompressFlag:YES];
    // Set how long to wait for the online recognizer to respond; on timeout, synchronous offline recognition is triggered
    [client setOnlineWaitTime:5];
    // Enable natural language understanding (NLU) results
    [client setConfig:@"nlu" withFlag:YES];
    // Play a prompt tone when recording starts
    [client setPlayTone:EVoiceRecognitionPlayTonesRecStart isPlay:YES];
    // Play a prompt tone when recording ends
    [client setPlayTone:EVoiceRecognitionPlayTonesRecEnd isPlay:YES];
    // Enable voice volume (dB level) metering
    [client listenCurrentDBLevelMeter];
    // Query the current voice volume level (the return value is ignored here)
    [client getCurrentDBLevelMeter];
    int startStatus = [client startVoiceRecognition:self];
    switch (startStatus) {
        case EVoiceRecognitionStartWorking:
            self.label.text = @"Recognition started successfully!";
            break;
        default:
            self.label.text = [NSString stringWithFormat:@"Failed to start - error code: %d", startStatus];
            break;
    }
}
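// A hedged sketch, not part of the original demo: if you disable VAD with
// setNeedVadFlag:NO, the SDK no longer detects the end of speech on its own,
// so you have to tell it yourself, for example from a "done" button. The
// method names below are assumptions; verify them against the
// BDVoiceRecognitionClient.h header shipped with your SDK version.
//- (IBAction)finishBtnClick:(id)sender {
//    // Finish recording and wait for the recognition result
//    [[BDVoiceRecognitionClient sharedInstance] speakFinish];
//}
//- (IBAction)cancelBtnClick:(id)sender {
//    // Abort the current recognition session without waiting for a result
//    [[BDVoiceRecognitionClient sharedInstance] stopVoiceRecognition];
//}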
- (void)VoiceRecognitionClientWorkStatus:(int)aStatus obj:(id)aObj {
    switch (aStatus) {
        case EVoiceRecognitionClientWorkStatusFlushData: {
            // The server returned a partial (intermediate) result. To show partial results to the user
            // (so text appears on screen continuously), use the data delivered with this status, and
            // clear the display area each time a new message of this kind arrives to avoid duplication.
            NSMutableString *tmpString = [[NSMutableString alloc] initWithString:@""];
            [tmpString appendFormat:@"%@", [aObj objectAtIndex:0]];
            NSLog(@"result: %@", tmpString);
            break;
        }
        case EVoiceRecognitionClientWorkStatusFinish: {
            // The recognition server returned the final result; it is delivered as an array in aObj.
            // Clear the display area when this message arrives to avoid duplication.
            if ([[BDVoiceRecognitionClient sharedInstance] getRecognitionProperty] != EVoiceRecognitionPropertyInput)
            {
                // Parse and display the result
                NSMutableArray *resultData = (NSMutableArray *)aObj;
                NSMutableString *tmpString = [[NSMutableString alloc] initWithString:@""];
                // Collect the list of candidate results
                for (int i = 0; i < [resultData count]; i++) {
                    [tmpString appendFormat:@"%@\r\n", [resultData objectAtIndex:i]];
                }
                self.label.text = tmpString;
            } else {
                NSMutableString *sentenceString = [[NSMutableString alloc] initWithString:@""];
                for (NSArray *result in aObj) // aObj is an array here, and each result is also an array
                {
                    // Take the first candidate of each result and concatenate them;
                    // each element of result is a dictionary mapping a candidate word to its confidence
                    NSDictionary *dic = [result objectAtIndex:0];
                    NSString *candidateWord = [[dic allKeys] objectAtIndex:0];
                    [sentenceString appendString:candidateWord];
                }
                NSLog(@"result: %@", sentenceString);
            }
            break;
        }
        case EVoiceRecognitionClientWorkStatusReceiveData: {
            // This status occurs only in input mode and means recognition returned a result.
            // It is sent once per sub-sentence and the payload is cumulative (the second message
            // already contains the first sentence's result), so the app can display the text
            // sentence by sentence. Combined with the partial results above, this further
            // improves the voice-input experience.
            NSMutableString *sentenceString = [[NSMutableString alloc] initWithString:@""];
            for (NSArray *result in aObj) // aObj is an array here, and each result is also an array
            {
                // Take the first candidate of each result and concatenate them;
                // each element of result is a dictionary mapping a candidate word to its confidence
                NSDictionary *dic = [result objectAtIndex:0];
                NSString *candidateWord = [[dic allKeys] objectAtIndex:0];
                [sentenceString appendString:candidateWord];
            }
            NSLog(@"result: %@", sentenceString);
            break;
        }
        case EVoiceRecognitionClientWorkStatusNewRecordData: {
            // Audio data is available. The format is PCM: 16 kHz/16-bit with a Wi-Fi connection,
            // 8 kHz/16-bit otherwise.
            break;
        }
        case EVoiceRecognitionClientWorkStatusEnd: {
            // The user has finished speaking, but the server has not returned a result yet
            break;
        }
        case EVoiceRecognitionClientWorkStatusCancel: {
            // The user cancelled recognition
            break;
        }
        case EVoiceRecognitionClientWorkStatusError: {
            // An error occurred
            self.label.text = @"No voice input";
            break;
        }
        case EVoiceRecognitionClientWorkPlayStartTone:
        case EVoiceRecognitionClientWorkPlayStartToneFinish:
        case EVoiceRecognitionClientWorkStatusStartWorkIng:
        case EVoiceRecognitionClientWorkStatusStart:
        case EVoiceRecognitionClientWorkPlayEndToneFinish:
        case EVoiceRecognitionClientWorkPlayEndTone:
            break;
    }
}
@end
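Because the SDK records from the microphone (which is why AVFoundation.framework is in the dependency list), it is a good idea to request record permission before calling startVoiceRecognition:. The following is a minimal sketch using the standard AVAudioSession API, not part of the Baidu SDK itself; you could call a method like this from inputBtnClick: and only start recognition once permission is granted.

// At the top of ViewController.m
#import <AVFoundation/AVFoundation.h>

- (void)startRecognitionWithPermissionCheck {
    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        dispatch_async(dispatch_get_main_queue(), ^{
            if (granted) {
                // Microphone access granted: safe to start recognition
                [[BDVoiceRecognitionClient sharedInstance] startVoiceRecognition:self];
            } else {
                // Inform the user that voice input needs microphone access
                self.label.text = @"Microphone permission denied";
            }
        });
    }];
}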