Day 9: iOS Audio Technology

1. AQRecorder

    mRecordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM)
    {
        // if we want pcm, default to signed 16-bit little-endian
        mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mRecordFormat.mBitsPerChannel = 16;
        mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
        mRecordFormat.mFramesPerPacket = 1;
    }

//    else  {
//
//        mRecordFormat.mSampleRate = 44100;//8000.0;//44100.0;
//
//        mRecordFormat.mFormatID = kAudioFormatMPEG4AAC; // kAudioFormatMPEG4AAC_HE does not work. Can't find `AudioClassDescription`. `mFormatFlags` is set to 0.
//        mRecordFormat.mFormatFlags = kMPEG4Object_AAC_LC; // Format-specific flags to specify details of the format. Set to 0 to indicate no format flags. See “Audio Data Format Identifiers” for the flags that apply to each format.
//        mRecordFormat.mBytesPerPacket = 0; // The number of bytes in a packet of audio data. To indicate variable packet size, set this field to 0. For a format that uses variable packet size, specify the size of each packet using an AudioStreamPacketDescription structure.
//        mRecordFormat.mFramesPerPacket = 0; // The number of frames in a packet of audio data. For uncompressed audio, the value is 1. For variable bit-rate formats, the value is a larger fixed number, such as 1024 for AAC. For formats with a variable number of frames per packet, such as Ogg Vorbis, set this field to 0.
//        mRecordFormat.mBytesPerFrame = 0; // The number of bytes from the start of one frame to the start of the next frame in an audio buffer. Set this field to 0 for compressed formats. ...
//        mRecordFormat.mChannelsPerFrame = 1; // The number of channels in each frame of audio data. This value must be nonzero.
//        mRecordFormat.mBitsPerChannel = 0; // ... Set this field to 0 for compressed formats.
//        mRecordFormat.mReserved = 0; // Pads the structure out to force an even 8-byte alignment. Must be set to 0.
//    }
    CFURLRef url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)inRecordFile, NULL);

    // create the audio file
    OSStatus status = AudioFileCreateWithURL(url, kAudioFileCAFType, &mRecordFormat, kAudioFileFlags_EraseFile, &mRecordFile);
    CFRelease(url);
    XThrowIfError(status, "AudioFileCreateWithURL failed");
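
If the commented-out AAC branch above is enabled, the encoder's magic cookie also has to be copied from the audio queue into the file, otherwise the compressed recording will not play back. A minimal sketch, assuming the input queue already exists in a variable named `queue` (it is created in the sketch after the callback below) and `mRecordFile` is the file created above:

    // Sketch only: copy the encoder magic cookie from the queue into the file.
    UInt32 cookieSize = 0;
    if (AudioQueueGetPropertySize(queue, kAudioQueueProperty_MagicCookie, &cookieSize) == noErr && cookieSize > 0) {
        void *cookie = malloc(cookieSize);
        if (AudioQueueGetProperty(queue, kAudioQueueProperty_MagicCookie, cookie, &cookieSize) == noErr) {
            AudioFileSetProperty(mRecordFile, kAudioFilePropertyMagicCookieData, cookieSize, cookie);
        }
        free(cookie);
    }
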
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
void AQRecorder::MyInputBufferHandler(void *                               inUserData,
                                      AudioQueueRef                        inAQ,
                                      AudioQueueBufferRef                  inBuffer,
                                      const AudioTimeStamp *               inStartTime,
                                      UInt32                               inNumPackets,
                                      const AudioStreamPacketDescription * inPacketDesc)
{
    AQRecorder *aqr = (AQRecorder *)inUserData;
    try {
        if (inNumPackets > 0) {
            // write packets to file
            XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
                                                inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
                          "AudioFileWritePackets failed");
            aqr->mRecordPacket += inNumPackets;
        }

        // if we're not stopping, re-enqueue the buffer so that it gets filled again
        if (aqr->IsRunning())
            XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
}
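
For reference, a minimal sketch of how this callback is typically wired up: create an input queue with the format prepared above, allocate and enqueue a few buffers, then start the queue. Names such as `aqr`, `kNumberBuffers`, and `bufferByteSize` are placeholders, not taken from the original source, and the sketch assumes MyInputBufferHandler is declared static in AQRecorder (as in Apple's SpeakHere sample) so it can be used as a C callback:

    AudioQueueRef queue = NULL;
    XThrowIfError(AudioQueueNewInput(&aqr->mRecordFormat, AQRecorder::MyInputBufferHandler, aqr,
                                     NULL /* run loop */, NULL /* run loop mode */, 0, &queue),
                  "AudioQueueNewInput failed");

    const int kNumberBuffers = 3;             // assumption: three in-flight buffers
    const UInt32 bufferByteSize = 32 * 1024;  // assumption: ~32 KB per buffer
    for (int i = 0; i < kNumberBuffers; ++i) {
        AudioQueueBufferRef buffer;
        XThrowIfError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffer), "AudioQueueAllocateBuffer failed");
        XThrowIfError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    }
    XThrowIfError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");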

2. Level meter

- (void)updateLevelMeter:(id)sender {
    /*
    if (self.delegate) {
        UInt32 dataSize = sizeof(AudioQueueLevelMeterState);
        AudioQueueGetProperty([self.decapsulator Queue], kAudioQueueProperty_CurrentLevelMeter, levelMeterStates, &dataSize);
        if ([self.delegate respondsToSelector:@selector(levelMeterChanged:)]) {
            [self.delegate levelMeterChanged:levelMeterStates[0].mPeakPower];
        }

    }*/
}
    //self.timerLevelMeter = [NSTimer scheduledTimerWithTimeInterval:0.2 target:self selector:@selector(updateLevelMeter:) userInfo:nil repeats:YES];
    __weak __typeof(self) weakSelf = self;
    MLAudioMeterObserver *meterObserver = [[MLAudioMeterObserver alloc] init];
    meterObserver.actionBlock = ^(NSArray *levelMeterStates, MLAudioMeterObserver *meterObserver){
        NSLog(@"volume:%f", [MLAudioMeterObserver volumeForLevelMeterStates:levelMeterStates]);

        if ([weakSelf.delegate respondsToSelector:@selector(levelMeterChanged:)]) {
            [weakSelf.delegate levelMeterChanged:[MLAudioMeterObserver volumeForLevelMeterStates:levelMeterStates]];
        }
    };
    meterObserver.errorBlock = ^(NSError *error,MLAudioMeterObserver *meterObserver){
        //[[[UIAlertView alloc]initWithTitle:@"错误" message:error.userInfo[NSLocalizedDescriptionKey] delegate:nil cancelButtonTitle:nil otherButtonTitles:@"知道了", nil]show];
    };
    self.meterObserver = meterObserver;
    self.meterObserver.audioQueue = player->Queue();
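
MLAudioMeterObserver is a third-party helper; under the hood it reads the audio queue's built-in level metering, which is also what the commented-out updateLevelMeter: above did. A minimal sketch of using the property API directly, assuming a valid AudioQueueRef (here the player's queue, `player->Queue()` above) and a mono stream:

    AudioQueueRef queue = player->Queue();

    // Metering is off by default; enable it once after the queue is created.
    UInt32 meteringEnabled = 1;
    AudioQueueSetProperty(queue, kAudioQueueProperty_EnableLevelMetering, &meteringEnabled, sizeof(meteringEnabled));

    // Poll from a timer: one AudioQueueLevelMeterState per channel.
    AudioQueueLevelMeterState level[1];
    UInt32 dataSize = sizeof(level);
    if (AudioQueueGetProperty(queue, kAudioQueueProperty_CurrentLevelMeter, level, &dataSize) == noErr) {
        NSLog(@"peak: %f average: %f", level[0].mPeakPower, level[0].mAveragePower); // linear 0.0–1.0
    }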

3. LinesView

- (void)levelMeterChanged:(float)levelMeter {
    dispatch_async(dispatch_get_main_queue(), ^{
        //self.levelMeter.progress = levelMeter;
        NSLog(@"%.2f", levelMeter);

        [self.levelMeterLineView1 addMeter:levelMeter];
        [self.levelMeterLineView2 addMeter:levelMeter];
    });

}
-(void)addMeter:(float)meter
{
    // Alternate between two baseline offsets so adjacent bars differ slightly,
    // which gives the line view a jagged, waveform-like look.
    if (high) {
        meter = meter*0.6 + 0.4;
    } else {
        meter = meter*0.6 + 0.35;
    }
    high = !high;

    [_meters addObject:@(meter)];

    // Keep only the most recent 10 values.
    if (_meters.count > 10) {
        [_meters removeObjectAtIndex:0];
    }
    [self setNeedsDisplay];
}
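
The drawing code of the line view is not shown here. A minimal sketch of a drawRect: that renders the stored values, assuming `_meters` holds NSNumber values in the 0–1 range and each value becomes one vertical bar centred on the view's midline:

    - (void)drawRect:(CGRect)rect
    {
        CGContextRef ctx = UIGraphicsGetCurrentContext();
        CGContextSetStrokeColorWithColor(ctx, [UIColor whiteColor].CGColor);
        CGContextSetLineWidth(ctx, 3.0);
        CGContextSetLineCap(ctx, kCGLineCapRound);

        CGFloat step = CGRectGetWidth(rect) / 10.0;   // assumption: room for 10 bars
        CGFloat midY = CGRectGetMidY(rect);

        [_meters enumerateObjectsUsingBlock:^(NSNumber *meter, NSUInteger idx, BOOL *stop) {
            CGFloat x = step * idx + step / 2.0;
            CGFloat halfHeight = CGRectGetHeight(rect) * meter.floatValue / 2.0;
            CGContextMoveToPoint(ctx, x, midY - halfHeight);
            CGContextAddLineToPoint(ctx, x, midY + halfHeight);
        }];
        CGContextStrokePath(ctx);
    }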

DXRecordView

- (void)levelMeterChanged:(float)levelMeter {
    dispatch_async(dispatch_get_main_queue(), ^{
        //self.levelMeter.progress = levelMeter;
        NSLog(@"%.2f",levelMeter*1);

        float showMeter = levelMeter*0.6 + 0.35;

        [_recordView setVoiceImageWithLowPassResults:showMeter];

    });

}
-(void)setVoiceImageWithLowPassResults:(double)lowPassResults
{
    // Scale the meter image inside a 39 pt high track, anchored to the bottom,
    // so the bar grows upward as the level rises.
    CGRect frame = _meterImageView.frame;
    frame.size.height = 39*lowPassResults;
    frame.origin.y  = 22+5.5+39*(1-lowPassResults);
    _meterImageView.frame = frame;
}

4. recordButton (hold to talk)

    // record (hold-to-talk) button
    self.recordButton = [[UIButton alloc] initWithFrame:CGRectMake(24, kVerticalPadding, CGRectGetWidth(self.bounds)-(24 * 2), kInputTextViewMinHeight)];
    self.recordButton.titleLabel.font = [UIFont systemFontOfSize:15.0];
    [self.recordButton setTitleColor:[UIColor darkGrayColor] forState:UIControlStateNormal];
    [self.recordButton setBackgroundImage:[UIImage imageNamed:@"btn_long_round"] forState:UIControlStateNormal];
    [self.recordButton setBackgroundImage:[UIImage imageNamed:@"btn_long_round_hl"] forState:UIControlStateHighlighted];
    [self.recordButton setTitle:LOCALIZATION(@"按住说话") forState:UIControlStateNormal];
    [self.recordButton setTitle:LOCALIZATION(@"松开结束") forState:UIControlStateHighlighted];
    [self.recordButton setTitleColor:[UIColor whiteColor] forState:UIControlStateHighlighted];
    //self.recordButton.hidden = YES;
    [self.recordButton addTarget:self action:@selector(recordButtonTouchDown) forControlEvents:UIControlEventTouchDown];
    [self.recordButton addTarget:self action:@selector(recordButtonTouchUpOutside) forControlEvents:UIControlEventTouchUpOutside];
    [self.recordButton addTarget:self action:@selector(recordButtonTouchUpInside) forControlEvents:UIControlEventTouchUpInside];
    [self.recordButton addTarget:self action:@selector(recordDragOutside) forControlEvents:UIControlEventTouchDragExit];
    [self.recordButton addTarget:self action:@selector(recordDragInside) forControlEvents:UIControlEventTouchDragEnter];
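
The target/action pairs above implement the familiar hold-to-talk gesture. A minimal sketch of what the handlers typically do; the delegate method names are placeholders and not from the original source:

    - (void)recordButtonTouchDown {
        // Finger down: start recording and show the level-meter overlay.
        [self.delegate didStartRecordingVoice];        // assumption: delegate method name
    }

    - (void)recordButtonTouchUpInside {
        // Finger lifted inside the button: stop recording and send the clip.
        [self.delegate didFinishRecordingVoice];       // assumption
    }

    - (void)recordButtonTouchUpOutside {
        // Finger lifted outside the button: cancel the recording.
        [self.delegate didCancelRecordingVoice];       // assumption
    }

    - (void)recordDragOutside {
        // Finger dragged out: hint that releasing now will cancel.
        [self.delegate didDragOutsideWhileRecording];  // assumption
    }

    - (void)recordDragInside {
        // Finger dragged back in: hint that releasing now will send.
        [self.delegate didDragInsideWhileRecording];   // assumption
    }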

5. EMChatAudioBubbleView

- (void)setModel:(MessageModel *)model
{
    [super setModel:model];

    _timeLabel.text = [NSString stringWithFormat:@"%d'", self.model.time];

    if (self.model.isSender) {
        [_isReadView setHidden:YES];
        _animationImageView.image = [UIImage imageNamed:SENDER_ANIMATION_IMAGEVIEW_IMAGE_DEFAULT];
        _animationImageView.animationImages = _senderAnimationImages;
    }
    else{
        if (model.isPlayed) {
            [_isReadView setHidden:YES];
        }else{
            [_isReadView setHidden:NO];
        }

        _animationImageView.image = [UIImage imageNamed:RECEIVER_ANIMATION_IMAGEVIEW_IMAGE_DEFAULT];
        _animationImageView.animationImages = _recevierAnimationImages;
    }

    if (self.model.isPlaying)
    {
        [self startAudioAnimation];
    }else {
        [self stopAudioAnimation];
    }
}
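
startAudioAnimation / stopAudioAnimation are not shown; a minimal sketch assuming they simply drive the UIImageView frame animation configured in setModel: above:

    - (void)startAudioAnimation {
        _animationImageView.animationDuration = 1.0;   // assumption: one cycle per second
        _animationImageView.animationRepeatCount = 0;  // repeat until stopped
        [_animationImageView startAnimating];
    }

    - (void)stopAudioAnimation {
        [_animationImageView stopAnimating];
        // Fall back to the static frame for the current direction.
        _animationImageView.image = [UIImage imageNamed:(self.model.isSender
                                                          ? SENDER_ANIMATION_IMAGEVIEW_IMAGE_DEFAULT
                                                          : RECEIVER_ANIMATION_IMAGEVIEW_IMAGE_DEFAULT)];
    }
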
@interface MiniCourseViewTableViewCell : UITableViewCell<UITableViewDataSource,UITableViewDelegate,CommentVoiceDelegate>

@property(nonatomic, strong) NSString *MiniContent;
@property(nonatomic, strong) UILabel *contentLabel, *numberLabel, *nameLabel, *timeLabel;
@property(nonatomic, strong) NSMutableArray *commentModelArray;
@property(nonatomic, strong) UIImageView *headImageView;
@property(nonatomic, strong) UIButton *laudButton;

@property (nonatomic, strong) UITableView *tableView;
@property (nonatomic, strong) PlayVoiceButton *playVoiceButton;
@property (nonatomic, weak) id<ReplyVoiceDelegate> delegate; // weak to avoid a retain cycle with the owning controller

@property(nonatomic, strong) CourseReplay * replyModel;

+ (float)getHeightWithString:(NSString *)string fontSize:(int)size contenViewWidth:(CGFloat)width;

@end
 
@interface FollowTalkThingCommentTableViewCell : UITableViewCell

@property (nonatomic, strong) UIImageView *userImageView;
@property (nonatomic, strong) UILabel *nameLabel;
@property (nonatomic, strong) UILabel *timeLabel;
@property (nonatomic, strong) UIButton *zanButton;

@property (nonatomic, strong) UILabel *answerLabel, *replyLabel;
@property (nonatomic, strong) PlayVoiceButton *voiceButton;
@property (nonatomic, strong) UIView *answerView;

@property (nonatomic, strong) UILabel *scoreLabel;

@property (nonatomic, weak) id<playVoiceButton> delegate; // weak to avoid a retain cycle with the owning controller

+(CGSize)cellBubbleSizeWithContent:(NSString *)content;

@property (nonatomic, strong) id model;

@end