Writing a buffer of audio samples to an AAC file using ExtAudioFileWrite for iOS

Update: I have solved this problem and posted my solution as an answer to my own question (see below).

I am trying to write a simple buffer of audio samples to a file in AAC format using ExtAudioFileWrite.

I have achieved this with the code below for writing a mono buffer to a .wav file – however, I cannot do the same for stereo or for AAC files, which is what I actually want to do.

Here is what I have so far…

 CFStringRef fPath;
 fPath = CFStringCreateWithCString(kCFAllocatorDefault,
                                   "/path/to/my/audiofile/audiofile.wav",
                                   kCFStringEncodingMacRoman);

 OSStatus err;

 int mChannels = 1;
 UInt32 totalFramesInFile = 100000;
 Float32 *outputBuffer = (Float32 *)malloc(sizeof(Float32) * (totalFramesInFile*mChannels));

 ////////////// Set up Audio Buffer List ////////////

 AudioBufferList outputData;
 outputData.mNumberBuffers = 1;
 outputData.mBuffers[0].mNumberChannels = mChannels;
 outputData.mBuffers[0].mDataByteSize = 4 * totalFramesInFile * mChannels;
 outputData.mBuffers[0].mData = outputBuffer;

 Float32 audioFile[totalFramesInFile*mChannels];

 for (int i = 0; i < totalFramesInFile*mChannels; i++)
 {
     audioFile[i] = ((Float32)(rand() % 100))/100.0;
     audioFile[i] = audioFile[i]*0.2;
 }

 outputData.mBuffers[0].mData = &audioFile;

 CFURLRef fileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, fPath, kCFURLPOSIXPathStyle, false);

 ExtAudioFileRef audiofileRef;

 // WAVE FILES
 AudioFileTypeID fileType = kAudioFileWAVEType;

 AudioStreamBasicDescription clientFormat;
 clientFormat.mSampleRate = 44100.0;
 clientFormat.mFormatID = kAudioFormatLinearPCM;
 clientFormat.mFormatFlags = 12; // kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked
 clientFormat.mBitsPerChannel = 16;
 clientFormat.mChannelsPerFrame = mChannels;
 clientFormat.mBytesPerFrame = 2*clientFormat.mChannelsPerFrame;
 clientFormat.mFramesPerPacket = 1;
 clientFormat.mBytesPerPacket = 2*clientFormat.mChannelsPerFrame;

 // open the file for writing
 err = ExtAudioFileCreateWithURL((CFURLRef)fileURL, fileType, &clientFormat, NULL, kAudioFileFlags_EraseFile, &audiofileRef);
 if (err != noErr)
 {
     cout << "Problem when creating audio file: " << err << "\n";
 }

 // tell the ExtAudioFile API what format we'll be sending samples in
 err = ExtAudioFileSetProperty(audiofileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
 if (err != noErr)
 {
     cout << "Problem setting audio format: " << err << "\n";
 }

 UInt32 rFrames = (UInt32)totalFramesInFile;

 // write the data
 err = ExtAudioFileWrite(audiofileRef, rFrames, &outputData);
 if (err != noErr)
 {
     cout << "Problem writing audio file: " << err << "\n";
 }

 // close the file
 ExtAudioFileDispose(audiofileRef);

 NSLog(@"Done!");
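For reference, one mismatch I can already see in the code above: the clientFormat describes 16-bit signed integer samples, but the buffer I hand to ExtAudioFileWrite holds Float32 data. An ASBD describing the Float32 samples actually in the buffer might look like the sketch below (these field values are my own guess from reading CoreAudioTypes.h; I have not confirmed that this alone fixes the stereo distortion):

 // Sketch: an ASBD matching the interleaved Float32 buffer above
 // (assumption: 32-bit native-endian packed floats)
 AudioStreamBasicDescription floatFormat = {0};
 floatFormat.mSampleRate       = 44100.0;
 floatFormat.mFormatID         = kAudioFormatLinearPCM;
 floatFormat.mFormatFlags      = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
 floatFormat.mBitsPerChannel   = 32;
 floatFormat.mChannelsPerFrame = mChannels;
 floatFormat.mBytesPerFrame    = sizeof(Float32) * mChannels;
 floatFormat.mFramesPerPacket  = 1;
 floatFormat.mBytesPerPacket   = floatFormat.mBytesPerFrame;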

My specific questions are:

  • How do I set up an AudioStreamBasicDescription for AAC?
  • Why can't I get stereo to work correctly here? If I set the number of channels ("mChannels") to 2, I get the left channel correctly and distortion in the right channel.

I would hugely appreciate any help – I think I have read almost every page I could find on this and am none the wiser. Although there are similar questions, they usually derive the AudioStreamBasicDescription parameters from some input audio file, so I can't see the resulting values. The Apple documentation hasn't helped either.

Many thanks in advance,

Adam

OK, after some exploration I have figured it out. I have written it up as a function that writes random noise to a file. Specifically, it can:

  • write .wav or .m4a files
  • write mono or stereo in either format
  • write the file to a specified path

The function arguments are:

  • the path of the audio file to be created
  • the number of channels (maximum 2)
  • a boolean: compress with m4a (if false, PCM is used)

For a stereo M4A file, the function should be called as:

 writeNoiseToAudioFile("/path/to/my/audiofile.m4a", 2, true);
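Similarly, for a mono PCM .wav file the call would be (the path is again just a placeholder):

 writeNoiseToAudioFile("/path/to/my/audiofile.wav", 1, false);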

The source of the function follows. I have commented it as much as I can – I hope it is correct; it certainly works for me, but if I have missed anything, please say "Adam, you've made this mistake". Good luck! Here is the code:

 void writeNoiseToAudioFile(char *fName, int mChannels, bool compress_with_m4a)
 {
     OSStatus err; // to record errors from ExtAudioFile API functions

     // create file path as CFStringRef
     CFStringRef fPath;
     fPath = CFStringCreateWithCString(kCFAllocatorDefault, fName, kCFStringEncodingMacRoman);

     // specify total number of samples per channel
     UInt32 totalFramesInFile = 100000;

     /////////////////////////////////////////////////////////////////////////////
     ////////////// Set up Audio Buffer List For Interleaved Audio ///////////////
     /////////////////////////////////////////////////////////////////////////////

     AudioBufferList outputData;
     outputData.mNumberBuffers = 1;
     outputData.mBuffers[0].mNumberChannels = mChannels;
     outputData.mBuffers[0].mDataByteSize = sizeof(Float32)*totalFramesInFile*mChannels;

     /////////////////////////////////////////////////////////////////////////////
     //////// Synthesise Noise and Put It In The AudioBufferList /////////////////
     /////////////////////////////////////////////////////////////////////////////

     // create an array to hold our audio – Float32, so the samples match the
     // 32-bit float LPCM client format we hand to the ExtAudioFile API below
     Float32 audioFile[totalFramesInFile*mChannels];

     // fill the array with random numbers (white noise)
     for (int i = 0; i < totalFramesInFile*mChannels; i++)
     {
         audioFile[i] = ((Float32)(rand() % 100))/100.0;
         audioFile[i] = audioFile[i]*0.2;
         // (yes, I know this noise has a DC offset, bad)
     }

     // set the AudioBuffer to point to the array containing the noise
     outputData.mBuffers[0].mData = &audioFile;

     /////////////////////////////////////////////////////////////////////////////
     ////////////////// Specify The Output Audio File Format /////////////////////
     /////////////////////////////////////////////////////////////////////////////

     // the client format will describe the output audio file
     AudioStreamBasicDescription clientFormat;

     // the file type identifier tells the ExtAudioFile API what kind of file we want created
     AudioFileTypeID fileType;

     // if compress_with_m4a is true then set up for m4a file format
     if (compress_with_m4a)
     {
         // this creates an m4a file type
         fileType = kAudioFileM4AType;

         // here we specify the M4A format
         clientFormat.mSampleRate       = 44100.0;
         clientFormat.mFormatID         = kAudioFormatMPEG4AAC;
         clientFormat.mFormatFlags      = kMPEG4Object_AAC_Main;
         clientFormat.mChannelsPerFrame = mChannels;
         clientFormat.mBytesPerPacket   = 0;
         clientFormat.mBytesPerFrame    = 0;
         clientFormat.mFramesPerPacket  = 1024;
         clientFormat.mBitsPerChannel   = 0;
         clientFormat.mReserved         = 0;
     }
     else // else encode as PCM
     {
         // this creates a wav file type
         fileType = kAudioFileWAVEType;

         // this function automatically generates the audio format according to certain arguments
         FillOutASBDForLPCM(clientFormat, 44100.0, mChannels, 32, 32, true, false, false);
     }

     /////////////////////////////////////////////////////////////////////////////
     ///////////////// Specify The Format of Our Audio Samples ///////////////////
     /////////////////////////////////////////////////////////////////////////////

     // the local format describes the format of the samples we will give to the ExtAudioFile API
     AudioStreamBasicDescription localFormat;
     FillOutASBDForLPCM(localFormat, 44100.0, mChannels, 32, 32, true, false, false);

     /////////////////////////////////////////////////////////////////////////////
     ///////////////// Create the Audio File and Open It /////////////////////////
     /////////////////////////////////////////////////////////////////////////////

     // create the audio file reference
     ExtAudioFileRef audiofileRef;

     // create a fileURL from our path
     CFURLRef fileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, fPath, kCFURLPOSIXPathStyle, false);

     // open the file for writing
     err = ExtAudioFileCreateWithURL((CFURLRef)fileURL, fileType, &clientFormat, NULL, kAudioFileFlags_EraseFile, &audiofileRef);
     if (err != noErr)
     {
         cout << "Problem when creating audio file: " << err << "\n";
     }

     /////////////////////////////////////////////////////////////////////////////
     ///// Tell the ExtAudioFile API what format we'll be sending samples in /////
     /////////////////////////////////////////////////////////////////////////////

     err = ExtAudioFileSetProperty(audiofileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(localFormat), &localFormat);
     if (err != noErr)
     {
         cout << "Problem setting audio format: " << err << "\n";
     }

     /////////////////////////////////////////////////////////////////////////////
     ///////// Write the Contents of the AudioBufferList to the AudioFile ////////
     /////////////////////////////////////////////////////////////////////////////

     UInt32 rFrames = (UInt32)totalFramesInFile;

     // write the data
     err = ExtAudioFileWrite(audiofileRef, rFrames, &outputData);
     if (err != noErr)
     {
         cout << "Problem writing audio file: " << err << "\n";
     }

     /////////////////////////////////////////////////////////////////////////////
     ////////////// Close the Audio File and Get Rid Of The Reference ////////////
     /////////////////////////////////////////////////////////////////////////////

     // close the file
     ExtAudioFileDispose(audiofileRef);

     // release the Core Foundation objects we created
     CFRelease(fPath);
     CFRelease(fileURL);

     NSLog(@"Done!");
 }
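One more remark: the function above pushes the entire buffer through ExtAudioFileWrite in a single call. For much longer buffers you could write in smaller chunks instead; a rough sketch of that variant is below (it would replace the single ExtAudioFileWrite call inside the function, reusing the same variables; the 4096-frame chunk size is an arbitrary choice of mine and this exact form is untested):

 // Sketch: write the noise buffer in chunks instead of one big call
 UInt32 framesRemaining = totalFramesInFile;
 UInt32 framesWritten   = 0;
 while (framesRemaining > 0)
 {
     UInt32 framesThisPass = (framesRemaining < 4096) ? framesRemaining : 4096;

     // point a temporary AudioBufferList at the next chunk of interleaved samples
     AudioBufferList chunk;
     chunk.mNumberBuffers = 1;
     chunk.mBuffers[0].mNumberChannels = mChannels;
     chunk.mBuffers[0].mDataByteSize   = framesThisPass * mChannels * sizeof(Float32);
     chunk.mBuffers[0].mData           = audioFile + framesWritten * mChannels;

     err = ExtAudioFileWrite(audiofileRef, framesThisPass, &chunk);
     if (err != noErr) { break; } // bail out on the first error

     framesWritten   += framesThisPass;
     framesRemaining -= framesThisPass;
 }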

Don't forget to link against the AudioToolbox framework and include the header files (and note that, since the function mixes NSLog with cout, the source file must be compiled as Objective-C++ – e.g. a .mm file):

 #import <AudioToolbox/AudioToolbox.h>
 #include <iostream>
 using namespace std;