Converting linear PCM audio to MP3 (using LAME) with the AudioQueueServices example on iOS

I'm new to iOS development. I'm trying to encode linear PCM to MP3 on iOS, taking raw PCM data from the microphone and encoding it with the AudioToolbox framework and LAME. Recording to a .caf file works fine, but the encoded MP3 stream contains nothing but noise and distortion. I'm not sure whether I've set up the AudioQueue correctly or whether I'm handling the encoded buffers the right way. My code to set up recording:

Sample project: https://github.com/vecter/Audio-Queue-Services-Example

    - (void)setupAudioFormat:(AudioStreamBasicDescription *)format
    {
        format->mSampleRate = 16000;
        format->mFormatID = kAudioFormatLinearPCM;
        format->mFramesPerPacket = 1;
        format->mChannelsPerFrame = 1;
        format->mBytesPerFrame = 2;
        format->mBytesPerPacket = 2;
        format->mBitsPerChannel = 16;
        format->mReserved = 0;
        format->mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
                               kLinearPCMFormatFlagIsSignedInteger |
                               kLinearPCMFormatFlagIsPacked;
    }

    - (void)recordPressed:(id)sender
    {
        if (!playState.playing)
        {
            if (!recordState.recording)
            {
                printf("Starting recording\n");
                self.mergedData = [[NSMutableData alloc] init];
                [self startRecording];
            }
            else
            {
                printf("Stopping recording\n");
                [self stopRecording];
            }
        }
        else
        {
            printf("Can't start recording, currently playing\n");
        }
    }

    - (void)startRecording
    {
        [self setupAudioFormat:&recordState.dataFormat];

        recordState.currentPacket = 0;
        recordState.pThis = self;

        OSStatus status;
        status = AudioQueueNewInput(&recordState.dataFormat,
                                    AudioInputCallback,
                                    &recordState,
                                    CFRunLoopGetCurrent(),
                                    kCFRunLoopCommonModes,
                                    0,
                                    &recordState.queue);

        if (status == 0)
        {
            // Prime recording buffers with empty data
            for (int i = 0; i < NUM_BUFFERS; i++)
            {
                AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
                AudioQueueEnqueueBuffer(recordState.queue, recordState.buffers[i], 0, NULL);
            }

            status = AudioFileCreateWithURL(fileURL,
                                            kAudioFileAIFFType,
                                            &recordState.dataFormat,
                                            kAudioFileFlags_EraseFile,
                                            &recordState.audioFile);

            gfp = lame_init();
            lame_set_num_channels(gfp, 1);
            lame_set_in_samplerate(gfp, recordState.dataFormat.mSampleRate);
            lame_set_VBR(gfp, vbr_default);
            lame_init_params(gfp);

            if (status == 0)
            {
                recordState.recording = true;
                status = AudioQueueStart(recordState.queue, NULL);
                if (status == 0)
                {
                    mergeData = [[NSMutableData alloc] init];
                    labelStatus.text = @"Recording";
                }
            }
        }

        if (status != 0)
        {
            [self stopRecording];
            labelStatus.text = @"Record Failed";
        }
    }

    - (void)stopRecording
    {
        recordState.recording = false;

        AudioQueueStop(recordState.queue, true);
        for (int i = 0; i < NUM_BUFFERS; i++)
        {
            AudioQueueFreeBuffer(recordState.queue, recordState.buffers[i]);
        }

        AudioQueueDispose(recordState.queue, true);
        AudioFileClose(recordState.audioFile);
        labelStatus.text = @"Idle";
    }
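One thing I'd double-check here (an assumption on my part, not something confirmed in the question): the format asks for big-endian PCM, presumably to match the AIFF file being written, but LAME expects 16-bit samples in the host's native byte order, which is little-endian on iOS, so every sample handed to the encoder would be byte-swapped. That typically produces exactly this kind of loud noise. A minimal sketch of a native-endian format description, assuming the same 16 kHz mono setup:

    // Sketch: native-endian (little-endian on iOS) signed 16-bit mono PCM,
    // matching the "short int" samples LAME reads.
    - (void)setupAudioFormat:(AudioStreamBasicDescription *)format
    {
        format->mSampleRate       = 16000;
        format->mFormatID         = kAudioFormatLinearPCM;
        format->mFramesPerPacket  = 1;
        format->mChannelsPerFrame = 1;
        format->mBitsPerChannel   = 16;
        format->mBytesPerFrame    = 2;
        format->mBytesPerPacket   = 2;
        format->mReserved         = 0;
        // No kLinearPCMFormatFlagIsBigEndian: samples stay in host byte order
        format->mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
                                    kLinearPCMFormatFlagIsPacked;
    }

If the big-endian flag is dropped, the parallel AIFF write would also need to change (for example to a CAF or WAVE file), since AIFF stores big-endian samples.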

The AudioQueue callback then calls lame_encode_buffer_interleaved and writes the encoded buffer to a file:

    void AudioInputCallback(void * inUserData,
                            AudioQueueRef inAQ,
                            AudioQueueBufferRef inBuffer,
                            const AudioTimeStamp * inStartTime,
                            UInt32 inNumberPacketDescriptions,
                            const AudioStreamPacketDescription * inPacketDescs)
    {
        RecordState * recordState = (RecordState *)inUserData;
        if (!recordState->recording)
        {
            printf("Not recording, returning\n");
        }

        printf("Writing buffer %lld\n", recordState->currentPacket);

        OSStatus status = AudioFileWritePackets(recordState->audioFile,
                                                false,
                                                inBuffer->mAudioDataByteSize,
                                                inPacketDescs,
                                                recordState->currentPacket,
                                                &inNumberPacketDescriptions,
                                                inBuffer->mAudioData);
        if (status == 0)
        {
            recordState->currentPacket += inNumberPacketDescriptions;
        }

        AudioRecorderAppDelegate *this = recordState->pThis;

        const int MP3_BUFFER_SIZE = inBuffer->mAudioDataByteSize * 4;
        unsigned char mEncodedBuffer[MP3_BUFFER_SIZE];

        int encodedBytes = lame_encode_buffer_interleaved(this->gfp,
                                                          (short int *)inBuffer->mAudioData,
                                                          inNumberPacketDescriptions,
                                                          mEncodedBuffer,
                                                          MP3_BUFFER_SIZE);

        NSData *data = [NSData dataWithBytes:mEncodedBuffer length:encodedBytes];
        [this writeData:data];

        lame_encode_flush(this->gfp, mEncodedBuffer, MP3_BUFFER_SIZE);
        memset(&mEncodedBuffer, 0, sizeof(mEncodedBuffer));

        AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
    }
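Two other points in this callback look suspect to me (again assumptions, not verified against the project): lame_encode_buffer_interleaved expects interleaved stereo samples, while this stream is mono, and lame_encode_flush is meant to be called once when the recording ends rather than after every buffer. A sketch of what the mono encode path could look like instead, reusing the names from the callback above and assuming native-endian 16-bit samples:

    // Sketch only: encode the mono buffer with lame_encode_buffer
    // instead of the interleaved/stereo variant.
    short *samples   = (short *)inBuffer->mAudioData;
    int   numSamples = inBuffer->mAudioDataByteSize / sizeof(short); // mono, 16-bit

    // With lame_set_num_channels(gfp, 1), only the left-channel buffer is used;
    // the right-channel pointer is ignored, so the same buffer can be passed.
    int encodedBytes = lame_encode_buffer(this->gfp,
                                          samples,          // left channel
                                          samples,          // right channel (ignored for mono)
                                          numSamples,
                                          mEncodedBuffer,
                                          MP3_BUFFER_SIZE);
    if (encodedBytes > 0)
    {
        [this writeData:[NSData dataWithBytes:mEncodedBuffer length:encodedBytes]];
    }

    // lame_encode_flush(...) belongs in stopRecording, called once, with its
    // output appended to the MP3 data before the file is finalized.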

Appending the data:

    - (void)writeData:(NSData *)data
    {
        [mergeData appendData:data];

        NSArray  *paths  = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
        NSString *docDir = [paths objectAtIndex:0];
        NSString *file   = [docDir stringByAppendingString:@"/lame.mp3"];

        [mergeData writeToFile:file atomically:YES];
        NSLog(@"%@", file);
    }
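Not related to the noise, but rewriting the entire accumulated NSMutableData to disk on every callback gets increasingly expensive as the recording grows. A lighter-weight sketch appends only the new bytes with an NSFileHandle; the mp3FileHandle ivar here is hypothetical, not part of the original code:

    // Sketch: append-only writes via NSFileHandle. mp3FileHandle is a
    // hypothetical NSFileHandle ivar, opened once when recording starts, e.g.:
    //
    //   NSString *file = [docDir stringByAppendingString:@"/lame.mp3"];
    //   [[NSFileManager defaultManager] createFileAtPath:file contents:nil attributes:nil];
    //   mp3FileHandle = [NSFileHandle fileHandleForWritingAtPath:file];
    - (void)writeData:(NSData *)data
    {
        [mp3FileHandle seekToEndOfFile];
        [mp3FileHandle writeData:data];
    }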

Can anyone tell me what's wrong here?

Or is there a finished sample project from another post?

In my case, this logic worked:

    int encodedBytes = lame_encode_buffer_interleaved(lame,
                                                      (short int *)inBuffer->mAudioData,
                                                      inNumPackets,
                                                      mp3_buffer,
                                                      MP3_SIZE);

    NSMutableData *data1 = [[NSMutableData alloc] initWithBytes:mp3_buffer length:encodedBytes];
    [this writeData:data1];

Try this:

    void AQRecorder::MyInputBufferHandler(void *                               inUserData,
                                          AudioQueueRef                        inAQ,
                                          AudioQueueBufferRef                  inBuffer,
                                          const AudioTimeStamp *               inStartTime,
                                          UInt32                               inNumPackets,
                                          const AudioStreamPacketDescription * inPacketDesc)
    {
        AQRecorder *aqr = (AQRecorder *)inUserData;
        // NSLog(@"%f", inStartTime->mSampleTime);

        try
        {
            if (inNumPackets > 0)
            {
                AudioFileWritePackets(aqr->mRecordFile,
                                      FALSE,
                                      inBuffer->mAudioDataByteSize,
                                      inPacketDesc,
                                      aqr->mRecordPacket,
                                      &inNumPackets,
                                      inBuffer->mAudioData);
                aqr->mRecordPacket += inNumPackets;

                int MP3_SIZE = inBuffer->mAudioDataByteSize * 4;
                unsigned char mp3_buffer[MP3_SIZE];

                AppDelegate *delegate = [[UIApplication sharedApplication] delegate];

                lame_t lame = lame_init();
                lame_set_in_samplerate(lame, 44100);
                lame_set_VBR(lame, vbr_default);
                lame_init_params(lame);

                // int encodedBytes = lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
                int encodedBytes = lame_encode_buffer(lame,
                                                      (short *)inBuffer->mAudioData,
                                                      (short *)inBuffer->mAudioData,
                                                      inNumPackets,
                                                      mp3_buffer,
                                                      MP3_SIZE);

                [delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];

                if (inBuffer->mAudioDataByteSize != 0)
                {
                }
                else
                {
                    int encode = lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
                    [delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
                }

                lame_close(lame);
            }

            if (aqr->IsRunning())
            {
                AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
            }
        }
        catch (CAXException e)
        {
            char buf[256];
            fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        }
    }
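One design note on this answer (my observation, not the answerer's): lame_init and lame_close are called inside the callback, so a fresh encoder is created for every buffer and no encoder state carries across buffers. A more common pattern is a single encoder for the whole recording, roughly like this sketch; the mLame member is hypothetical and added only for illustration:

    // Sketch: one LAME encoder for the whole recording, with mLame as a
    // hypothetical lame_t member of AQRecorder.

    // When recording starts (e.g. in StartRecord):
    mLame = lame_init();
    lame_set_in_samplerate(mLame, 44100);
    lame_set_num_channels(mLame, 1);
    lame_set_VBR(mLame, vbr_default);
    lame_init_params(mLame);

    // In MyInputBufferHandler: encode with the shared encoder only,
    // without per-buffer lame_init()/lame_close() calls.

    // When recording stops (e.g. in StopRecord):
    unsigned char flushBuffer[7200];   // LAME's docs suggest at least 7200 bytes here
    int flushedBytes = lame_encode_flush(mLame, flushBuffer, sizeof(flushBuffer));
    // append flushBuffer[0..flushedBytes) to the MP3 data, then:
    lame_close(mLame);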