Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions
{
    // Demonstrates kAudioConverterChannelMap: both output channels are mapped to
    // input channel 0.  NOTE(review): with the interleaved descriptions below the
    // channel map appears to be ignored; switch to the non-interleaved
    // descriptions (commented out) to see the mapping take effect.
    AudioStreamBasicDescription inputASBD = [AppDelegate interleaved16BitStereoAudioDescription];
    AudioStreamBasicDescription outputASBD = [AppDelegate interleaved16BitStereoAudioDescription];
    // Or, to see this work properly:
    // AudioStreamBasicDescription inputASBD = [AppDelegate nonInterleaved16BitStereoAudioDescription];
    // AudioStreamBasicDescription outputASBD = [AppDelegate nonInterleaved16BitStereoAudioDescription];

    AudioConverterRef converter = NULL;
    checkResult(AudioConverterNew(&inputASBD, &outputASBD, &converter), "AudioConverterNew");

    // kAudioConverterChannelMap is documented as an array of SInt32, one entry
    // per output channel, giving the source input channel.  {0, 0} duplicates
    // input channel 0 into both output channels.
    SInt32 channelMap[2] = {0, 0};
    checkResult(AudioConverterSetProperty(converter, kAudioConverterChannelMap, sizeof(channelMap), channelMap), "AudioConverterSetProperty(kAudioConverterChannelMap)");

    UInt32 frameCount = 64;
    AudioBufferList *inputBufferList = allocateAndInitAudioBufferList(inputASBD, frameCount);
    AudioBufferList *outputBufferList = allocateAndInitAudioBufferList(outputASBD, frameCount);
    if ( !inputBufferList || !outputBufferList ) {
        // Allocation failed; release whatever succeeded plus the converter.
        if ( inputBufferList ) {
            for ( UInt32 i=0; i<inputBufferList->mNumberBuffers; i++ ) free(inputBufferList->mBuffers[i].mData);
            free(inputBufferList);
        }
        if ( outputBufferList ) {
            for ( UInt32 i=0; i<outputBufferList->mNumberBuffers; i++ ) free(outputBufferList->mBuffers[i].mData);
            free(outputBufferList);
        }
        if ( converter ) AudioConverterDispose(converter);
        return YES;
    }

    // Fill the input with a recognizable ramp: frame i carries sample 2i on
    // channel 0 and 2i+1 on channel 1, so channel routing is visible by value.
    for ( int i=0; i<(int)frameCount; i++ ) {
        if ( inputASBD.mFormatFlags & kAudioFormatFlagIsNonInterleaved ) {
            ((SInt16*)inputBufferList->mBuffers[0].mData)[i] = 2*i;
            ((SInt16*)inputBufferList->mBuffers[1].mData)[i] = 2*i + 1;
        } else {
            ((SInt16*)inputBufferList->mBuffers[0].mData)[2*i] = 2*i;
            ((SInt16*)inputBufferList->mBuffers[0].mData)[2*i + 1] = 2*i + 1;
        }
    }

    // frameCount is in/out: on return it holds the number of frames produced.
    OSStatus result = AudioConverterFillComplexBuffer(converter,
                                                      fillComplexBufferInputProc,
                                                      &(struct fillComplexBufferInputProc_t) { .bufferList = inputBufferList, .frames = frameCount },
                                                      &frameCount,
                                                      outputBufferList,
                                                      NULL);
    checkResult(result, "AudioConverterFillComplexBuffer");

    printf("Expected\t\tActual\n");
    for ( int i=0; i<(int)frameCount; i++ ) {
        printf("%3d\t%3d\t\t%3d\t%3d\n",
               2*i + (int)channelMap[0],
               2*i + (int)channelMap[1],
               outputASBD.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? ((SInt16*)outputBufferList->mBuffers[0].mData)[i] : ((SInt16*)outputBufferList->mBuffers[0].mData)[2*i],
               outputASBD.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? ((SInt16*)outputBufferList->mBuffers[1].mData)[i] : ((SInt16*)outputBufferList->mBuffers[0].mData)[2*i+1]);
    }

    // Clean up: the converter and both buffer lists (sample storage first,
    // then the list structs) were heap-allocated above and were being leaked.
    AudioConverterDispose(converter);
    for ( UInt32 i=0; i<inputBufferList->mNumberBuffers; i++ ) free(inputBufferList->mBuffers[i].mData);
    free(inputBufferList);
    for ( UInt32 i=0; i<outputBufferList->mNumberBuffers; i++ ) free(outputBufferList->mBuffers[i].mData);
    free(outputBufferList);

    return YES;
}
- #pragma mark - Helpers
/// Returns a 44.1kHz stereo format description: 16-bit signed native-endian
/// packed PCM with both channels interleaved in a single buffer
/// (frame = 2 x SInt16 = 4 bytes; one frame per packet).
+ (AudioStreamBasicDescription)interleaved16BitStereoAudioDescription {
    const UInt32 channels = 2;
    const UInt32 bytesPerSample = sizeof(SInt16);
    // Designated initializers zero every unnamed field, matching the memset
    // the previous implementation performed.
    AudioStreamBasicDescription asbd = {
        .mFormatID         = kAudioFormatLinearPCM,
        .mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian,
        .mChannelsPerFrame = channels,
        .mFramesPerPacket  = 1,
        .mBytesPerFrame    = bytesPerSample * channels,
        .mBytesPerPacket   = bytesPerSample * channels,
        .mBitsPerChannel   = 8 * bytesPerSample,
        .mSampleRate       = 44100.0,
    };
    return asbd;
}
/// Returns a 44.1kHz stereo format description: 16-bit signed native-endian
/// packed PCM, non-interleaved — each channel lives in its own buffer, so a
/// frame/packet in any one buffer is a single SInt16 (2 bytes).
+ (AudioStreamBasicDescription)nonInterleaved16BitStereoAudioDescription {
    const UInt32 bytesPerSample = sizeof(SInt16);
    // Designated initializers zero every unnamed field, matching the memset
    // the previous implementation performed.
    AudioStreamBasicDescription asbd = {
        .mFormatID         = kAudioFormatLinearPCM,
        .mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsNonInterleaved,
        .mChannelsPerFrame = 2,
        .mFramesPerPacket  = 1,
        .mBytesPerFrame    = bytesPerSample,   // per-channel buffer: one sample per frame
        .mBytesPerPacket   = bytesPerSample,
        .mBitsPerChannel   = 8 * bytesPerSample,
        .mSampleRate       = 44100.0,
    };
    return asbd;
}
// User data for fillComplexBufferInputProc: the prepared source audio and its
// length in frames.  'consumed' records whether the data has already been
// handed to the converter; callers that build this with a designated-initializer
// compound literal get it zero-initialized (NO) automatically.
struct fillComplexBufferInputProc_t { AudioBufferList *bufferList; UInt32 frames; BOOL consumed; };

// AudioConverterComplexInputDataProc: supplies the caller's buffer list to the
// converter.  The first call hands over the entire buffer; any subsequent call
// reports zero packets to signal end-of-data.  Previously the same buffer was
// re-supplied on every call, so a converter that pulled more than once would
// read the identical input again (and could spin indefinitely).
static OSStatus fillComplexBufferInputProc(AudioConverterRef inAudioConverter,
                                           UInt32 *ioNumberDataPackets,
                                           AudioBufferList *ioData,
                                           AudioStreamPacketDescription **outDataPacketDescription,
                                           void *inUserData) {
    struct fillComplexBufferInputProc_t *arg = inUserData;

    if ( arg->consumed ) {
        // Everything was delivered on the first call: report end-of-data.
        *ioNumberDataPackets = 0;
        return noErr;
    }

    // Point the converter's buffers directly at the caller's sample storage
    // (no copy; the converter reads in place).
    for ( UInt32 i=0; i<ioData->mNumberBuffers; i++ ) {
        ioData->mBuffers[i].mData = arg->bufferList->mBuffers[i].mData;
        ioData->mBuffers[i].mDataByteSize = arg->bufferList->mBuffers[i].mDataByteSize;
    }
    *ioNumberDataPackets = arg->frames;
    arg->consumed = YES;
    return noErr;
}
/// Allocates an AudioBufferList plus sample storage sized for 'frameCount'
/// frames of the given format.  Non-interleaved formats get one buffer per
/// channel; interleaved formats get a single buffer carrying every channel.
/// Returns NULL if any allocation fails (nothing is leaked in that case).
/// The caller owns the result: free each mBuffers[i].mData, then the list.
AudioBufferList *allocateAndInitAudioBufferList(AudioStreamBasicDescription audioFormat, int frameCount) {
    BOOL nonInterleaved = (audioFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0;
    int bufferCount  = nonInterleaved ? (int)audioFormat.mChannelsPerFrame : 1;
    int channelsEach = nonInterleaved ? 1 : (int)audioFormat.mChannelsPerFrame;
    int byteCount    = (int)audioFormat.mBytesPerFrame * frameCount;

    // AudioBufferList declares one inline AudioBuffer; extend the allocation
    // to hold the remaining (bufferCount - 1) buffers.
    AudioBufferList *list = malloc(sizeof(AudioBufferList) + (bufferCount - 1) * sizeof(AudioBuffer));
    if ( !list ) {
        return NULL;
    }

    list->mNumberBuffers = bufferCount;
    for ( int i = 0; i < bufferCount; i++ ) {
        void *samples = NULL;
        if ( byteCount > 0 ) {
            samples = malloc(byteCount);
            if ( !samples ) {
                // Unwind every buffer allocated so far, then the list itself.
                while ( --i >= 0 ) {
                    free(list->mBuffers[i].mData);
                }
                free(list);
                return NULL;
            }
        }
        list->mBuffers[i].mData = samples;
        list->mBuffers[i].mDataByteSize = byteCount;
        list->mBuffers[i].mNumberChannels = channelsEach;
    }
    return list;
}
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement