VirtualBox

source: vbox/trunk/src/VBox/Devices/Audio/coreaudio.c@ 25916

Last change on this file since 25916 was 25916, checked in by vboxsync, 15 years ago

Audio-OSX: More updates to the CoreAudio backend. Added sample rate converter and channel mapper.

1/* $Id$ */
2/** @file
3 * VBox audio devices: Mac OS X CoreAudio audio driver
4 */
5
6/*
7 * Copyright (C) 2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#define LOG_GROUP LOG_GROUP_DEV_AUDIO
23#include <VBox/log.h>
24#include <iprt/mem.h>
25#include <iprt/cdefs.h>
26
27#define AUDIO_CAP "coreaudio"
28#include "vl_vbox.h"
29#include "audio.h"
30#include "audio_int.h"
31
32#include <CoreAudio/CoreAudio.h>
33#include <CoreServices/CoreServices.h>
34#include <AudioUnit/AudioUnit.h>
35#include <AudioToolbox/AudioConverter.h>
36
37/* TODO:
38 * - check for property changes on the devices
39 * - check for changes of the default device
40 * - let the user set the device used (use config)
41 * - try to set the frame size (use config)
42 * - maybe make sure the threads are stopped immediately if playing/recording stops
43 */
44
45/* Most of this is based on:
46 * http://developer.apple.com/mac/library/technotes/tn2004/tn2097.html
47 * http://developer.apple.com/mac/library/technotes/tn2002/tn2091.html
48 * http://developer.apple.com/mac/library/qa/qa2007/qa1533.html
49 * http://developer.apple.com/mac/library/qa/qa2001/qa1317.html
50 * http://developer.apple.com/mac/library/documentation/AudioUnit/Reference/AUComponentServicesReference/Reference/reference.html
51 */
52
53/*******************************************************************************
54 *
55 * IO Ring Buffer section
56 *
57 ******************************************************************************/
58
59/* Implementation of a lock-free ring buffer which can be used in a
60 * multi-threaded environment. Note that only the acquire, release and getter
61 * functions are thread-safe. So don't call reset while the ring buffer is
62 * still in use. A usage sketch follows the ring buffer functions below. */
63typedef struct IORINGBUFFER
64{
65 /* The current read position in the buffer */
66 uint32_t uReadPos;
67 /* The current write position in the buffer */
68 uint32_t uWritePos;
69 /* How much space of the buffer is currently in use */
70 volatile uint32_t cBufferUsed;
71 /* How big is the buffer */
72 uint32_t cBufSize;
73 /* The buffer itself */
74 char *pBuffer;
75} IORINGBUFFER;
76/* Pointer to a ring buffer structure */
77typedef IORINGBUFFER* PIORINGBUFFER;
78
79
80static void IORingBufferCreate(PIORINGBUFFER *ppBuffer, uint32_t cSize)
81{
82 PIORINGBUFFER pTmpBuffer;
83
84 AssertPtr(ppBuffer);
85
86 *ppBuffer = NULL;
87 pTmpBuffer = RTMemAllocZ(sizeof(IORINGBUFFER));
88 if (pTmpBuffer)
89 {
90 pTmpBuffer->pBuffer = RTMemAlloc(cSize);
91 if(pTmpBuffer->pBuffer)
92 {
93 pTmpBuffer->cBufSize = cSize;
94 *ppBuffer = pTmpBuffer;
95 }
96 else
97 RTMemFree(pTmpBuffer);
98 }
99}
100
101static void IORingBufferDestroy(PIORINGBUFFER pBuffer)
102{
103 if (pBuffer)
104 {
105 if (pBuffer->pBuffer)
106 RTMemFree(pBuffer->pBuffer);
107 RTMemFree(pBuffer);
108 }
109}
110
111DECL_FORCE_INLINE(void) IORingBufferReset(PIORINGBUFFER pBuffer)
112{
113 AssertPtr(pBuffer);
114
115 pBuffer->uReadPos = 0;
116 pBuffer->uWritePos = 0;
117 pBuffer->cBufferUsed = 0;
118}
119
120DECL_FORCE_INLINE(uint32_t) IORingBufferFree(PIORINGBUFFER pBuffer)
121{
122 AssertPtr(pBuffer);
123 return pBuffer->cBufSize - pBuffer->cBufferUsed;
124}
125
126DECL_FORCE_INLINE(uint32_t) IORingBufferUsed(PIORINGBUFFER pBuffer)
127{
128 AssertPtr(pBuffer);
129 return pBuffer->cBufferUsed;
130}
131
132DECL_FORCE_INLINE(uint32_t) IORingBufferSize(PIORINGBUFFER pBuffer)
133{
134 AssertPtr(pBuffer);
135 return pBuffer->cBufSize;
136}
137
138static void IORingBufferAquireReadBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
139{
140 uint32_t uUsed = 0;
141 uint32_t uSize = 0;
142
143 AssertPtr(pBuffer);
144
145 *ppStart = 0;
146 *pcSize = 0;
147
148 /* How much is in use? */
149 uUsed = ASMAtomicAddU32(&pBuffer->cBufferUsed, 0);
150 if (uUsed > 0)
151 {
152 /* The resulting size is the minimum of the requested size, the read
153 * block up to the end of the buffer and the currently used size. */
154 uSize = RT_MIN(cReqSize, RT_MIN(pBuffer->cBufSize - pBuffer->uReadPos, uUsed));
155 if (uSize > 0)
156 {
157 /* Return the pointer which points to the current read
158 * position. */
159 *ppStart = pBuffer->pBuffer + pBuffer->uReadPos;
160 *pcSize = uSize;
161 }
162 }
163}
164
165DECL_FORCE_INLINE(void) IORingBufferReleaseReadBlock(PIORINGBUFFER pBuffer, uint32_t cSize)
166{
167 AssertPtr(pBuffer);
168
169 /* Split at the end of the buffer. */
170 pBuffer->uReadPos = (pBuffer->uReadPos + cSize) % pBuffer->cBufSize;
171
172 ASMAtomicSubU32((int32_t*)&pBuffer->cBufferUsed, cSize);
173}
174
175static void IORingBufferAquireWriteBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
176{
177 uint32_t uFree;
178 uint32_t uSize;
179
180 AssertPtr(pBuffer);
181
182 *ppStart = 0;
183 *pcSize = 0;
184
185 /* How much is free? */
186 uFree = pBuffer->cBufSize - ASMAtomicAddU32(&pBuffer->cBufferUsed, 0);
187 if (uFree > 0)
188 {
189 /* The resulting size is the minimum of the requested size, the write
190 * block up to the end of the buffer and the currently free size. */
191 uSize = RT_MIN(cReqSize, RT_MIN(pBuffer->cBufSize - pBuffer->uWritePos, uFree));
192 if (uSize > 0)
193 {
194 /* Return the pointer which points to the current write
195 * position. */
196 *ppStart = pBuffer->pBuffer + pBuffer->uWritePos;
197 *pcSize = uSize;
198 }
199 }
200}
201
202DECL_FORCE_INLINE(void) IORingBufferReleaseWriteBlock(PIORINGBUFFER pBuffer, uint32_t cSize)
203{
204 AssertPtr(pBuffer);
205
206 /* Split at the end of the buffer. */
207 pBuffer->uWritePos = (pBuffer->uWritePos + cSize) % pBuffer->cBufSize;
208
209 ASMAtomicAddU32(&pBuffer->cBufferUsed, cSize);
210}
211
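/* Illustrative usage sketch (not compiled in): how one producer thread and one
 * consumer thread are expected to use the ring buffer above. The buffer and
 * block sizes are example values only; the buffer supports exactly one writer
 * and one reader at a time. */
#if 0
static void ioRingBufferUsageExample(void)
{
    PIORINGBUFFER pBuf = NULL;
    char *pcDst = NULL;
    char *pcSrc = NULL;
    uint32_t cbDst = 0;
    uint32_t cbSrc = 0;

    IORingBufferCreate(&pBuf, 4096 /* example size in bytes */);
    if (!pBuf)
        return;

    /* Producer side: acquire free space, fill it, then publish it. */
    IORingBufferAquireWriteBlock(pBuf, 512, &pcDst, &cbDst);
    if (cbDst > 0)
    {
        /* ... copy up to cbDst bytes of audio data to pcDst ... */
        IORingBufferReleaseWriteBlock(pBuf, cbDst);
    }

    /* Consumer side: acquire used data, read it, then free the space again. */
    IORingBufferAquireReadBlock(pBuf, 512, &pcSrc, &cbSrc);
    if (cbSrc > 0)
    {
        /* ... consume cbSrc bytes from pcSrc ... */
        IORingBufferReleaseReadBlock(pBuf, cbSrc);
    }

    IORingBufferDestroy(pBuf);
}
#endif
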
212/*******************************************************************************
213 *
214 * Helper function section
215 *
216 ******************************************************************************/
217
218#if DEBUG
219static void caDebugOutputAudioStreamBasicDescription(const char *pszDesc, const AudioStreamBasicDescription *pStreamDesc)
220{
221 char pszSampleRate[32];
222 Log(("%s AudioStreamBasicDescription:\n", pszDesc));
223 Log(("CoreAudio: Format ID: %RU32 (%c%c%c%c)\n", pStreamDesc->mFormatID, RT_BYTE4(pStreamDesc->mFormatID), RT_BYTE3(pStreamDesc->mFormatID), RT_BYTE2(pStreamDesc->mFormatID), RT_BYTE1(pStreamDesc->mFormatID)));
224 Log(("CoreAudio: Flags: %RU32", pStreamDesc->mFormatFlags));
225 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsFloat)
226 Log((" Float"));
227 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsBigEndian)
228 Log((" BigEndian"));
229 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsSignedInteger)
230 Log((" SignedInteger"));
231 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsPacked)
232 Log((" Packed"));
233 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsAlignedHigh)
234 Log((" AlignedHigh"));
235 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsNonInterleaved)
236 Log((" NonInterleaved"));
237 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsNonMixable)
238 Log((" NonMixable"));
239 if (pStreamDesc->mFormatFlags & kAudioFormatFlagsAreAllClear)
240 Log((" AllClear"));
241 Log(("\n"));
242 snprintf(pszSampleRate, 32, "%.2f", (float)pStreamDesc->mSampleRate);
243 Log(("CoreAudio: SampleRate: %s\n", pszSampleRate));
244 Log(("CoreAudio: ChannelsPerFrame: %RU32\n", pStreamDesc->mChannelsPerFrame));
245 Log(("CoreAudio: FramesPerPacket: %RU32\n", pStreamDesc->mFramesPerPacket));
246 Log(("CoreAudio: BitsPerChannel: %RU32\n", pStreamDesc->mBitsPerChannel));
247 Log(("CoreAudio: BytesPerFrame: %RU32\n", pStreamDesc->mBytesPerFrame));
248 Log(("CoreAudio: BytesPerPacket: %RU32\n", pStreamDesc->mBytesPerPacket));
249}
250#endif /* DEBUG */
251
252static void caAudioSettingsToAudioStreamBasicDescription(const audsettings_t *pAS, AudioStreamBasicDescription *pStreamDesc)
253{
254 pStreamDesc->mFormatID = kAudioFormatLinearPCM;
255 pStreamDesc->mFormatFlags = kAudioFormatFlagIsPacked;
256 pStreamDesc->mFramesPerPacket = 1;
257 pStreamDesc->mSampleRate = (Float64)pAS->freq;
258 pStreamDesc->mChannelsPerFrame = pAS->nchannels;
259 switch (pAS->fmt)
260 {
261 case AUD_FMT_U8:
262 {
263 pStreamDesc->mBitsPerChannel = 8;
264 break;
265 }
266 case AUD_FMT_S8:
267 {
268 pStreamDesc->mBitsPerChannel = 8;
269 pStreamDesc->mFormatFlags |= kAudioFormatFlagIsSignedInteger;
270 break;
271 }
272 case AUD_FMT_U16:
273 {
274 pStreamDesc->mBitsPerChannel = 16;
275 break;
276 }
277 case AUD_FMT_S16:
278 {
279 pStreamDesc->mBitsPerChannel = 16;
280 pStreamDesc->mFormatFlags |= kAudioFormatFlagIsSignedInteger;
281 break;
282 }
283#ifdef PA_SAMPLE_S32LE
284 case AUD_FMT_U32:
285 {
286 pStreamDesc->mBitsPerChannel = 32;
287 break;
288 }
289 case AUD_FMT_S32:
290 {
291 pStreamDesc->mBitsPerChannel = 32;
292 pStreamDesc->mFormatFlags |= kAudioFormatFlagIsSignedInteger;
293 break;
294 }
295#endif
296 default:
297 break;
298 }
299 pStreamDesc->mBytesPerFrame = pStreamDesc->mChannelsPerFrame * (pStreamDesc->mBitsPerChannel / 8);
300 pStreamDesc->mBytesPerPacket = pStreamDesc->mFramesPerPacket * pStreamDesc->mBytesPerFrame;
301}
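
/* Example (illustrative values): a guest stream with freq = 44100, nchannels = 2
 * and fmt = AUD_FMT_S16 is mapped by the function above to packed, signed-integer
 * linear PCM with mBitsPerChannel = 16, mBytesPerFrame = 2 * (16 / 8) = 4 and,
 * because mFramesPerPacket is 1, mBytesPerPacket = 4. */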
302
303static OSStatus caSetFrameBufferSize(AudioDeviceID device, bool fInput, UInt32 cReqSize, UInt32 *pcActSize)
304{
305 OSStatus err = noErr;
306 UInt32 cSize = 0;
307 AudioValueRange *pRange = NULL;
308 size_t a = 0;
309 Float64 cMin = -1;
310 Float64 cMax = -1;
311
312 /* First try to set the new frame buffer size. */
313 AudioDeviceSetProperty(device,
314 NULL,
315 0,
316 fInput,
317 kAudioDevicePropertyBufferFrameSize,
318 sizeof(cReqSize),
319 &cReqSize);
320 /* Check if it really was set. */
321 cSize = sizeof(*pcActSize);
322 err = AudioDeviceGetProperty(device,
323 0,
324 fInput,
325 kAudioDevicePropertyBufferFrameSize,
326 &cSize,
327 pcActSize);
328 if (RT_UNLIKELY(err != noErr))
329 return err;
330 /* If both sizes are the same, we are done. */
331 if (cReqSize == *pcActSize)
332 return noErr;
333 /* If not we have to check the limits of the device. First get the size of
334 the buffer size range property. */
335 err = AudioDeviceGetPropertyInfo(device,
336 0,
337 fInput,
338 kAudioDevicePropertyBufferSizeRange,
339 &cSize,
340 NULL);
341 if (RT_UNLIKELY(err != noErr))
342 return err;
343 pRange = RTMemAllocZ(cSize);
344 if (VALID_PTR(pRange))
345 {
346 err = AudioDeviceGetProperty(device,
347 0,
348 fInput,
349 kAudioDevicePropertyBufferSizeRange,
350 &cSize,
351 pRange);
352 if (RT_LIKELY(err == noErr))
353 {
354 for (a=0; a < cSize/sizeof(AudioValueRange); ++a)
355 {
356 /* Search for the absolute minimum. */
357 if ( pRange[a].mMinimum < cMin
358 || cMin == -1)
359 cMin = pRange[a].mMinimum;
360 /* Search for the best maximum which isn't bigger than
361 cReqSize. */
362 if (pRange[a].mMaximum < cReqSize)
363 {
364 if (pRange[a].mMaximum > cMax)
365 cMax = pRange[a].mMaximum;
366 }
367 }
368 if (cMax == -1)
369 cMax = cMin;
370 cReqSize = cMax;
371 /* Now try to set the frame buffer size within the allowed range. */
372 AudioDeviceSetProperty(device,
373 NULL,
374 0,
375 fInput,
376 kAudioDevicePropertyBufferFrameSize,
377 sizeof(cReqSize),
378 &cReqSize);
379 /* Check if it really was set. */
380 cSize = sizeof(*pcActSize);
381 err = AudioDeviceGetProperty(device,
382 0,
383 fInput,
384 kAudioDevicePropertyBufferFrameSize,
385 &cSize,
386 pcActSize);
387 }
388 }
389 else
390 return notEnoughMemoryErr;
391
392 RTMemFree(pRange);
393 return err;
394}
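
/* Behavior summary for caSetFrameBufferSize(): the requested value is applied
 * first; if the device did not accept it, the reported buffer size ranges are
 * scanned and the call is retried with the largest reported maximum that is
 * still below the request (falling back to the smallest reported minimum if
 * none qualifies). *pcActSize receives the value the device actually uses. */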
395
396DECL_FORCE_INLINE(bool) caIsRunning(AudioDeviceID deviceID)
397{
398 OSStatus err = noErr;
399 UInt32 uFlag = 0;
400 UInt32 uSize = sizeof(uFlag);
401 err = AudioDeviceGetProperty(deviceID,
402 0,
403 0,
404 kAudioDevicePropertyDeviceIsRunning,
405 &uSize,
406 &uFlag);
407 if (err != kAudioHardwareNoError)
408 LogRel(("CoreAudio: Could not determine whether the device is running (%RI32)\n", err));
409 return uFlag >= 1;
410}
411
412/*******************************************************************************
413 *
414 * Global structures section
415 *
416 ******************************************************************************/
417
418struct
419{
420 int cBufferFrames;
421} conf =
422{
423 INIT_FIELD(.cBufferFrames =) 512
424};
425
426typedef struct caVoiceOut
427{
428 /* HW voice output structure defined by VBox */
429 HWVoiceOut hw;
430 /* Stream description which is default on the device */
431 AudioStreamBasicDescription deviceFormat;
432 /* Stream description which is selected for using by VBox */
433 AudioStreamBasicDescription streamFormat;
434 /* The audio device ID of the currently used device */
435 AudioDeviceID audioDeviceId;
436 /* The AudioUnit used */
437 AudioUnit audioUnit;
438 /* A ring buffer for transferring data to the playback thread */
439 PIORINGBUFFER pBuf;
440} caVoiceOut;
441
442typedef struct caVoiceIn
443{
444 /* HW voice input structure defined by VBox */
445 HWVoiceIn hw;
446 /* Stream description which is default on the device */
447 AudioStreamBasicDescription deviceFormat;
448 /* Stream description which is selected for using by VBox */
449 AudioStreamBasicDescription streamFormat;
450 /* The audio device ID of the currently used device */
451 AudioDeviceID audioDeviceId;
452 /* The AudioUnit used */
453 AudioUnit audioUnit;
454 /* The audio converter if necessary */
455 AudioConverterRef converter;
456 /* A temporary position value used in the caConverterCallback function */
457 uint32_t rpos;
458 /* The ratio between the device & the stream sample rate */
459 Float64 sampleRatio;
460 /* An extra buffer used for render the audio data in the recording thread */
461 AudioBufferList bufferList;
462 /* A ring buffer for transferring data from the recording thread */
463 PIORINGBUFFER pBuf;
464} caVoiceIn;
465
466/* Error code which indicates "End of data" */
467static const OSStatus caConverterEOFDErr = 0x656F6664; /* 'eofd' */
468
469/*******************************************************************************
470 *
471 * CoreAudio output section
472 *
473 ******************************************************************************/
474
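/* Output data flow: coreaudio_run_out() is called by the generic audio code and
 * copies converted samples from hw->mix_buf into the ring buffer; the HAL output
 * unit then pulls that data on its own render thread through caPlaybackCallback().
 * The ring buffer decouples the two threads. */
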
475/* callback to feed audio output buffer */
476static OSStatus caPlaybackCallback(void* inRefCon,
477 AudioUnitRenderActionFlags* ioActionFlags,
478 const AudioTimeStamp* inTimeStamp,
479 UInt32 inBusNumber,
480 UInt32 inNumberFrames,
481 AudioBufferList* ioData)
482{
483 uint32_t csAvail = 0;
484 uint32_t cbToRead = 0;
485 uint32_t csToRead = 0;
486 uint32_t csReads = 0;
487 char *pcSrc = NULL;
488
489 caVoiceOut *caVoice = (caVoiceOut *) inRefCon;
490
491 /* How much space is used in the ring buffer? */
492 csAvail = IORingBufferUsed(caVoice->pBuf) >> caVoice->hw.info.shift; /* bytes -> samples */
493 /* How much space is available in the Core Audio buffer? Use the smaller
494 * size of the two. */
495 csAvail = RT_MIN(csAvail, ioData->mBuffers[0].mDataByteSize >> caVoice->hw.info.shift);
496
497 Log2(("CoreAudio: [Output] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
498
499 /* Iterate as long as data is available */
500 while(csReads < csAvail)
501 {
502 /* How much is left? */
503 csToRead = csAvail - csReads;
504 cbToRead = csToRead << caVoice->hw.info.shift; /* samples -> bytes */
505 Log2(("CoreAudio: [Output] Try reading %RU32 samples (%RU32 bytes)\n", csToRead, cbToRead));
506 /* Try to acquire the necessary block from the ring buffer. */
507 IORingBufferAquireReadBlock(caVoice->pBuf, cbToRead, &pcSrc, &cbToRead);
508 /* How much did we get? */
509 csToRead = cbToRead >> caVoice->hw.info.shift; /* bytes -> samples */
510 Log2(("CoreAudio: [Output] There are %RU32 samples (%RU32 bytes) available\n", csToRead, cbToRead));
511 /* Break if nothing is used anymore. */
512 if (RT_UNLIKELY(cbToRead == 0))
513 break;
514 /* Copy the data from our ring buffer to the core audio buffer. */
515 memcpy((char*)ioData->mBuffers[0].mData + (csReads << caVoice->hw.info.shift), pcSrc, cbToRead);
516 /* Release the read buffer, so it could be used for new data. */
517 IORingBufferReleaseReadBlock(caVoice->pBuf, cbToRead);
518 /* How much have we read so far? */
519 csReads += csToRead;
520 }
521 /* Report the number of bytes which were actually written to the Core Audio buffer. */
522 ioData->mBuffers[0].mDataByteSize = csReads << caVoice->hw.info.shift; /* samples -> bytes */
523
524 Log2(("CoreAudio: [Output] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
525
526 return noErr;
527}
528
529static int coreaudio_run_out(HWVoiceOut *hw)
530{
531 uint32_t csAvail = 0;
532 uint32_t cbToWrite = 0;
533 uint32_t csToWrite = 0;
534 uint32_t csWritten = 0;
535 char *pcDst = NULL;
536 st_sample_t *psSrc = NULL;
537
538 caVoiceOut *caVoice = (caVoiceOut *) hw;
539
540 /* How much space is available in the ring buffer */
541 csAvail = IORingBufferFree(caVoice->pBuf) >> hw->info.shift; /* bytes -> samples */
542 /* How much data is available? Use the smaller size of the two. */
543 csAvail = RT_MIN(csAvail, (uint32_t)audio_pcm_hw_get_live_out(hw));
544
545 Log2(("CoreAudio: [Output] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << hw->info.shift));
546
547 /* Iterate as long as data is available */
548 while (csWritten < csAvail)
549 {
550 /* How much is left? Split request at the end of our samples buffer. */
551 csToWrite = RT_MIN(csAvail - csWritten, (uint32_t)(hw->samples - hw->rpos));
552 cbToWrite = csToWrite << hw->info.shift; /* samples -> bytes */
553 Log2(("CoreAudio: [Output] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
554 /* Try to acquire the necessary space from the ring buffer. */
555 IORingBufferAquireWriteBlock(caVoice->pBuf, cbToWrite, &pcDst, &cbToWrite);
556 /* How much did we get? */
557 csToWrite = cbToWrite >> hw->info.shift;
558 Log2(("CoreAudio: [Output] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
559 /* Break if nothing is free anymore. */
560 if (RT_UNLIKELY(cbToWrite == 0))
561 break;
562 /* Copy the data from our mix buffer to the ring buffer. */
563 psSrc = hw->mix_buf + hw->rpos;
564 hw->clip((uint8_t*)pcDst, psSrc, csToWrite);
565 /* Release the ring buffer, so the read thread could start reading this data. */
566 IORingBufferReleaseWriteBlock(caVoice->pBuf, cbToWrite);
567 hw->rpos = (hw->rpos + csToWrite) % hw->samples;
568 /* How much have we written so far. */
569 csWritten += csToWrite;
570 }
571
572 Log2(("CoreAudio: [Output] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << hw->info.shift));
573
574 /* Return the count of samples we have processed. */
575 return csWritten;
576}
577
578static int coreaudio_write(SWVoiceOut *sw, void *buf, int len)
579{
580 return audio_pcm_sw_write (sw, buf, len);
581}
582
583static int coreaudio_ctl_out(HWVoiceOut *hw, int cmd, ...)
584{
585 OSStatus err = noErr;
586 caVoiceOut *caVoice = (caVoiceOut *) hw;
587
588 switch (cmd)
589 {
590 case VOICE_ENABLE:
591 {
592 /* Only start the device if it is actually stopped */
593 if (!caIsRunning(caVoice->audioDeviceId))
594 {
595 IORingBufferReset(caVoice->pBuf);
596 err = AudioOutputUnitStart(caVoice->audioUnit);
597 if (RT_UNLIKELY(err != noErr))
598 {
599 LogRel(("CoreAudio: [Output] Failed to start playback (%RI32)\n", err));
600 return -1;
601 }
602 }
603 break;
604 }
605 case VOICE_DISABLE:
606 {
607 /* Only stop the device if it is actually running */
608 if (caIsRunning(caVoice->audioDeviceId))
609 {
610 err = AudioOutputUnitStop(caVoice->audioUnit);
611 if (RT_UNLIKELY(err != noErr))
612 {
613 LogRel(("CoreAudio: [Output] Failed to stop playback (%RI32)\n", err));
614 return -1;
615 }
616 err = AudioUnitReset(caVoice->audioUnit,
617 kAudioUnitScope_Input,
618 0);
619 if (RT_UNLIKELY(err != noErr))
620 {
621 LogRel(("CoreAudio: [Output] Failed to reset AudioUnit (%RI32)\n", err));
622 return -1;
623 }
624 }
625 break;
626 }
627 }
628 return 0;
629}
630
631static int coreaudio_init_out(HWVoiceOut *hw, audsettings_t *as)
632{
633 OSStatus err = noErr;
634 UInt32 uSize = 0; /* temporary size of properties */
635 UInt32 uFlag = 0; /* for setting flags */
636 CFStringRef name; /* for the temporary device name fetching */
637 const char *pszName;
638 ComponentDescription cd; /* description for an audio component */
639 Component cp; /* an audio component */
640 AURenderCallbackStruct cb; /* holds the callback structure */
641 UInt32 cFrames; /* default frame count */
642
643 caVoiceOut *caVoice = (caVoiceOut *) hw;
644
645 caVoice->audioUnit = NULL;
646 caVoice->audioDeviceId = kAudioDeviceUnknown;
647
648 /* Initialize the hardware info section with the audio settings */
649 audio_pcm_init_info(&hw->info, as);
650
651 /* Fetch the default audio output device currently in use */
652 uSize = sizeof(caVoice->audioDeviceId);
653 err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
654 &uSize,
655 &caVoice->audioDeviceId);
656 if (RT_UNLIKELY(err != noErr))
657 {
658 LogRel(("CoreAudio: [Output] Unable to find default output device (%RI32)\n", err));
659 return -1;
660 }
661
662 /* Try to get the name of the default output device and log it. It's not
663 * fatal if it fails. */
664 uSize = sizeof(CFStringRef);
665 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
666 0,
667 0,
668 kAudioObjectPropertyName,
669 &uSize,
670 &name);
671 if (RT_LIKELY(err == noErr))
672 {
673 pszName = CFStringGetCStringPtr(name, kCFStringEncodingMacRoman);
674 if (pszName)
675 LogRel(("CoreAudio: Using default output device: %s\n", pszName));
676 CFRelease(name);
677 }
678 else
679 LogRel(("CoreAudio: [Output] Unable to get output device name (%RI32)\n", err));
680
681 /* Get the default frame buffer size, so that we can set up our internal
682 * buffers. */
683 uSize = sizeof(cFrames);
684 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
685 0,
686 false,
687 kAudioDevicePropertyBufferFrameSize,
688 &uSize,
689 &cFrames);
690 if (RT_UNLIKELY(err != noErr))
691 {
692 LogRel(("CoreAudio: [Output] Failed to get frame buffer size of the audio device (%RI32)\n", err));
693 return -1;
694 }
695 /* Set the frame buffer size and honor any minimum/maximum restrictions on
696 the device. */
697 err = caSetFrameBufferSize(caVoice->audioDeviceId,
698 false,
699 cFrames,
700 &cFrames);
701 if (RT_UNLIKELY(err != noErr))
702 {
703 LogRel(("CoreAudio: [Output] Failed to set frame buffer size on the audio device (%RI32)\n", err));
704 return -1;
705 }
706
707 cd.componentType = kAudioUnitType_Output;
708 cd.componentSubType = kAudioUnitSubType_HALOutput;
709 cd.componentManufacturer = kAudioUnitManufacturer_Apple;
710 cd.componentFlags = 0;
711 cd.componentFlagsMask = 0;
712
713 /* Try to find the default HAL output component. */
714 cp = FindNextComponent(NULL, &cd);
715 if (RT_UNLIKELY(cp == 0))
716 {
717 LogRel(("CoreAudio: [Output] Failed to find HAL output component\n"));
718 return -1;
719 }
720
721 /* Open the default HAL output component. */
722 err = OpenAComponent(cp, &caVoice->audioUnit);
723 if (RT_UNLIKELY(err != noErr))
724 {
725 LogRel(("CoreAudio: [Output] Failed to open output component (%RI32)\n", err));
726 return -1;
727 }
728
729 /* Switch the I/O mode for output to on. */
730 uFlag = 1;
731 err = AudioUnitSetProperty(caVoice->audioUnit,
732 kAudioOutputUnitProperty_EnableIO,
733 kAudioUnitScope_Output,
734 0,
735 &uFlag,
736 sizeof(uFlag));
737 if (RT_UNLIKELY(err != noErr))
738 {
739 LogRel(("CoreAudio: [Output] Failed to set output I/O mode enabled (%RI32)\n", err));
740 return -1;
741 }
742
743 /* Set the default audio output device as the device for the new AudioUnit. */
744 err = AudioUnitSetProperty(caVoice->audioUnit,
745 kAudioOutputUnitProperty_CurrentDevice,
746 kAudioUnitScope_Output,
747 0,
748 &caVoice->audioDeviceId,
749 sizeof(caVoice->audioDeviceId));
750 if (RT_UNLIKELY(err != noErr))
751 {
752 LogRel(("CoreAudio: [Output] Failed to set current device (%RI32)\n", err));
753 return -1;
754 }
755
756 /* CoreAudio will inform us on a second thread when it needs more data for
757 * output. Therefore register a callback function which will provide the new
758 * data. */
759 cb.inputProc = caPlaybackCallback;
760 cb.inputProcRefCon = caVoice;
761
762 err = AudioUnitSetProperty(caVoice->audioUnit,
763 kAudioUnitProperty_SetRenderCallback,
764 kAudioUnitScope_Input,
765 0,
766 &cb,
767 sizeof(cb));
768 if (RT_UNLIKELY(err != noErr))
769 {
770 LogRel(("CoreAudio: [Output] Failed to set callback (%RI32)\n", err));
771 return -1;
772 }
773
774 /* Set the quality of the output render to the maximum. */
775/* uFlag = kRenderQuality_High;*/
776/* err = AudioUnitSetProperty(caVoice->audioUnit,*/
777/* kAudioUnitProperty_RenderQuality,*/
778/* kAudioUnitScope_Global,*/
779/* 0,*/
780/* &uFlag,*/
781/* sizeof(uFlag));*/
782 /* Not fatal */
783/* if (RT_UNLIKELY(err != noErr))*/
784/* LogRel(("CoreAudio: [Output] Failed to set the render quality to the maximum (%RI32)\n", err));*/
785
786 /* Fetch the current stream format of the device. */
787 uSize = sizeof(caVoice->deviceFormat);
788 err = AudioUnitGetProperty(caVoice->audioUnit,
789 kAudioUnitProperty_StreamFormat,
790 kAudioUnitScope_Input,
791 0,
792 &caVoice->deviceFormat,
793 &uSize);
794 if (RT_UNLIKELY(err != noErr))
795 {
796 LogRel(("CoreAudio: [Output] Failed to get device format (%RI32)\n", err));
797 return -1;
798 }
799
800 /* Create an AudioStreamBasicDescription based on the audio settings of
801 * VirtualBox. */
802 caAudioSettingsToAudioStreamBasicDescription(as, &caVoice->streamFormat);
803
804#if DEBUG
805 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Output] device", &caVoice->deviceFormat);
806 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Output] output", &caVoice->streamFormat);
807#endif /* DEBUG */
808
809 /* Set the device format description for the stream. */
810 err = AudioUnitSetProperty(caVoice->audioUnit,
811 kAudioUnitProperty_StreamFormat,
812 kAudioUnitScope_Input,
813 0,
814 &caVoice->streamFormat,
815 sizeof(caVoice->streamFormat));
816 if (RT_UNLIKELY(err != noErr))
817 {
818 LogRel(("CoreAudio: [Output] Failed to set stream format (%RI32)\n", err));
819 return -1;
820 }
821
822 uSize = sizeof(caVoice->deviceFormat);
823 err = AudioUnitGetProperty(caVoice->audioUnit,
824 kAudioUnitProperty_StreamFormat,
825 kAudioUnitScope_Input,
826 0,
827 &caVoice->deviceFormat,
828 &uSize);
829 if (RT_UNLIKELY(err != noErr))
830 {
831 LogRel(("CoreAudio: [Output] Failed to get device format (%RI32)\n", err));
832 return -1;
833 }
834
835#if DEBUG
 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Output] device again", &caVoice->deviceFormat);
#endif /* DEBUG */
836
837 /* Also set the frame buffer size of the device on our AudioUnit. This
838 should make sure that the frame count which we receive in the render
839 thread is what we expect. */
840 err = AudioUnitSetProperty(caVoice->audioUnit,
841 kAudioUnitProperty_MaximumFramesPerSlice,
842 kAudioUnitScope_Global,
843 0,
844 &cFrames,
845 sizeof(cFrames));
846 if (RT_UNLIKELY(err != noErr))
847 {
848 LogRel(("CoreAudio: [Output] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
849 return -1;
850 }
851
852 /* Finally initialize the new AudioUnit. */
853 err = AudioUnitInitialize(caVoice->audioUnit);
854 if (RT_UNLIKELY(err != noErr))
855 {
856 LogRel(("CoreAudio: [Output] Failed to initialize the AudioUnit (%RI32)\n", err));
857 return -1;
858 }
859
860 /* There are buggy devices (e.g. my bluetooth headset) which don't honor
861 * the frame buffer size set in the previous calls. So finally get the
862 * frame buffer size after the AudioUnit has been initialized. */
863 uSize = sizeof(cFrames);
864 err = AudioUnitGetProperty(caVoice->audioUnit,
865 kAudioUnitProperty_MaximumFramesPerSlice,
866 kAudioUnitScope_Global,
867 0,
868 &cFrames,
869 &uSize);
870 if (RT_UNLIKELY(err != noErr))
871 {
872 LogRel(("CoreAudio: [Output] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
873 return -1;
874 }
875
876 /* Create the internal ring buffer. */
877 hw->samples = cFrames * caVoice->streamFormat.mChannelsPerFrame;
878 IORingBufferCreate(&caVoice->pBuf, hw->samples << hw->info.shift);
879 if (!VALID_PTR(caVoice->pBuf))
880 {
881 LogRel(("CoreAudio: [Output] Failed to create internal ring buffer\n"));
882 AudioUnitUninitialize(caVoice->audioUnit);
883 return -1;
884 }
885
886 Log(("CoreAudio: [Output] HW samples: %d; Frame count: %RU32\n", hw->samples, cFrames));
887
888 return 0;
889}
890
891static void coreaudio_fini_out(HWVoiceOut *hw)
892{
893 int rc = 0;
894 OSStatus err = noErr;
895 caVoiceOut *caVoice = (caVoiceOut *) hw;
896
897 rc = coreaudio_ctl_out(hw, VOICE_DISABLE);
898 if (RT_LIKELY(rc == 0))
899 {
900 err = AudioUnitUninitialize(caVoice->audioUnit);
901 if (RT_LIKELY(err == noErr))
902 {
903 err = CloseComponent(caVoice->audioUnit);
904 if (RT_LIKELY(err == noErr))
905 {
906 caVoice->audioUnit = NULL;
907 caVoice->audioDeviceId = kAudioDeviceUnknown;
908 IORingBufferDestroy(caVoice->pBuf);
909 }
910 else
911 LogRel(("CoreAudio: [Output] Failed to close the AudioUnit (%RI32)\n", err));
912 }
913 else
914 LogRel(("CoreAudio: [Output] Failed to uninitialize the AudioUnit (%RI32)\n", err));
915 }
916 else
917 LogRel(("CoreAudio: [Output] Failed to stop playback (%RI32)\n", err));
918}
919
920/*******************************************************************************
921 *
922 * CoreAudio input section
923 *
924 ******************************************************************************/
925
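/* Input data flow: caRecordingCallback() runs on the Core Audio thread, renders
 * the captured data from the HAL unit (through the AudioConverter when sample
 * rate or channel count conversion is needed) and writes it into the ring buffer;
 * coreaudio_run_in() later drains the ring buffer into hw->conv_buf on the
 * generic audio code's side. */
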
926/* callback to convert audio input data from one format to another */
927static OSStatus caConverterCallback(AudioConverterRef inAudioConverter,
928 UInt32 *ioNumberDataPackets,
929 AudioBufferList *ioData,
930 AudioStreamPacketDescription **outDataPacketDescription,
931 void *inUserData)
932{
933 /* In principle we would have to check here whether the source is non-interleaved
934 * and, if so, go through all the buffers, not just the first one as we do now. */
935 UInt32 cSize = 0;
936
937 caVoiceIn *caVoice = (caVoiceIn *) inUserData;
938
939 const AudioBufferList *pBufferList = &caVoice->bufferList;
940/* Log2(("converting .... ################ %RU32 %RU32 %RU32 %RU32 %RU32\n", *ioNumberDataPackets, bufferList->mBuffers[i].mNumberChannels, bufferList->mNumberBuffers, bufferList->mBuffers[i].mDataByteSize, ioData->mNumberBuffers));*/
941
942 /* Use the smaller of the number of packets to process and the packets
943 * still available in the buffer. */
944 cSize = RT_MIN(*ioNumberDataPackets * caVoice->deviceFormat.mBytesPerPacket,
945 pBufferList->mBuffers[0].mDataByteSize - caVoice->rpos);
946 /* Set the new size on output, so the caller knows what we have processed. */
947 *ioNumberDataPackets = cSize / caVoice->deviceFormat.mBytesPerPacket;
948 /* If no data is available anymore we return with an error code. This error
949 * code will be returned from AudioConverterFillComplexBuffer. */
950 if (*ioNumberDataPackets == 0)
951 {
952 ioData->mBuffers[0].mDataByteSize = 0;
953 ioData->mBuffers[0].mData = NULL;
954 return caConverterEOFDErr;
955 }
956 else
957 {
958 ioData->mBuffers[0].mNumberChannels = pBufferList->mBuffers[0].mNumberChannels;
959 ioData->mBuffers[0].mDataByteSize = cSize;
960 ioData->mBuffers[0].mData = (char*)pBufferList->mBuffers[0].mData + caVoice->rpos;
961 caVoice->rpos += cSize;
962
963 /* Log2(("converting .... ################ %RU32 %RU32\n", size, caVoice->rpos));*/
964 }
965
966 return noErr;
967}
968
969/* callback to feed audio input buffer */
970static OSStatus caRecordingCallback(void* inRefCon,
971 AudioUnitRenderActionFlags* ioActionFlags,
972 const AudioTimeStamp* inTimeStamp,
973 UInt32 inBusNumber,
974 UInt32 inNumberFrames,
975 AudioBufferList* ioData)
976{
977 OSStatus err = noErr;
978 uint32_t csAvail = 0;
979 uint32_t csToWrite = 0;
980 uint32_t cbToWrite = 0;
981 uint32_t csWritten = 0;
982 char *pcDst = NULL;
983 AudioBufferList tmpList;
984 UInt32 ioOutputDataPacketSize = 0;
985
986 caVoiceIn *caVoice = (caVoiceIn *) inRefCon;
987
988 /* If nothing is pending return immediately. */
989 if (inNumberFrames == 0)
990 return noErr;
991
992 /* Are we using a converter? */
993 if (VALID_PTR(caVoice->converter))
994 {
995 /* Firstly render the data as usual */
996 caVoice->bufferList.mBuffers[0].mNumberChannels = caVoice->deviceFormat.mChannelsPerFrame;
997 caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->deviceFormat.mBytesPerFrame * inNumberFrames;
998 caVoice->bufferList.mBuffers[0].mData = RTMemAlloc(caVoice->bufferList.mBuffers[0].mDataByteSize);
999
1000 err = AudioUnitRender(caVoice->audioUnit,
1001 ioActionFlags,
1002 inTimeStamp,
1003 inBusNumber,
1004 inNumberFrames,
1005 &caVoice->bufferList);
1006 if(RT_UNLIKELY(err != noErr))
1007 {
1008 Log(("CoreAudio: [Input] Failed to render audio data (%RI32)\n", err));
1009 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1010 return err;
1011 }
1012
1013 /* How much space is free in the ring buffer? */
1014 csAvail = IORingBufferFree(caVoice->pBuf) >> caVoice->hw.info.shift; /* bytes -> samples */
1015 /* How much data is in the Core Audio buffer? Use the smaller size of
1016 * the two. */
1017 csAvail = RT_MIN(csAvail, (uint32_t)((caVoice->bufferList.mBuffers[0].mDataByteSize / caVoice->deviceFormat.mBytesPerFrame) * caVoice->sampleRatio));
1018
1019 Log2(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
1020 /* Initialize the temporary output buffer */
1021 tmpList.mNumberBuffers = 1;
1022 tmpList.mBuffers[0].mNumberChannels = caVoice->streamFormat.mChannelsPerFrame;
1023 /* Set the read position to zero. */
1024 caVoice->rpos = 0;
1025 /* Iterate as long as data is available */
1026 while(csWritten < csAvail)
1027 {
1028 /* How much is left? */
1029 csToWrite = csAvail - csWritten;
1030 cbToWrite = csToWrite << caVoice->hw.info.shift;
1031 Log2(("CoreAudio: [Input] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
1032 /* Try to acquire the necessary space from the ring buffer. */
1033 IORingBufferAquireWriteBlock(caVoice->pBuf, cbToWrite, &pcDst, &cbToWrite);
1034 /* How much did we get? */
1035 csToWrite = cbToWrite >> caVoice->hw.info.shift;
1036 Log2(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
1037 /* Break if nothing is free anymore. */
1038 if (RT_UNLIKELY(cbToWrite == 0))
1039 break;
1040
1041 /* Now set how much space is available for output */
1042 ioOutputDataPacketSize = cbToWrite / caVoice->streamFormat.mBytesPerPacket;
1043 /* Set our ring buffer as target. */
1044 tmpList.mBuffers[0].mDataByteSize = cbToWrite;
1045 tmpList.mBuffers[0].mData = pcDst;
1046 AudioConverterReset(caVoice->converter);
1047 err = AudioConverterFillComplexBuffer(caVoice->converter,
1048 caConverterCallback,
1049 caVoice,
1050 &ioOutputDataPacketSize,
1051 &tmpList,
1052 NULL);
1053 if( RT_UNLIKELY(err != noErr)
1054 && err != caConverterEOFDErr)
1055 {
1056 Log(("CoreAudio: [Input] Failed to convert audio data (%RI32:%c%c%c%c)\n", err, RT_BYTE4(err), RT_BYTE3(err), RT_BYTE2(err), RT_BYTE1(err)));
1057 break;
1058 }
1059 /* Check in any case what processed size is returned. It could be
1060 * much smaller than we expected. */
1061 cbToWrite = ioOutputDataPacketSize * caVoice->streamFormat.mBytesPerPacket;
1062 csToWrite = cbToWrite >> caVoice->hw.info.shift;
1063 /* Release the ring buffer, so the main thread could start reading this data. */
1064 IORingBufferReleaseWriteBlock(caVoice->pBuf, cbToWrite);
1065 csWritten += csToWrite;
1066 /* If the error is "End of Data" it means there is no data anymore
1067 * which could be converted. So end here now. */
1068 if (err == caConverterEOFDErr)
1069 break;
1070 }
1071 /* Cleanup */
1072 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1073 Log2(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
1074 }
1075 else
1076 {
1077 caVoice->bufferList.mBuffers[0].mNumberChannels = caVoice->streamFormat.mChannelsPerFrame;
1078 caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->streamFormat.mBytesPerFrame * inNumberFrames;
1079 caVoice->bufferList.mBuffers[0].mData = RTMemAlloc(caVoice->bufferList.mBuffers[0].mDataByteSize);
1080
1081 err = AudioUnitRender(caVoice->audioUnit,
1082 ioActionFlags,
1083 inTimeStamp,
1084 inBusNumber,
1085 inNumberFrames,
1086 &caVoice->bufferList);
1087 if(RT_UNLIKELY(err != noErr))
1088 {
1089 Log(("CoreAudio: [Input] Failed to render audio data (%RI32)\n", err));
1090 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1091 return err;
1092 }
1093
1094 /* How much space is free in the ring buffer? */
1095 csAvail = IORingBufferFree(caVoice->pBuf) >> caVoice->hw.info.shift; /* bytes -> samples */
1096 /* How much data is in the Core Audio buffer? Use the smaller size of
1097 * the two. */
1098 csAvail = RT_MIN(csAvail, caVoice->bufferList.mBuffers[0].mDataByteSize >> caVoice->hw.info.shift);
1099
1100 Log2(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
1101
1102 /* Iterate as long as data is available */
1103 while(csWritten < csAvail)
1104 {
1105 /* How much is left? */
1106 csToWrite = csAvail - csWritten;
1107 cbToWrite = csToWrite << caVoice->hw.info.shift;
1108 Log2(("CoreAudio: [Input] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
1109 /* Try to acquire the necessary space from the ring buffer. */
1110 IORingBufferAquireWriteBlock(caVoice->pBuf, cbToWrite, &pcDst, &cbToWrite);
1111 /* How much did we get? */
1112 csToWrite = cbToWrite >> caVoice->hw.info.shift;
1113 Log2(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
1114 /* Break if nothing is free anymore. */
1115 if (RT_UNLIKELY(cbToWrite == 0))
1116 break;
1117 /* Copy the data from the core audio buffer to the ring buffer. */
1118 memcpy(pcDst, (char*)caVoice->bufferList.mBuffers[0].mData + (csWritten << caVoice->hw.info.shift), cbToWrite);
1119 /* Release the ring buffer, so the main thread could start reading this data. */
1120 IORingBufferReleaseWriteBlock(caVoice->pBuf, cbToWrite);
1121 csWritten += csToWrite;
1122 }
1123 /* Cleanup */
1124 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1125
1126 Log2(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
1127 }
1128
1129 return err;
1130}
1131
1132static int coreaudio_run_in(HWVoiceIn *hw)
1133{
1134 uint32_t csAvail = 0;
1135 uint32_t cbToRead = 0;
1136 uint32_t csToRead = 0;
1137 uint32_t csReads = 0;
1138 char *pcSrc;
1139 st_sample_t *psDst;
1140
1141 caVoiceIn *caVoice = (caVoiceIn *) hw;
1142
1143 /* How much space is used in the ring buffer? */
1144 csAvail = IORingBufferUsed(caVoice->pBuf) >> hw->info.shift; /* bytes -> samples */
1145 /* How much space is available in the mix buffer? Use the smaller size of
1146 * the two. */
1147 csAvail = RT_MIN(csAvail, (uint32_t)(hw->samples - audio_pcm_hw_get_live_in (hw)));
1148
1149 Log2(("CoreAudio: [Input] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
1150
1151 /* Iterate as long as data is available */
1152 while (csReads < csAvail)
1153 {
1154 /* How much is left? Split request at the end of our samples buffer. */
1155 csToRead = RT_MIN(csAvail - csReads, (uint32_t)(hw->samples - hw->wpos));
1156 cbToRead = csToRead << hw->info.shift;
1157 Log2(("CoreAudio: [Input] Try reading %RU32 samples (%RU32 bytes)\n", csToRead, cbToRead));
1158 /* Try to acquire the necessary block from the ring buffer. */
1159 IORingBufferAquireReadBlock(caVoice->pBuf, cbToRead, &pcSrc, &cbToRead);
1160 /* How much did we get? */
1161 csToRead = cbToRead >> hw->info.shift;
1162 Log2(("CoreAudio: [Input] There are %RU32 samples (%RU32 bytes) available\n", csToRead, cbToRead));
1163 /* Break if nothing is used anymore. */
1164 if (cbToRead == 0)
1165 break;
1166 /* Copy the data from our ring buffer to the mix buffer. */
1167 psDst = hw->conv_buf + hw->wpos;
1168 hw->conv(psDst, pcSrc, csToRead, &nominal_volume);
1169 /* Release the read buffer, so it could be used for new data. */
1170 IORingBufferReleaseReadBlock(caVoice->pBuf, cbToRead);
1171 hw->wpos = (hw->wpos + csToRead) % hw->samples;
1172 /* How much have we read so far? */
1173 csReads += csToRead;
1174 }
1175
1176 Log2(("CoreAudio: [Input] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
1177
1178 return csReads;
1179}
1180
1181static int coreaudio_read(SWVoiceIn *sw, void *buf, int size)
1182{
1183 return audio_pcm_sw_read (sw, buf, size);
1184}
1185
1186static int coreaudio_ctl_in(HWVoiceIn *hw, int cmd, ...)
1187{
1188 OSStatus err = noErr;
1189 caVoiceIn *caVoice = (caVoiceIn *) hw;
1190
1191 switch (cmd)
1192 {
1193 case VOICE_ENABLE:
1194 {
1195 /* Only start the device if it is actually stopped */
1196 if (!caIsRunning(caVoice->audioDeviceId))
1197 {
1198 IORingBufferReset(caVoice->pBuf);
1199 err = AudioOutputUnitStart(caVoice->audioUnit);
1200 }
1201 if (RT_UNLIKELY(err != noErr))
1202 {
1203 LogRel(("CoreAudio: [Input] Failed to start recording (%RI32)\n", err));
1204 return -1;
1205 }
1206 break;
1207 }
1208 case VOICE_DISABLE:
1209 {
1210 /* Only stop the device if it is actually running */
1211 if (caIsRunning(caVoice->audioDeviceId))
1212 {
1213 err = AudioOutputUnitStop(caVoice->audioUnit);
1214 if (RT_UNLIKELY(err != noErr))
1215 {
1216 LogRel(("CoreAudio: [Input] Failed to stop recording (%RI32)\n", err));
1217 return -1;
1218 }
1219 err = AudioUnitReset(caVoice->audioUnit,
1220 kAudioUnitScope_Input,
1221 0);
1222 if (RT_UNLIKELY(err != noErr))
1223 {
1224 LogRel(("CoreAudio: [Input] Failed to reset AudioUnit (%RI32)\n", err));
1225 return -1;
1226 }
1227 }
1228 break;
1229 }
1230 }
1231 return 0;
1232}
1233
1234static int coreaudio_init_in(HWVoiceIn *hw, audsettings_t *as)
1235{
1236 OSStatus err = noErr;
1237 int rc = -1;
1238 UInt32 uSize = 0; /* temporary size of properties */
1239 UInt32 uFlag = 0; /* for setting flags */
1240 CFStringRef name; /* for the temporary device name fetching */
1241 const char *pszName;
1242 ComponentDescription cd; /* description for an audio component */
1243 Component cp; /* an audio component */
1244 AURenderCallbackStruct cb; /* holds the callback structure */
1245 UInt32 cFrames; /* default frame count */
1246 const SInt32 channelMap[2] = {0, 0}; /* Channel map for mono -> stereo: both output channels read input channel 0 */
1247
1248 caVoiceIn *caVoice = (caVoiceIn *) hw;
1249
1250 caVoice->audioUnit = NULL;
1251 caVoice->audioDeviceId = kAudioDeviceUnknown;
1252 caVoice->converter = NULL;
1253 caVoice->sampleRatio = 1;
1254
1255 /* Initialize the hardware info section with the audio settings */
1256 audio_pcm_init_info(&hw->info, as);
1257
1258 /* Fetch the default audio input device currently in use */
1259 uSize = sizeof(caVoice->audioDeviceId);
1260 err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
1261 &uSize,
1262 &caVoice->audioDeviceId);
1263 if (RT_UNLIKELY(err != noErr))
1264 {
1265 LogRel(("CoreAudio: [Input] Unable to find default input device (%RI32)\n", err));
1266 return -1;
1267 }
1268
1269 /* Try to get the name of the default input device and log it. It's not
1270 * fatal if it fails. */
1271 uSize = sizeof(CFStringRef);
1272 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
1273 0,
1274 1,
1275 kAudioObjectPropertyName,
1276 &uSize,
1277 &name);
1278 if (RT_LIKELY(err == noErr))
1279 {
1280 pszName = CFStringGetCStringPtr(name, kCFStringEncodingMacRoman);
1281 if (pszName)
1282 LogRel(("CoreAudio: Using default input device: %s\n", pszName));
1283 CFRelease(name);
1284 }
1285 else
1286 LogRel(("CoreAudio: [Input] Unable to get input device name (%RI32)\n", err));
1287
1288 /* Get the default frame buffer size, so that we can set up our internal
1289 * buffers. */
1290 uSize = sizeof(cFrames);
1291 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
1292 0,
1293 true,
1294 kAudioDevicePropertyBufferFrameSize,
1295 &uSize,
1296 &cFrames);
1297 if (RT_UNLIKELY(err != noErr))
1298 {
1299 LogRel(("CoreAudio: [Input] Failed to get frame buffer size of the audio device (%RI32)\n", err));
1300 return -1;
1301 }
1302 /* Set the frame buffer size and honor any minimum/maximum restrictions on
1303 the device. */
1304 err = caSetFrameBufferSize(caVoice->audioDeviceId,
1305 true,
1306 cFrames,
1307 &cFrames);
1308 if (RT_UNLIKELY(err != noErr))
1309 {
1310 LogRel(("CoreAudio: [Input] Failed to set frame buffer size on the audio device (%RI32)\n", err));
1311 return -1;
1312 }
1313
1314 cd.componentType = kAudioUnitType_Output;
1315 cd.componentSubType = kAudioUnitSubType_HALOutput;
1316 cd.componentManufacturer = kAudioUnitManufacturer_Apple;
1317 cd.componentFlags = 0;
1318 cd.componentFlagsMask = 0;
1319
1320 /* Try to find the default HAL output component. */
1321 cp = FindNextComponent(NULL, &cd);
1322 if (RT_UNLIKELY(cp == 0))
1323 {
1324 LogRel(("CoreAudio: [Input] Failed to find HAL output component\n"));
1325 return -1;
1326 }
1327
1328 /* Open the default HAL output component. */
1329 err = OpenAComponent(cp, &caVoice->audioUnit);
1330 if (RT_UNLIKELY(err != noErr))
1331 {
1332 LogRel(("CoreAudio: [Input] Failed to open output component (%RI32)\n", err));
1333 return -1;
1334 }
1335
1336 /* Switch the I/O mode for input to on. */
1337 uFlag = 1;
1338 err = AudioUnitSetProperty(caVoice->audioUnit,
1339 kAudioOutputUnitProperty_EnableIO,
1340 kAudioUnitScope_Input,
1341 1,
1342 &uFlag,
1343 sizeof(uFlag));
1344 if (RT_UNLIKELY(err != noErr))
1345 {
1346 LogRel(("CoreAudio: [Input] Failed to set input I/O mode enabled (%RI32)\n", err));
1347 return -1;
1348 }
1349
1350 /* Switch the I/O mode for output to off. This is important, as this is a
1351 * pure input stream. */
1352 uFlag = 0;
1353 err = AudioUnitSetProperty(caVoice->audioUnit,
1354 kAudioOutputUnitProperty_EnableIO,
1355 kAudioUnitScope_Output,
1356 0,
1357 &uFlag,
1358 sizeof(uFlag));
1359 if (RT_UNLIKELY(err != noErr))
1360 {
1361 LogRel(("CoreAudio: [Input] Failed to set output I/O mode disabled (%RI32)\n", err));
1362 return -1;
1363 }
1364
1365 /* Set the default audio input device as the device for the new AudioUnit. */
1366 err = AudioUnitSetProperty(caVoice->audioUnit,
1367 kAudioOutputUnitProperty_CurrentDevice,
1368 kAudioUnitScope_Global,
1369 0,
1370 &caVoice->audioDeviceId,
1371 sizeof(caVoice->audioDeviceId));
1372 if (RT_UNLIKELY(err != noErr))
1373 {
1374 LogRel(("CoreAudio: [Input] Failed to set current device (%RI32)\n", err));
1375 return -1;
1376 }
1377
1378 /* CoreAudio will inform us on a second thread about new incoming audio data.
1379 * Therefore register a callback function which will process the new data.
1380 */
1381 cb.inputProc = caRecordingCallback;
1382 cb.inputProcRefCon = caVoice;
1383
1384 err = AudioUnitSetProperty(caVoice->audioUnit,
1385 kAudioOutputUnitProperty_SetInputCallback,
1386 kAudioUnitScope_Global,
1387 0,
1388 &cb,
1389 sizeof(cb));
1390 if (RT_UNLIKELY(err != noErr))
1391 {
1392 LogRel(("CoreAudio: [Input] Failed to set callback (%RI32)\n", err));
1393 return -1;
1394 }
1395
1396 /* Fetch the current stream format of the device. */
1397 uSize = sizeof(caVoice->deviceFormat);
1398 err = AudioUnitGetProperty(caVoice->audioUnit,
1399 kAudioUnitProperty_StreamFormat,
1400 kAudioUnitScope_Input,
1401 1,
1402 &caVoice->deviceFormat,
1403 &uSize);
1404 if (RT_UNLIKELY(err != noErr))
1405 {
1406 LogRel(("CoreAudio: [Input] Failed to get device format (%RI32)\n", err));
1407 return -1;
1408 }
1409
1410 /* Create an AudioStreamBasicDescription based on the audio settings of
1411 * VirtualBox. */
1412 caAudioSettingsToAudioStreamBasicDescription(as, &caVoice->streamFormat);
1413
1414#if DEBUG
1415 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Input] device", &caVoice->deviceFormat);
1416 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Input] input", &caVoice->streamFormat);
1417#endif /* DEBUG */
1418
1419 /* If the frequency of the device is different from the requested one we
1420 * need a converter. The same applies if the number of channels is different. */
1421 if ( caVoice->deviceFormat.mSampleRate != caVoice->streamFormat.mSampleRate
1422 || caVoice->deviceFormat.mChannelsPerFrame != caVoice->streamFormat.mChannelsPerFrame)
1423 {
1424 err = AudioConverterNew(&caVoice->deviceFormat,
1425 &caVoice->streamFormat,
1426 &caVoice->converter);
1427 if (RT_UNLIKELY(err != noErr))
1428 {
1429 LogRel(("CoreAudio: [Input] Failed to create the audio converter (%RI32)\n", err));
1430 return -1;
1431 }
1432
1433 if (caVoice->deviceFormat.mChannelsPerFrame == 1 &&
1434 caVoice->streamFormat.mChannelsPerFrame == 2)
1435 {
1436 /* If the channel count is different we have to tell the converter and
1437 supply a channel mapping. For now we only support mapping from mono
1438 to stereo. For all other cases the Core Audio defaults are used,
1439 which means dropping additional channels in most
1440 cases. */
1441 err = AudioConverterSetProperty(caVoice->converter,
1442 kAudioConverterChannelMap,
1443 sizeof(channelMap),
1444 channelMap);
1445 if (RT_UNLIKELY(err != noErr))
1446 {
1447 LogRel(("CoreAudio: [Input] Failed to add a channel mapper to the audio converter (%RI32)\n", err));
1448 return -1;
1449 }
1450 }
1451 /* Set sample rate converter quality to maximum */
1452/* uFlag = kAudioConverterQuality_Max;*/
1453/* err = AudioConverterSetProperty(caVoice->converter,*/
1454/* kAudioConverterSampleRateConverterQuality,*/
1455/* sizeof(uFlag),*/
1456/* &uFlag);*/
1457 /* Not fatal */
1458/* if (RT_UNLIKELY(err != noErr))*/
1459/* LogRel(("CoreAudio: [Input] Failed to set the audio converter quality to the maximum (%RI32)\n", err));*/
1460
1461 Log(("CoreAudio: [Input] Converter in use\n"));
1462 /* Set the new format description for the stream. */
1463 err = AudioUnitSetProperty(caVoice->audioUnit,
1464 kAudioUnitProperty_StreamFormat,
1465 kAudioUnitScope_Output,
1466 1,
1467 &caVoice->deviceFormat,
1468 sizeof(caVoice->deviceFormat));
1469 if (RT_UNLIKELY(err != noErr))
1470 {
1471 LogRel(("CoreAudio: [Input] Failed to set stream format (%RI32)\n", err));
1472 return -1;
1473 }
1474 err = AudioUnitSetProperty(caVoice->audioUnit,
1475 kAudioUnitProperty_StreamFormat,
1476 kAudioUnitScope_Input,
1477 1,
1478 &caVoice->deviceFormat,
1479 sizeof(caVoice->deviceFormat));
1480 if (RT_UNLIKELY(err != noErr))
1481 {
1482 LogRel(("CoreAudio: [Input] Failed to set stream format (%RI32)\n", err));
1483 return -1;
1484 }
1485 }
1486 else
1487 {
1488 /* Set the new format description for the stream. */
1489 err = AudioUnitSetProperty(caVoice->audioUnit,
1490 kAudioUnitProperty_StreamFormat,
1491 kAudioUnitScope_Output,
1492 1,
1493 &caVoice->streamFormat,
1494 sizeof(caVoice->streamFormat));
1495 if (RT_UNLIKELY(err != noErr))
1496 {
1497 LogRel(("CoreAudio: [Input] Failed to set stream format (%RI32)\n", err));
1498 return -1;
1499 }
1500 }
1501
1502 /* Also set the frame buffer size of the device on our AudioUnit. This
1503 should make sure that the frame count which we receive in the render
1504 thread is what we expect. */
1505 err = AudioUnitSetProperty(caVoice->audioUnit,
1506 kAudioUnitProperty_MaximumFramesPerSlice,
1507 kAudioUnitScope_Global,
1508 1,
1509 &cFrames,
1510 sizeof(cFrames));
1511 if (RT_UNLIKELY(err != noErr))
1512 {
1513 LogRel(("CoreAudio: [Input] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
1514 return -1;
1515 }
1516
1517 /* Finally initialize the new AudioUnit. */
1518 err = AudioUnitInitialize(caVoice->audioUnit);
1519 if (RT_UNLIKELY(err != noErr))
1520 {
1521 LogRel(("CoreAudio: [Input] Failed to initialize the AudioUnit (%RI32)\n", err));
1522 return -1;
1523 }
1524
1525 uSize = sizeof(caVoice->deviceFormat);
1526 err = AudioUnitGetProperty(caVoice->audioUnit,
1527 kAudioUnitProperty_StreamFormat,
1528 kAudioUnitScope_Output,
1529 1,
1530 &caVoice->deviceFormat,
1531 &uSize);
1532 if (RT_UNLIKELY(err != noErr))
1533 {
1534 LogRel(("CoreAudio: [Input] Failed to get device format (%RI32)\n", err));
1535 return -1;
1536 }
1537
1538 /* There are buggy devices (e.g. my bluetooth headset) which don't honor
1539 * the frame buffer size set in the previous calls. So finally get the
1540 * frame buffer size after the AudioUnit has been initialized. */
1541 uSize = sizeof(cFrames);
1542 err = AudioUnitGetProperty(caVoice->audioUnit,
1543 kAudioUnitProperty_MaximumFramesPerSlice,
1544 kAudioUnitScope_Global,
1545 0,
1546 &cFrames,
1547 &uSize);
1548 if (RT_UNLIKELY(err != noErr))
1549 {
1550 LogRel(("CoreAudio: [Input] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
1551 return -1;
1552 }
1553
1554 /* Calculate the ratio between the device and the stream sample rate. */
1555 caVoice->sampleRatio = caVoice->streamFormat.mSampleRate / caVoice->deviceFormat.mSampleRate;
1556
1557 /* Set to zero first */
1558 caVoice->pBuf = NULL;
1559 /* Create the AudioBufferList structure with one buffer. */
1560 caVoice->bufferList.mNumberBuffers = 1;
1561 /* Initialize the buffer to nothing. */
1562 caVoice->bufferList.mBuffers[0].mNumberChannels = caVoice->streamFormat.mChannelsPerFrame;
1563 caVoice->bufferList.mBuffers[0].mDataByteSize = 0;
1564 caVoice->bufferList.mBuffers[0].mData = NULL;
1565
1566 /* Make sure that the ring buffer is big enough to hold the recording
1567 * data. Compare the maximum frames per slice value with the frames
1568 * necessary when using the converter where the sample rate could differ.
1569 * The result is always multiplied by the channels per frame to get the
1570 * sample count. */
1571 hw->samples = RT_MAX( cFrames,
1572 (cFrames * caVoice->deviceFormat.mBytesPerFrame * caVoice->sampleRatio) / caVoice->streamFormat.mBytesPerFrame)
1573 * caVoice->streamFormat.mChannelsPerFrame;
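 /* Worked example (illustrative values): with cFrames = 512, a mono Float32
  * device format (mBytesPerFrame = 4) at 44100 Hz and a stereo S16 guest stream
  * (mBytesPerFrame = 4) at 22050 Hz, sampleRatio = 22050 / 44100 = 0.5, so the
  * converter term is (512 * 4 * 0.5) / 4 = 256; RT_MAX(512, 256) * 2 channels
  * gives hw->samples = 1024. */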
1574 /* Create the internal ring buffer. */
1575 IORingBufferCreate(&caVoice->pBuf, hw->samples << hw->info.shift);
1576 if (VALID_PTR(caVoice->pBuf))
1577 rc = 0;
1578 else
1579 LogRel(("CoreAudio: [Input] Failed to create internal ring buffer\n"));
1580
1581 if (rc != 0)
1582 {
1583 if (caVoice->pBuf)
1584 IORingBufferDestroy(caVoice->pBuf);
1585 AudioUnitUninitialize(caVoice->audioUnit);
1586 }
1587
1588 Log(("CoreAudio: [Input] HW samples: %d; Frame count: %RU32\n", hw->samples, cFrames));
1589
1590 return rc;
1591}
1592
1593static void coreaudio_fini_in(HWVoiceIn *hw)
1594{
1595 int rc = 0;
1596 OSStatus err = noErr;
1597 caVoiceIn *caVoice = (caVoiceIn *) hw;
1598
1599 rc = coreaudio_ctl_in(hw, VOICE_DISABLE);
1600 if (RT_LIKELY(rc == 0))
1601 {
1602 if (caVoice->converter)
1603 AudioConverterDispose(caVoice->converter);
1604 err = AudioUnitUninitialize(caVoice->audioUnit);
1605 if (RT_LIKELY(err == noErr))
1606 {
1607 err = CloseComponent(caVoice->audioUnit);
1608 if (RT_LIKELY(err == noErr))
1609 {
1610 caVoice->audioUnit = NULL;
1611 caVoice->audioDeviceId = kAudioDeviceUnknown;
1612 IORingBufferDestroy(caVoice->pBuf);
1613 }
1614 else
1615 LogRel(("CoreAudio: [Input] Failed to close the AudioUnit (%RI32)\n", err));
1616 }
1617 else
1618 LogRel(("CoreAudio: [Input] Failed to uninitialize the AudioUnit (%RI32)\n", err));
1619 }
1620 else
1621 LogRel(("CoreAudio: [Input] Failed to stop recording (%RI32)\n", err));
1622}
1623
1624/*******************************************************************************
1625 *
1626 * CoreAudio global section
1627 *
1628 ******************************************************************************/
1629
1630static void *coreaudio_audio_init(void)
1631{
1632 return &conf;
1633}
1634
1635static void coreaudio_audio_fini(void *opaque)
1636{
1637 NOREF(opaque);
1638}
1639
1640static struct audio_option coreaudio_options[] =
1641{
1642 {"BUFFER_SIZE", AUD_OPT_INT, &conf.cBufferFrames,
1643 "Size of the buffer in frames", NULL, 0},
1644 {NULL, 0, NULL, NULL, NULL, 0}
1645};
1646
1647static struct audio_pcm_ops coreaudio_pcm_ops =
1648{
1649 coreaudio_init_out,
1650 coreaudio_fini_out,
1651 coreaudio_run_out,
1652 coreaudio_write,
1653 coreaudio_ctl_out,
1654
1655 coreaudio_init_in,
1656 coreaudio_fini_in,
1657 coreaudio_run_in,
1658 coreaudio_read,
1659 coreaudio_ctl_in
1660};
1661
1662struct audio_driver coreaudio_audio_driver =
1663{
1664 INIT_FIELD(name =) "coreaudio",
1665 INIT_FIELD(descr =)
1666 "CoreAudio http://developer.apple.com/audio/coreaudio.html",
1667 INIT_FIELD(options =) coreaudio_options,
1668 INIT_FIELD(init =) coreaudio_audio_init,
1669 INIT_FIELD(fini =) coreaudio_audio_fini,
1670 INIT_FIELD(pcm_ops =) &coreaudio_pcm_ops,
1671 INIT_FIELD(can_be_default =) 1,
1672 INIT_FIELD(max_voices_out =) 1,
1673 INIT_FIELD(max_voices_in =) 1,
1674 INIT_FIELD(voice_size_out =) sizeof(caVoiceOut),
1675 INIT_FIELD(voice_size_in =) sizeof(caVoiceIn)
1676};
1677