VirtualBox

source: vbox/trunk/src/VBox/Devices/Audio/coreaudio.c@ 25917

Last change on this file since 25917 was 25917, checked in by vboxsync, 15 years ago

Audio-OSX: burn fix

  • Property svn:eol-style set to native
File size: 64.2 KB
1/* $Id$ */
2/** @file
3 * VBox audio devices: Mac OS X CoreAudio audio driver
4 */
5
6/*
7 * Copyright (C) 2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#define LOG_GROUP LOG_GROUP_DEV_AUDIO
23#include <VBox/log.h>
24#include <iprt/mem.h>
25#include <iprt/cdefs.h>
26
27#define AUDIO_CAP "coreaudio"
28#include "vl_vbox.h"
29#include "audio.h"
30#include "audio_int.h"
31
32#include <CoreAudio/CoreAudio.h>
33#include <CoreServices/CoreServices.h>
34#include <AudioUnit/AudioUnit.h>
35#include <AudioToolbox/AudioConverter.h>
36
37/* todo:
38 * - checking for properties changes of the devices
39 * - checking for changing of the default device
40 * - let the user set the device used (use config)
41 * - try to set frame size (use config)
42 * - maybe make sure the threads are immediately stopped if playing/recording stops
43 */
44
45/* Most of this is based on:
46 * http://developer.apple.com/mac/library/technotes/tn2004/tn2097.html
47 * http://developer.apple.com/mac/library/technotes/tn2002/tn2091.html
48 * http://developer.apple.com/mac/library/qa/qa2007/qa1533.html
49 * http://developer.apple.com/mac/library/qa/qa2001/qa1317.html
50 * http://developer.apple.com/mac/library/documentation/AudioUnit/Reference/AUComponentServicesReference/Reference/reference.html
51 */
52
53/*******************************************************************************
54 *
55 * IO Ring Buffer section
56 *
57 ******************************************************************************/
58
59/* Implementation of a lock-free ring buffer which can be used in a
60 * multi-threaded environment. Note that only the acquire, release and getter
61 * functions are thread aware. So don't call reset while the ring buffer is
62 * still in use. */
63typedef struct IORINGBUFFER
64{
65 /* The current read position in the buffer */
66 uint32_t uReadPos;
67 /* The current write position in the buffer */
68 uint32_t uWritePos;
69 /* How much space of the buffer is currently in use */
70 volatile uint32_t cBufferUsed;
71 /* How big is the buffer */
72 uint32_t cBufSize;
73 /* The buffer itself */
74 char *pBuffer;
75} IORINGBUFFER;
77/* Pointer to a ring buffer structure */
77typedef IORINGBUFFER* PIORINGBUFFER;
78
79
80static void IORingBufferCreate(PIORINGBUFFER *ppBuffer, uint32_t cSize)
81{
82 PIORINGBUFFER pTmpBuffer;
83
84 AssertPtr(ppBuffer);
85
86 *ppBuffer = NULL;
87 pTmpBuffer = RTMemAllocZ(sizeof(IORINGBUFFER));
88 if (pTmpBuffer)
89 {
90 pTmpBuffer->pBuffer = RTMemAlloc(cSize);
91 if(pTmpBuffer->pBuffer)
92 {
93 pTmpBuffer->cBufSize = cSize;
94 *ppBuffer = pTmpBuffer;
95 }
96 else
97 RTMemFree(pTmpBuffer);
98 }
99}
100
101static void IORingBufferDestroy(PIORINGBUFFER pBuffer)
102{
103 if (pBuffer)
104 {
105 if (pBuffer->pBuffer)
106 RTMemFree(pBuffer->pBuffer);
107 RTMemFree(pBuffer);
108 }
109}
110
111DECL_FORCE_INLINE(void) IORingBufferReset(PIORINGBUFFER pBuffer)
112{
113 AssertPtr(pBuffer);
114
115 pBuffer->uReadPos = 0;
116 pBuffer->uWritePos = 0;
117 pBuffer->cBufferUsed = 0;
118}
119
120DECL_FORCE_INLINE(uint32_t) IORingBufferFree(PIORINGBUFFER pBuffer)
121{
122 AssertPtr(pBuffer);
123 return pBuffer->cBufSize - pBuffer->cBufferUsed;
124}
125
126DECL_FORCE_INLINE(uint32_t) IORingBufferUsed(PIORINGBUFFER pBuffer)
127{
128 AssertPtr(pBuffer);
129 return pBuffer->cBufferUsed;
130}
131
132DECL_FORCE_INLINE(uint32_t) IORingBufferSize(PIORINGBUFFER pBuffer)
133{
134 AssertPtr(pBuffer);
135 return pBuffer->cBufSize;
136}
137
138static void IORingBufferAquireReadBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
139{
140 uint32_t uUsed = 0;
141 uint32_t uSize = 0;
142
143 AssertPtr(pBuffer);
144
145 *ppStart = 0;
146 *pcSize = 0;
147
148 /* How much is in use? */
149 uUsed = ASMAtomicAddU32(&pBuffer->cBufferUsed, 0);
150 if (uUsed > 0)
151 {
152 /* Take the minimum of the requested size, the contiguous read block up
153 * to the end of the buffer and the currently used size. */
154 uSize = RT_MIN(cReqSize, RT_MIN(pBuffer->cBufSize - pBuffer->uReadPos, uUsed));
155 if (uSize > 0)
156 {
157 /* Return a pointer to the current read
158 * position. */
159 *ppStart = pBuffer->pBuffer + pBuffer->uReadPos;
160 *pcSize = uSize;
161 }
162 }
163}
164
165DECL_FORCE_INLINE(void) IORingBufferReleaseReadBlock(PIORINGBUFFER pBuffer, uint32_t cSize)
166{
167 AssertPtr(pBuffer);
168
169 /* Split at the end of the buffer. */
170 pBuffer->uReadPos = (pBuffer->uReadPos + cSize) % pBuffer->cBufSize;
171
172 ASMAtomicSubU32((int32_t*)&pBuffer->cBufferUsed, cSize);
173}
174
175static void IORingBufferAquireWriteBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
176{
177 uint32_t uFree;
178 uint32_t uSize;
179
180 AssertPtr(pBuffer);
181
182 *ppStart = 0;
183 *pcSize = 0;
184
185 /* How much is free? */
186 uFree = pBuffer->cBufSize - ASMAtomicAddU32(&pBuffer->cBufferUsed, 0);
187 if (uFree > 0)
188 {
189 /* Take the minimum of the requested size, the contiguous write block up
190 * to the end of the buffer and the currently free size. */
191 uSize = RT_MIN(cReqSize, RT_MIN(pBuffer->cBufSize - pBuffer->uWritePos, uFree));
192 if (uSize > 0)
193 {
194 /* Return a pointer to the current write
195 * position. */
196 *ppStart = pBuffer->pBuffer + pBuffer->uWritePos;
197 *pcSize = uSize;
198 }
199 }
200}
201
202DECL_FORCE_INLINE(void) IORingBufferReleaseWriteBlock(PIORINGBUFFER pBuffer, uint32_t cSize)
203{
204 AssertPtr(pBuffer);
205
206 /* Split at the end of the buffer. */
207 pBuffer->uWritePos = (pBuffer->uWritePos + cSize) % pBuffer->cBufSize;
208
209 ASMAtomicAddU32(&pBuffer->cBufferUsed, cSize);
210}
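/* Illustrative usage sketch (not part of the original driver): the ring buffer
 * above is meant for one producer and one consumer thread. Each side acquires
 * a block, fills or consumes it, and releases it again. The function name below
 * is hypothetical and the block is disabled; it only documents the intended
 * call pattern of the API defined above. */
#if 0 /* example only */
static void ioRingBufferUsageExample(void)
{
    PIORINGBUFFER pBuf = NULL;
    char *pcBlock = NULL;
    uint32_t cb = 0;

    IORingBufferCreate(&pBuf, 4096);
    if (!pBuf)
        return;

    /* Producer side: acquire free space, fill it, then publish it. */
    IORingBufferAquireWriteBlock(pBuf, 128, &pcBlock, &cb);
    if (cb > 0)
    {
        /* ... copy up to cb bytes of audio data to pcBlock here ... */
        IORingBufferReleaseWriteBlock(pBuf, cb);
    }

    /* Consumer side: acquire pending data, process it, then free the space. */
    IORingBufferAquireReadBlock(pBuf, 128, &pcBlock, &cb);
    if (cb > 0)
    {
        /* ... consume cb bytes from pcBlock here ... */
        IORingBufferReleaseReadBlock(pBuf, cb);
    }

    IORingBufferDestroy(pBuf);
}
#endif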
211
212/*******************************************************************************
213 *
214 * Helper function section
215 *
216 ******************************************************************************/
217
218#if DEBUG
219static void caDebugOutputAudioStreamBasicDescription(const char *pszDesc, const AudioStreamBasicDescription *pStreamDesc)
220{
221 char pszSampleRate[32];
222 Log(("%s AudioStreamBasicDescription:\n", pszDesc));
223 Log(("CoreAudio: Format ID: %RU32 (%c%c%c%c)\n", pStreamDesc->mFormatID, RT_BYTE4(pStreamDesc->mFormatID), RT_BYTE3(pStreamDesc->mFormatID), RT_BYTE2(pStreamDesc->mFormatID), RT_BYTE1(pStreamDesc->mFormatID)));
224 Log(("CoreAudio: Flags: %RU32", pStreamDesc->mFormatFlags));
225 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsFloat)
226 Log((" Float"));
227 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsBigEndian)
228 Log((" BigEndian"));
229 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsSignedInteger)
230 Log((" SignedInteger"));
231 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsPacked)
232 Log((" Packed"));
233 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsAlignedHigh)
234 Log((" AlignedHigh"));
235 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsNonInterleaved)
236 Log((" NonInterleaved"));
237 if (pStreamDesc->mFormatFlags & kAudioFormatFlagIsNonMixable)
238 Log((" NonMixable"));
239 if (pStreamDesc->mFormatFlags & kAudioFormatFlagsAreAllClear)
240 Log((" AllClear"));
241 Log(("\n"));
242 snprintf(pszSampleRate, 32, "%.2f", (float)pStreamDesc->mSampleRate);
243 Log(("CoreAudio: SampleRate: %s\n", pszSampleRate));
244 Log(("CoreAudio: ChannelsPerFrame: %RU32\n", pStreamDesc->mChannelsPerFrame));
245 Log(("CoreAudio: FramesPerPacket: %RU32\n", pStreamDesc->mFramesPerPacket));
246 Log(("CoreAudio: BitsPerChannel: %RU32\n", pStreamDesc->mBitsPerChannel));
247 Log(("CoreAudio: BytesPerFrame: %RU32\n", pStreamDesc->mBytesPerFrame));
248 Log(("CoreAudio: BytesPerPacket: %RU32\n", pStreamDesc->mBytesPerPacket));
249}
250#endif /* DEBUG */
251
252static void caAudioSettingsToAudioStreamBasicDescription(const audsettings_t *pAS, AudioStreamBasicDescription *pStreamDesc)
253{
254 pStreamDesc->mFormatID = kAudioFormatLinearPCM;
255 pStreamDesc->mFormatFlags = kAudioFormatFlagIsPacked;
256 pStreamDesc->mFramesPerPacket = 1;
257 pStreamDesc->mSampleRate = (Float64)pAS->freq;
258 pStreamDesc->mChannelsPerFrame = pAS->nchannels;
259 switch (pAS->fmt)
260 {
261 case AUD_FMT_U8:
262 {
263 pStreamDesc->mBitsPerChannel = 8;
264 break;
265 }
266 case AUD_FMT_S8:
267 {
268 pStreamDesc->mBitsPerChannel = 8;
269 pStreamDesc->mFormatFlags |= kAudioFormatFlagIsSignedInteger;
270 break;
271 }
272 case AUD_FMT_U16:
273 {
274 pStreamDesc->mBitsPerChannel = 16;
275 break;
276 }
277 case AUD_FMT_S16:
278 {
279 pStreamDesc->mBitsPerChannel = 16;
280 pStreamDesc->mFormatFlags |= kAudioFormatFlagIsSignedInteger;
281 break;
282 }
283#ifdef PA_SAMPLE_S32LE
284 case AUD_FMT_U32:
285 {
286 pStreamDesc->mBitsPerChannel = 32;
287 break;
288 }
289 case AUD_FMT_S32:
290 {
291 pStreamDesc->mBitsPerChannel = 32;
292 pStreamDesc->mFormatFlags |= kAudioFormatFlagIsSignedInteger;
293 break;
294 }
295#endif
296 default:
297 break;
298 }
299 pStreamDesc->mBytesPerFrame = pStreamDesc->mChannelsPerFrame * (pStreamDesc->mBitsPerChannel / 8);
300 pStreamDesc->mBytesPerPacket = pStreamDesc->mFramesPerPacket * pStreamDesc->mBytesPerFrame;
301}
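/* Illustrative example (hypothetical settings, not taken from a real call):
 * for an audsettings_t with freq = 44100, nchannels = 2 and fmt = AUD_FMT_S16
 * the function above produces a linear PCM description with the flags
 * Packed | SignedInteger, mSampleRate = 44100, mChannelsPerFrame = 2,
 * mBitsPerChannel = 16, mBytesPerFrame = 4 and mBytesPerPacket = 4. */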
302
303static OSStatus caSetFrameBufferSize(AudioDeviceID device, bool fInput, UInt32 cReqSize, UInt32 *pcActSize)
304{
305 OSStatus err = noErr;
306 UInt32 cSize = 0;
307 AudioValueRange *pRange = NULL;
308 size_t a = 0;
309 Float64 cMin = -1;
310 Float64 cMax = -1;
311
312 /* First try to set the new frame buffer size. */
313 AudioDeviceSetProperty(device,
314 NULL,
315 0,
316 fInput,
317 kAudioDevicePropertyBufferFrameSize,
318 sizeof(cReqSize),
319 &cReqSize);
320 /* Check if it really was set. */
321 cSize = sizeof(*pcActSize);
322 err = AudioDeviceGetProperty(device,
323 0,
324 fInput,
325 kAudioDevicePropertyBufferFrameSize,
326 &cSize,
327 pcActSize);
328 if (RT_UNLIKELY(err != noErr))
329 return err;
330 /* If both sizes are the same, we are done. */
331 if (cReqSize == *pcActSize)
332 return noErr;
333 /* If not, we have to check the limits of the device. First get the size of
334 the buffer size range property. */
335 err = AudioDeviceGetPropertyInfo(device,
336 0,
337 fInput,
338 kAudioDevicePropertyBufferSizeRange,
339 &cSize,
340 NULL);
341 if (RT_UNLIKELY(err != noErr))
342 return err;
343 pRange = RTMemAllocZ(cSize);
344 if (VALID_PTR(pRange))
345 {
346 err = AudioDeviceGetProperty(device,
347 0,
348 fInput,
349 kAudioDevicePropertyBufferSizeRange,
350 &cSize,
351 pRange);
352 if (RT_LIKELY(err == noErr))
353 {
354 for (a=0; a < cSize/sizeof(AudioValueRange); ++a)
355 {
356 /* Search for the absolute minimum. */
357 if ( pRange[a].mMinimum < cMin
358 || cMin == -1)
359 cMin = pRange[a].mMinimum;
360 /* Search for the best maximum which isn't bigger than
361 cReqSize. */
362 if (pRange[a].mMaximum < cReqSize)
363 {
364 if (pRange[a].mMaximum > cMax)
365 cMax = pRange[a].mMaximum;
366 }
367 }
368 if (cMax == -1)
369 cMax = cMin;
370 cReqSize = cMax;
371 /* First try to set the new frame buffer size. */
372 AudioDeviceSetProperty(device,
373 NULL,
374 0,
375 fInput,
376 kAudioDevicePropertyBufferFrameSize,
377 sizeof(cReqSize),
378 &cReqSize);
379 /* Check if it really was set. */
380 cSize = sizeof(*pcActSize);
381 err = AudioDeviceGetProperty(device,
382 0,
383 fInput,
384 kAudioDevicePropertyBufferFrameSize,
385 &cSize,
386 pcActSize);
387 }
388 }
389 else
390 return notEnoughMemoryErr;
391
392 RTMemFree(pRange);
393 return err;
394}
395
396DECL_FORCE_INLINE(bool) caIsRunning(AudioDeviceID deviceID)
397{
398 OSStatus err = noErr;
399 UInt32 uFlag = 0;
400 UInt32 uSize = sizeof(uFlag);
401 err = AudioDeviceGetProperty(deviceID,
402 0,
403 0,
404 kAudioDevicePropertyDeviceIsRunning,
405 &uSize,
406 &uFlag);
407 if (err != kAudioHardwareNoError)
408 LogRel(("CoreAudio: Could not determine whether the device is running (%RI32)\n", err));
409 return uFlag >= 1;
410}
411
412/*******************************************************************************
413 *
414 * Global structures section
415 *
416 ******************************************************************************/
417
418struct
419{
420 int cBufferFrames;
421} conf =
422{
423 INIT_FIELD(.cBufferFrames =) 512
424};
425
426typedef struct caVoiceOut
427{
428 /* HW voice output structure defined by VBox */
429 HWVoiceOut hw;
430 /* Stream description which is default on the device */
431 AudioStreamBasicDescription deviceFormat;
432 /* Stream description which is selected for using by VBox */
433 AudioStreamBasicDescription streamFormat;
434 /* The audio device ID of the currently used device */
435 AudioDeviceID audioDeviceId;
436 /* The AudioUnit used */
437 AudioUnit audioUnit;
438 /* A ring buffer for transferring data to the playback thread */
439 PIORINGBUFFER pBuf;
440} caVoiceOut;
441
442typedef struct caVoiceIn
443{
444 /* HW voice input structure defined by VBox */
445 HWVoiceIn hw;
446 /* Stream description which is default on the device */
447 AudioStreamBasicDescription deviceFormat;
448 /* Stream description which is selected for using by VBox */
449 AudioStreamBasicDescription streamFormat;
450 /* The audio device ID of the currently used device */
451 AudioDeviceID audioDeviceId;
452 /* The AudioUnit used */
453 AudioUnit audioUnit;
454 /* The audio converter if necessary */
455 AudioConverterRef converter;
456 /* A temporary position value used in the caConverterCallback function */
457 uint32_t rpos;
458 /* The ratio between the device & the stream sample rate */
459 Float64 sampleRatio;
460 /* An extra buffer used for render the audio data in the recording thread */
461 AudioBufferList bufferList;
462 /* A ring buffer for transferring data from the recording thread */
463 PIORINGBUFFER pBuf;
464} caVoiceIn;
465
466/* Error code which indicates "End of data" */
467static const OSStatus caConverterEOFDErr = 0x656F6664; /* 'eofd' */
468
469/*******************************************************************************
470 *
471 * CoreAudio output section
472 *
473 ******************************************************************************/
474
475/* callback to feed audio output buffer */
476static OSStatus caPlaybackCallback(void* inRefCon,
477 AudioUnitRenderActionFlags* ioActionFlags,
478 const AudioTimeStamp* inTimeStamp,
479 UInt32 inBusNumber,
480 UInt32 inNumberFrames,
481 AudioBufferList* ioData)
482{
483 uint32_t csAvail = 0;
484 uint32_t cbToRead = 0;
485 uint32_t csToRead = 0;
486 uint32_t csReads = 0;
487 char *pcSrc = NULL;
488
489 caVoiceOut *caVoice = (caVoiceOut *) inRefCon;
490
491 /* How much space is used in the ring buffer? */
492 csAvail = IORingBufferUsed(caVoice->pBuf) >> caVoice->hw.info.shift; /* bytes -> samples */
493 /* How much space is available in the core audio buffer. Use the smaller
494 * of the two. */
495 csAvail = RT_MIN(csAvail, ioData->mBuffers[0].mDataByteSize >> caVoice->hw.info.shift);
496
497 Log2(("CoreAudio: [Output] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
498
499 /* Iterate as long as data is available */
500 while(csReads < csAvail)
501 {
502 /* How much is left? */
503 csToRead = csAvail - csReads;
504 cbToRead = csToRead << caVoice->hw.info.shift; /* samples -> bytes */
505 Log2(("CoreAudio: [Output] Try reading %RU32 samples (%RU32 bytes)\n", csToRead, cbToRead));
506 /* Try to acquire the necessary block from the ring buffer. */
507 IORingBufferAquireReadBlock(caVoice->pBuf, cbToRead, &pcSrc, &cbToRead);
508 /* How much did we get? */
509 csToRead = cbToRead >> caVoice->hw.info.shift; /* bytes -> samples */
510 Log2(("CoreAudio: [Output] There are %RU32 samples (%RU32 bytes) available\n", csToRead, cbToRead));
511 /* Break if nothing is used anymore. */
512 if (RT_UNLIKELY(cbToRead == 0))
513 break;
514 /* Copy the data from our ring buffer to the core audio buffer. */
515 memcpy((char*)ioData->mBuffers[0].mData + (csReads << caVoice->hw.info.shift), pcSrc, cbToRead);
516 /* Release the read buffer, so it could be used for new data. */
517 IORingBufferReleaseReadBlock(caVoice->pBuf, cbToRead);
518 /* How much have we read so far. */
519 csReads += csToRead;
520 }
521 /* Report how many bytes were actually written to the core audio buffer. */
522 ioData->mBuffers[0].mDataByteSize = csReads << caVoice->hw.info.shift; /* samples -> bytes */
523
524 Log2(("CoreAudio: [Output] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
525
526 return noErr;
527}
528
529static int coreaudio_run_out(HWVoiceOut *hw)
530{
531 uint32_t csAvail = 0;
532 uint32_t cbToWrite = 0;
533 uint32_t csToWrite = 0;
534 uint32_t csWritten = 0;
535 char *pcDst = NULL;
536 st_sample_t *psSrc = NULL;
537
538 caVoiceOut *caVoice = (caVoiceOut *) hw;
539
540 /* How much space is available in the ring buffer */
541 csAvail = IORingBufferFree(caVoice->pBuf) >> hw->info.shift; /* bytes -> samples */
542 /* How much data is available. Use the smaller of the two. */
543 csAvail = RT_MIN(csAvail, (uint32_t)audio_pcm_hw_get_live_out(hw));
544
545 Log2(("CoreAudio: [Output] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << hw->info.shift));
546
547 /* Iterate as long as data is available */
548 while (csWritten < csAvail)
549 {
550 /* How much is left? Split request at the end of our samples buffer. */
551 csToWrite = RT_MIN(csAvail - csWritten, (uint32_t)(hw->samples - hw->rpos));
552 cbToWrite = csToWrite << hw->info.shift; /* samples -> bytes */
553 Log2(("CoreAudio: [Output] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
554 /* Try to acquire the necessary space from the ring buffer. */
555 IORingBufferAquireWriteBlock(caVoice->pBuf, cbToWrite, &pcDst, &cbToWrite);
556 /* How much did we get? */
557 csToWrite = cbToWrite >> hw->info.shift;
558 Log2(("CoreAudio: [Output] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
559 /* Break if nothing is free anymore. */
560 if (RT_UNLIKELY(cbToWrite == 0))
561 break;
562 /* Copy the data from our mix buffer to the ring buffer. */
563 psSrc = hw->mix_buf + hw->rpos;
564 hw->clip((uint8_t*)pcDst, psSrc, csToWrite);
565 /* Release the ring buffer, so the read thread could start reading this data. */
566 IORingBufferReleaseWriteBlock(caVoice->pBuf, cbToWrite);
567 hw->rpos = (hw->rpos + csToWrite) % hw->samples;
568 /* How much have we written so far. */
569 csWritten += csToWrite;
570 }
571
572 Log2(("CoreAudio: [Output] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << hw->info.shift));
573
574 /* Return the count of samples we have processed. */
575 return csWritten;
576}
577
578static int coreaudio_write(SWVoiceOut *sw, void *buf, int len)
579{
580 return audio_pcm_sw_write (sw, buf, len);
581}
582
583static int coreaudio_ctl_out(HWVoiceOut *hw, int cmd, ...)
584{
585 OSStatus err = noErr;
586 caVoiceOut *caVoice = (caVoiceOut *) hw;
587
588 switch (cmd)
589 {
590 case VOICE_ENABLE:
591 {
592 /* Only start the device if it is actually stopped */
593 if (!caIsRunning(caVoice->audioDeviceId))
594 {
595 IORingBufferReset(caVoice->pBuf);
596 err = AudioOutputUnitStart(caVoice->audioUnit);
597 if (RT_UNLIKELY(err != noErr))
598 {
599 LogRel(("CoreAudio: [Output] Failed to start playback (%RI32)\n", err));
600 return -1;
601 }
602 }
603 break;
604 }
605 case VOICE_DISABLE:
606 {
607 /* Only stop the device if it is actually running */
608 if (caIsRunning(caVoice->audioDeviceId))
609 {
610 err = AudioOutputUnitStop(caVoice->audioUnit);
611 if (RT_UNLIKELY(err != noErr))
612 {
613 LogRel(("CoreAudio: [Output] Failed to stop playback (%RI32)\n", err));
614 return -1;
615 }
616 err = AudioUnitReset(caVoice->audioUnit,
617 kAudioUnitScope_Input,
618 0);
619 if (RT_UNLIKELY(err != noErr))
620 {
621 LogRel(("CoreAudio: [Output] Failed to reset AudioUnit (%RI32)\n", err));
622 return -1;
623 }
624 }
625 break;
626 }
627 }
628 return 0;
629}
630
631static int coreaudio_init_out(HWVoiceOut *hw, audsettings_t *as)
632{
633 OSStatus err = noErr;
634 UInt32 uSize = 0; /* temporary size of properties */
635 UInt32 uFlag = 0; /* for setting flags */
636 CFStringRef name; /* for the temporary device name fetching */
637 const char *pszName;
638 ComponentDescription cd; /* description for an audio component */
639 Component cp; /* an audio component */
640 AURenderCallbackStruct cb; /* holds the callback structure */
641 UInt32 cFrames; /* default frame count */
642
643 caVoiceOut *caVoice = (caVoiceOut *) hw;
644
645 caVoice->audioUnit = NULL;
646 caVoice->audioDeviceId = kAudioDeviceUnknown;
647
648 /* Initialize the hardware info section with the audio settings */
649 audio_pcm_init_info(&hw->info, as);
650
651 /* Fetch the default audio output device currently in use */
652 uSize = sizeof(caVoice->audioDeviceId);
653 err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
654 &uSize,
655 &caVoice->audioDeviceId);
656 if (RT_UNLIKELY(err != noErr))
657 {
658 LogRel(("CoreAudio: [Output] Unable to find default output device (%RI32)\n", err));
659 return -1;
660 }
661
662 /* Try to get the name of the default output device and log it. It's not
663 * fatal if it fails. */
664 uSize = sizeof(CFStringRef);
665 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
666 0,
667 0,
668 kAudioObjectPropertyName,
669 &uSize,
670 &name);
671 if (RT_LIKELY(err == noErr))
672 {
673 pszName = CFStringGetCStringPtr(name, kCFStringEncodingMacRoman);
674 if (pszName)
675 LogRel(("CoreAudio: Using default output device: %s\n", pszName));
676 CFRelease(name);
677 }
678 else
679 LogRel(("CoreAudio: [Output] Unable to get output device name (%RI32)\n", err));
680
681 /* Get the default frame buffer size, so that we can set up our internal
682 * buffers. */
683 uSize = sizeof(cFrames);
684 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
685 0,
686 false,
687 kAudioDevicePropertyBufferFrameSize,
688 &uSize,
689 &cFrames);
690 if (RT_UNLIKELY(err != noErr))
691 {
692 LogRel(("CoreAudio: [Output] Failed to get frame buffer size of the audio device (%RI32)\n", err));
693 return -1;
694 }
695 /* Set the frame buffer size and honor any minimum/maximum restrictions on
696 the device. */
697 err = caSetFrameBufferSize(caVoice->audioDeviceId,
698 false,
699 cFrames,
700 &cFrames);
701 if (RT_UNLIKELY(err != noErr))
702 {
703 LogRel(("CoreAudio: [Output] Failed to set frame buffer size on the audio device (%RI32)\n", err));
704 return -1;
705 }
706
707 cd.componentType = kAudioUnitType_Output;
708 cd.componentSubType = kAudioUnitSubType_HALOutput;
709 cd.componentManufacturer = kAudioUnitManufacturer_Apple;
710 cd.componentFlags = 0;
711 cd.componentFlagsMask = 0;
712
713 /* Try to find the default HAL output component. */
714 cp = FindNextComponent(NULL, &cd);
715 if (RT_UNLIKELY(cp == 0))
716 {
717 LogRel(("CoreAudio: [Output] Failed to find HAL output component\n"));
718 return -1;
719 }
720
721 /* Open the default HAL output component. */
722 err = OpenAComponent(cp, &caVoice->audioUnit);
723 if (RT_UNLIKELY(err != noErr))
724 {
725 LogRel(("CoreAudio: [Output] Failed to open output component (%RI32)\n", err));
726 return -1;
727 }
728
729 /* Switch the I/O mode for output to on. */
730 uFlag = 1;
731 err = AudioUnitSetProperty(caVoice->audioUnit,
732 kAudioOutputUnitProperty_EnableIO,
733 kAudioUnitScope_Output,
734 0,
735 &uFlag,
736 sizeof(uFlag));
737 if (RT_UNLIKELY(err != noErr))
738 {
739 LogRel(("CoreAudio: [Output] Failed to set output I/O mode enabled (%RI32)\n", err));
740 return -1;
741 }
742
743 /* Set the default audio output device as the device for the new AudioUnit. */
744 err = AudioUnitSetProperty(caVoice->audioUnit,
745 kAudioOutputUnitProperty_CurrentDevice,
746 kAudioUnitScope_Output,
747 0,
748 &caVoice->audioDeviceId,
749 sizeof(caVoice->audioDeviceId));
750 if (RT_UNLIKELY(err != noErr))
751 {
752 LogRel(("CoreAudio: [Output] Failed to set current device (%RI32)\n", err));
753 return -1;
754 }
755
756 /* CoreAudio will inform us on a second thread when it needs more data for
757 * output. Therefore register a callback function which will provide the new
758 * data. */
759 cb.inputProc = caPlaybackCallback;
760 cb.inputProcRefCon = caVoice;
761
762 err = AudioUnitSetProperty(caVoice->audioUnit,
763 kAudioUnitProperty_SetRenderCallback,
764 kAudioUnitScope_Input,
765 0,
766 &cb,
767 sizeof(cb));
768 if (RT_UNLIKELY(err != noErr))
769 {
770 LogRel(("CoreAudio: [Output] Failed to set callback (%RI32)\n", err));
771 return -1;
772 }
773
774 /* Set the quality of the output render to the maximum. */
775/* uFlag = kRenderQuality_High;*/
776/* err = AudioUnitSetProperty(caVoice->audioUnit,*/
777/* kAudioUnitProperty_RenderQuality,*/
778/* kAudioUnitScope_Global,*/
779/* 0,*/
780/* &uFlag,*/
781/* sizeof(uFlag));*/
782 /* Not fatal */
783/* if (RT_UNLIKELY(err != noErr))*/
784/* LogRel(("CoreAudio: [Output] Failed to set the render quality to the maximum (%RI32)\n", err));*/
785
786 /* Fetch the current stream format of the device. */
787 uSize = sizeof(caVoice->deviceFormat);
788 err = AudioUnitGetProperty(caVoice->audioUnit,
789 kAudioUnitProperty_StreamFormat,
790 kAudioUnitScope_Input,
791 0,
792 &caVoice->deviceFormat,
793 &uSize);
794 if (RT_UNLIKELY(err != noErr))
795 {
796 LogRel(("CoreAudio: [Output] Failed to get device format (%RI32)\n", err));
797 return -1;
798 }
799
800 /* Create an AudioStreamBasicDescription based on the audio settings of
801 * VirtualBox. */
802 caAudioSettingsToAudioStreamBasicDescription(as, &caVoice->streamFormat);
803
804#if DEBUG
805 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Output] device", &caVoice->deviceFormat);
806 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Output] output", &caVoice->streamFormat);
807#endif /* DEBUG */
808
809 /* Set the device format description for the stream. */
810 err = AudioUnitSetProperty(caVoice->audioUnit,
811 kAudioUnitProperty_StreamFormat,
812 kAudioUnitScope_Input,
813 0,
814 &caVoice->streamFormat,
815 sizeof(caVoice->streamFormat));
816 if (RT_UNLIKELY(err != noErr))
817 {
818 LogRel(("CoreAudio: [Output] Failed to set stream format (%RI32)\n", err));
819 return -1;
820 }
821
822 uSize = sizeof(caVoice->deviceFormat);
823 err = AudioUnitGetProperty(caVoice->audioUnit,
824 kAudioUnitProperty_StreamFormat,
825 kAudioUnitScope_Input,
826 0,
827 &caVoice->deviceFormat,
828 &uSize);
829 if (RT_UNLIKELY(err != noErr))
830 {
831 LogRel(("CoreAudio: [Output] Failed to get device format (%RI32)\n", err));
832 return -1;
833 }
834
835 /* Also set the frame buffer size of the device on our AudioUnit. This
836 should make sure that the frame count we receive in the render
837 thread is as we expect. */
838 err = AudioUnitSetProperty(caVoice->audioUnit,
839 kAudioUnitProperty_MaximumFramesPerSlice,
840 kAudioUnitScope_Global,
841 0,
842 &cFrames,
843 sizeof(cFrames));
844 if (RT_UNLIKELY(err != noErr))
845 {
846 LogRel(("CoreAudio: [Output] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
847 return -1;
848 }
849
850 /* Finally initialize the new AudioUnit. */
851 err = AudioUnitInitialize(caVoice->audioUnit);
852 if (RT_UNLIKELY(err != noErr))
853 {
854 LogRel(("CoreAudio: [Output] Failed to initialize the AudioUnit (%RI32)\n", err));
855 return -1;
856 }
857
858 /* There are buggy devices (e.g. my bluetooth headset) which don't honor
859 * the frame buffer size set in the previous calls. So finally get the
860 * frame buffer size after the AudioUnit was initialized. */
861 uSize = sizeof(cFrames);
862 err = AudioUnitGetProperty(caVoice->audioUnit,
863 kAudioUnitProperty_MaximumFramesPerSlice,
864 kAudioUnitScope_Global,
865 0,
866 &cFrames,
867 &uSize);
868 if (RT_UNLIKELY(err != noErr))
869 {
870 LogRel(("CoreAudio: [Output] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
871 return -1;
872 }
873
874 /* Create the internal ring buffer. */
875 hw->samples = cFrames * caVoice->streamFormat.mChannelsPerFrame;
876 IORingBufferCreate(&caVoice->pBuf, hw->samples << hw->info.shift);
877 if (!VALID_PTR(caVoice->pBuf))
878 {
879 LogRel(("CoreAudio: [Output] Failed to create internal ring buffer\n"));
880 AudioUnitUninitialize(caVoice->audioUnit);
881 return -1;
882 }
883
884 Log(("CoreAudio: [Output] HW samples: %d; Frame count: %RU32\n", hw->samples, cFrames));
885
886 return 0;
887}
888
889static void coreaudio_fini_out(HWVoiceOut *hw)
890{
891 int rc = 0;
892 OSStatus err = noErr;
893 caVoiceOut *caVoice = (caVoiceOut *) hw;
894
895 rc = coreaudio_ctl_out(hw, VOICE_DISABLE);
896 if (RT_LIKELY(rc == 0))
897 {
898 err = AudioUnitUninitialize(caVoice->audioUnit);
899 if (RT_LIKELY(err == noErr))
900 {
901 err = CloseComponent(caVoice->audioUnit);
902 if (RT_LIKELY(err == noErr))
903 {
904 caVoice->audioUnit = NULL;
905 caVoice->audioDeviceId = kAudioDeviceUnknown;
906 IORingBufferDestroy(caVoice->pBuf);
907 }
908 else
909 LogRel(("CoreAudio: [Output] Failed to close the AudioUnit (%RI32)\n", err));
910 }
911 else
912 LogRel(("CoreAudio: [Output] Failed to uninitialize the AudioUnit (%RI32)\n", err));
913 }
914 else
915 LogRel(("CoreAudio: [Output] Failed to stop playback (%RI32)\n", err));
916}
917
918/*******************************************************************************
919 *
920 * CoreAudio input section
921 *
922 ******************************************************************************/
923
924/* callback to convert audio input data from one format to another */
925static OSStatus caConverterCallback(AudioConverterRef inAudioConverter,
926 UInt32 *ioNumberDataPackets,
927 AudioBufferList *ioData,
928 AudioStreamPacketDescription **outDataPacketDescription,
929 void *inUserData)
930{
931 /* In principle we would have to check here whether the source is non-interleaved
932 * and, if so, go through all buffers, not only the first one. */
933 UInt32 cSize = 0;
934
935 caVoiceIn *caVoice = (caVoiceIn *) inUserData;
936
937 const AudioBufferList *pBufferList = &caVoice->bufferList;
938/* Log2(("converting .... ################ %RU32 %RU32 %RU32 %RU32 %RU32\n", *ioNumberDataPackets, bufferList->mBuffers[i].mNumberChannels, bufferList->mNumberBuffers, bufferList->mBuffers[i].mDataByteSize, ioData->mNumberBuffers));*/
939
940 /* Use the smaller of the packets to process and the packets available in
941 * the buffer. */
942 cSize = RT_MIN(*ioNumberDataPackets * caVoice->deviceFormat.mBytesPerPacket,
943 pBufferList->mBuffers[0].mDataByteSize - caVoice->rpos);
944 /* Set the new size on output, so the caller knows what we have processed. */
945 *ioNumberDataPackets = cSize / caVoice->deviceFormat.mBytesPerPacket;
946 /* If no data is available anymore we return with an error code. This error
947 * code will be returned from AudioConverterFillComplexBuffer. */
948 if (*ioNumberDataPackets == 0)
949 {
950 ioData->mBuffers[0].mDataByteSize = 0;
951 ioData->mBuffers[0].mData = NULL;
952 return caConverterEOFDErr;
953 }
954 else
955 {
956 ioData->mBuffers[0].mNumberChannels = pBufferList->mBuffers[0].mNumberChannels;
957 ioData->mBuffers[0].mDataByteSize = cSize;
958 ioData->mBuffers[0].mData = (char*)pBufferList->mBuffers[0].mData + caVoice->rpos;
959 caVoice->rpos += cSize;
960
961 /* Log2(("converting .... ################ %RU32 %RU32\n", size, caVoice->rpos));*/
962 }
963
964 return noErr;
965}
966
967/* callback to feed audio input buffer */
968static OSStatus caRecordingCallback(void* inRefCon,
969 AudioUnitRenderActionFlags* ioActionFlags,
970 const AudioTimeStamp* inTimeStamp,
971 UInt32 inBusNumber,
972 UInt32 inNumberFrames,
973 AudioBufferList* ioData)
974{
975 OSStatus err = noErr;
976 uint32_t csAvail = 0;
977 uint32_t csToWrite = 0;
978 uint32_t cbToWrite = 0;
979 uint32_t csWritten = 0;
980 char *pcDst = NULL;
981 AudioBufferList tmpList;
982 UInt32 ioOutputDataPacketSize = 0;
983
984 caVoiceIn *caVoice = (caVoiceIn *) inRefCon;
985
986 /* If nothing is pending return immediately. */
987 if (inNumberFrames == 0)
988 return noErr;
989
990 /* Are we using a converter? */
991 if (VALID_PTR(caVoice->converter))
992 {
993 /* Firstly render the data as usual */
994 caVoice->bufferList.mBuffers[0].mNumberChannels = caVoice->deviceFormat.mChannelsPerFrame;
995 caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->deviceFormat.mBytesPerFrame * inNumberFrames;
996 caVoice->bufferList.mBuffers[0].mData = RTMemAlloc(caVoice->bufferList.mBuffers[0].mDataByteSize);
997
998 err = AudioUnitRender(caVoice->audioUnit,
999 ioActionFlags,
1000 inTimeStamp,
1001 inBusNumber,
1002 inNumberFrames,
1003 &caVoice->bufferList);
1004 if(RT_UNLIKELY(err != noErr))
1005 {
1006 Log(("CoreAudio: [Input] Failed to render audio data (%RI32)\n", err));
1007 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1008 return err;
1009 }
1010
1011 /* How much space is free in the ring buffer? */
1012 csAvail = IORingBufferFree(caVoice->pBuf) >> caVoice->hw.info.shift; /* bytes -> samples */
1013 /* How much space is used in the core audio buffer. Use the smaller of
1014 * the two. */
1015 csAvail = RT_MIN(csAvail, (uint32_t)((caVoice->bufferList.mBuffers[0].mDataByteSize / caVoice->deviceFormat.mBytesPerFrame) * caVoice->sampleRatio));
1016
1017 Log2(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
1018 /* Initialize the temporary output buffer */
1019 tmpList.mNumberBuffers = 1;
1020 tmpList.mBuffers[0].mNumberChannels = caVoice->streamFormat.mChannelsPerFrame;
1021 /* Set the read position to zero. */
1022 caVoice->rpos = 0;
1023 /* Iterate as long as data is available */
1024 while(csWritten < csAvail)
1025 {
1026 /* How much is left? */
1027 csToWrite = csAvail - csWritten;
1028 cbToWrite = csToWrite << caVoice->hw.info.shift;
1029 Log2(("CoreAudio: [Input] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
1030 /* Try to acquire the necessary space from the ring buffer. */
1031 IORingBufferAquireWriteBlock(caVoice->pBuf, cbToWrite, &pcDst, &cbToWrite);
1032 /* How much did we get? */
1033 csToWrite = cbToWrite >> caVoice->hw.info.shift;
1034 Log2(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
1035 /* Break if nothing is free anymore. */
1036 if (RT_UNLIKELY(cbToWrite == 0))
1037 break;
1038
1039 /* Now set how much space is available for output */
1040 ioOutputDataPacketSize = cbToWrite / caVoice->streamFormat.mBytesPerPacket;
1041 /* Set our ring buffer as target. */
1042 tmpList.mBuffers[0].mDataByteSize = cbToWrite;
1043 tmpList.mBuffers[0].mData = pcDst;
1044 AudioConverterReset(caVoice->converter);
1045 err = AudioConverterFillComplexBuffer(caVoice->converter,
1046 caConverterCallback,
1047 caVoice,
1048 &ioOutputDataPacketSize,
1049 &tmpList,
1050 NULL);
1051 if( RT_UNLIKELY(err != noErr)
1052 && err != caConverterEOFDErr)
1053 {
1054 Log(("CoreAudio: [Input] Failed to convert audio data (%RI32:%c%c%c%c)\n", err, RT_BYTE4(err), RT_BYTE3(err), RT_BYTE2(err), RT_BYTE1(err)));
1055 break;
1056 }
1057 /* Check in any case what processed size is returned. It could be
1058 * much smaller than we expected. */
1059 cbToWrite = ioOutputDataPacketSize * caVoice->streamFormat.mBytesPerPacket;
1060 csToWrite = cbToWrite >> caVoice->hw.info.shift;
1061 /* Release the ring buffer, so the main thread could start reading this data. */
1062 IORingBufferReleaseWriteBlock(caVoice->pBuf, cbToWrite);
1063 csWritten += csToWrite;
1064 /* If the error is "End of Data" it means there is no data anymore
1065 * which could be converted. So end here now. */
1066 if (err == caConverterEOFDErr)
1067 break;
1068 }
1069 /* Cleanup */
1070 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1071 Log2(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
1072 }
1073 else
1074 {
1075 caVoice->bufferList.mBuffers[0].mNumberChannels = caVoice->streamFormat.mChannelsPerFrame;
1076 caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->streamFormat.mBytesPerFrame * inNumberFrames;
1077 caVoice->bufferList.mBuffers[0].mData = RTMemAlloc(caVoice->bufferList.mBuffers[0].mDataByteSize);
1078
1079 err = AudioUnitRender(caVoice->audioUnit,
1080 ioActionFlags,
1081 inTimeStamp,
1082 inBusNumber,
1083 inNumberFrames,
1084 &caVoice->bufferList);
1085 if(RT_UNLIKELY(err != noErr))
1086 {
1087 Log(("CoreAudio: [Input] Failed to render audio data (%RI32)\n", err));
1088 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1089 return err;
1090 }
1091
1092 /* How much space is free in the ring buffer? */
1093 csAvail = IORingBufferFree(caVoice->pBuf) >> caVoice->hw.info.shift; /* bytes -> samples */
1094 /* How much space is used in the core audio buffer. Use the smaller of
1095 * the two. */
1096 csAvail = RT_MIN(csAvail, caVoice->bufferList.mBuffers[0].mDataByteSize >> caVoice->hw.info.shift);
1097
1098 Log2(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
1099
1100 /* Iterate as long as data is available */
1101 while(csWritten < csAvail)
1102 {
1103 /* How much is left? */
1104 csToWrite = csAvail - csWritten;
1105 cbToWrite = csToWrite << caVoice->hw.info.shift;
1106 Log2(("CoreAudio: [Input] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
1107 /* Try to acquire the necessary space from the ring buffer. */
1108 IORingBufferAquireWriteBlock(caVoice->pBuf, cbToWrite, &pcDst, &cbToWrite);
1109 /* How much did we get? */
1110 csToWrite = cbToWrite >> caVoice->hw.info.shift;
1111 Log2(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
1112 /* Break if nothing is free anymore. */
1113 if (RT_UNLIKELY(cbToWrite == 0))
1114 break;
1115 /* Copy the data from the core audio buffer to the ring buffer. */
1116 memcpy(pcDst, (char*)caVoice->bufferList.mBuffers[0].mData + (csWritten << caVoice->hw.info.shift), cbToWrite);
1117 /* Release the ring buffer, so the main thread could start reading this data. */
1118 IORingBufferReleaseWriteBlock(caVoice->pBuf, cbToWrite);
1119 csWritten += csToWrite;
1120 }
1121 /* Cleanup */
1122 RTMemFree(caVoice->bufferList.mBuffers[0].mData);
1123
1124 Log2(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
1125 }
1126
1127 return err;
1128}
1129
1130static int coreaudio_run_in(HWVoiceIn *hw)
1131{
1132 uint32_t csAvail = 0;
1133 uint32_t cbToRead = 0;
1134 uint32_t csToRead = 0;
1135 uint32_t csReads = 0;
1136 char *pcSrc;
1137 st_sample_t *psDst;
1138
1139 caVoiceIn *caVoice = (caVoiceIn *) hw;
1140
1141 /* How much space is used in the ring buffer? */
1142 csAvail = IORingBufferUsed(caVoice->pBuf) >> hw->info.shift; /* bytes -> samples */
1143 /* How much space is available in the mix buffer. Use the smaller of
1144 * the two. */
1145 csAvail = RT_MIN(csAvail, (uint32_t)(hw->samples - audio_pcm_hw_get_live_in (hw)));
1146
1147 Log2(("CoreAudio: [Input] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
1148
1149 /* Iterate as long as data is available */
1150 while (csReads < csAvail)
1151 {
1152 /* How much is left? Split request at the end of our samples buffer. */
1153 csToRead = RT_MIN(csAvail - csReads, (uint32_t)(hw->samples - hw->wpos));
1154 cbToRead = csToRead << hw->info.shift;
1155 Log2(("CoreAudio: [Input] Try reading %RU32 samples (%RU32 bytes)\n", csToRead, cbToRead));
1156 /* Try to acquire the necessary block from the ring buffer. */
1157 IORingBufferAquireReadBlock(caVoice->pBuf, cbToRead, &pcSrc, &cbToRead);
1158 /* How much did we get? */
1159 csToRead = cbToRead >> hw->info.shift;
1160 Log2(("CoreAudio: [Input] There are %RU32 samples (%RU32 bytes) available\n", csToRead, cbToRead));
1161 /* Break if nothing is used anymore. */
1162 if (cbToRead == 0)
1163 break;
1164 /* Copy the data from our ring buffer to the mix buffer. */
1165 psDst = hw->conv_buf + hw->wpos;
1166 hw->conv(psDst, pcSrc, csToRead, &nominal_volume);
1167 /* Release the read buffer, so it could be used for new data. */
1168 IORingBufferReleaseReadBlock(caVoice->pBuf, cbToRead);
1169 hw->wpos = (hw->wpos + csToRead) % hw->samples;
1170 /* How much have we read so far. */
1171 csReads += csToRead;
1172 }
1173
1174 Log2(("CoreAudio: [Input] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
1175
1176 return csReads;
1177}
1178
1179static int coreaudio_read(SWVoiceIn *sw, void *buf, int size)
1180{
1181 return audio_pcm_sw_read (sw, buf, size);
1182}
1183
1184static int coreaudio_ctl_in(HWVoiceIn *hw, int cmd, ...)
1185{
1186 OSStatus err = noErr;
1187 caVoiceIn *caVoice = (caVoiceIn *) hw;
1188
1189 switch (cmd)
1190 {
1191 case VOICE_ENABLE:
1192 {
1193 /* Only start the device if it is actually stopped */
1194 if (!caIsRunning(caVoice->audioDeviceId))
1195 {
1196 IORingBufferReset(caVoice->pBuf);
1197 err = AudioOutputUnitStart(caVoice->audioUnit);
1198 }
1199 if (RT_UNLIKELY(err != noErr))
1200 {
1201 LogRel(("CoreAudio: [Input] Failed to start recording (%RI32)\n", err));
1202 return -1;
1203 }
1204 break;
1205 }
1206 case VOICE_DISABLE:
1207 {
1208 /* Only stop the device if it is actually running */
1209 if (caIsRunning(caVoice->audioDeviceId))
1210 {
1211 err = AudioOutputUnitStop(caVoice->audioUnit);
1212 if (RT_UNLIKELY(err != noErr))
1213 {
1214 LogRel(("CoreAudio: [Input] Failed to stop recording (%RI32)\n", err));
1215 return -1;
1216 }
1217 err = AudioUnitReset(caVoice->audioUnit,
1218 kAudioUnitScope_Input,
1219 0);
1220 if (RT_UNLIKELY(err != noErr))
1221 {
1222 LogRel(("CoreAudio: [Input] Failed to reset AudioUnit (%RI32)\n", err));
1223 return -1;
1224 }
1225 }
1226 break;
1227 }
1228 }
1229 return 0;
1230}
1231
1232static int coreaudio_init_in(HWVoiceIn *hw, audsettings_t *as)
1233{
1234 OSStatus err = noErr;
1235 int rc = -1;
1236 UInt32 uSize = 0; /* temporary size of properties */
1237 UInt32 uFlag = 0; /* for setting flags */
1238 CFStringRef name; /* for the temporary device name fetching */
1239 const char *pszName;
1240 ComponentDescription cd; /* description for an audio component */
1241 Component cp; /* an audio component */
1242 AURenderCallbackStruct cb; /* holds the callback structure */
1243 UInt32 cFrames; /* default frame count */
1244 const SInt32 channelMap[2] = {0, 0}; /* Channel map for mono -> stereo */
1245
1246 caVoiceIn *caVoice = (caVoiceIn *) hw;
1247
1248 caVoice->audioUnit = NULL;
1249 caVoice->audioDeviceId = kAudioDeviceUnknown;
1250 caVoice->converter = NULL;
1251 caVoice->sampleRatio = 1;
1252
1253 /* Initialize the hardware info section with the audio settings */
1254 audio_pcm_init_info(&hw->info, as);
1255
1256 /* Fetch the default audio input device currently in use */
1257 uSize = sizeof(caVoice->audioDeviceId);
1258 err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
1259 &uSize,
1260 &caVoice->audioDeviceId);
1261 if (RT_UNLIKELY(err != noErr))
1262 {
1263 LogRel(("CoreAudio: [Input] Unable to find default input device (%RI32)\n", err));
1264 return -1;
1265 }
1266
1267 /* Try to get the name of the default input device and log it. It's not
1268 * fatal if it fails. */
1269 uSize = sizeof(CFStringRef);
1270 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
1271 0,
1272 1,
1273 kAudioObjectPropertyName,
1274 &uSize,
1275 &name);
1276 if (RT_LIKELY(err == noErr))
1277 {
1278 pszName = CFStringGetCStringPtr(name, kCFStringEncodingMacRoman);
1279 if (pszName)
1280 LogRel(("CoreAudio: Using default input device: %s\n", pszName));
1281 CFRelease(name);
1282 }
1283 else
1284 LogRel(("CoreAudio: [Input] Unable to get input device name (%RI32)\n", err));
1285
1286 /* Get the default frame buffer size, so that we can set up our internal
1287 * buffers. */
1288 uSize = sizeof(cFrames);
1289 err = AudioDeviceGetProperty(caVoice->audioDeviceId,
1290 0,
1291 true,
1292 kAudioDevicePropertyBufferFrameSize,
1293 &uSize,
1294 &cFrames);
1295 if (RT_UNLIKELY(err != noErr))
1296 {
1297 LogRel(("CoreAudio: [Input] Failed to get frame buffer size of the audio device (%RI32)\n", err));
1298 return -1;
1299 }
1300 /* Set the frame buffer size and honor any minimum/maximum restrictions on
1301 the device. */
1302 err = caSetFrameBufferSize(caVoice->audioDeviceId,
1303 true,
1304 cFrames,
1305 &cFrames);
1306 if (RT_UNLIKELY(err != noErr))
1307 {
1308 LogRel(("CoreAudio: [Input] Failed to set frame buffer size on the audio device (%RI32)\n", err));
1309 return -1;
1310 }
1311
1312 cd.componentType = kAudioUnitType_Output;
1313 cd.componentSubType = kAudioUnitSubType_HALOutput;
1314 cd.componentManufacturer = kAudioUnitManufacturer_Apple;
1315 cd.componentFlags = 0;
1316 cd.componentFlagsMask = 0;
1317
1318 /* Try to find the default HAL output component. */
1319 cp = FindNextComponent(NULL, &cd);
1320 if (RT_UNLIKELY(cp == 0))
1321 {
1322 LogRel(("CoreAudio: [Input] Failed to find HAL output component\n"));
1323 return -1;
1324 }
1325
1326 /* Open the default HAL output component. */
1327 err = OpenAComponent(cp, &caVoice->audioUnit);
1328 if (RT_UNLIKELY(err != noErr))
1329 {
1330 LogRel(("CoreAudio: [Input] Failed to open output component (%RI32)\n", err));
1331 return -1;
1332 }
1333
1334 /* Switch the I/O mode for input to on. */
1335 uFlag = 1;
1336 err = AudioUnitSetProperty(caVoice->audioUnit,
1337 kAudioOutputUnitProperty_EnableIO,
1338 kAudioUnitScope_Input,
1339 1,
1340 &uFlag,
1341 sizeof(uFlag));
1342 if (RT_UNLIKELY(err != noErr))
1343 {
1344 LogRel(("CoreAudio: [Input] Failed to set input I/O mode enabled (%RI32)\n", err));
1345 return -1;
1346 }
1347
1348 /* Switch the I/O mode for output to off. This is important, as this is a
1349 * pure input stream. */
1350 uFlag = 0;
1351 err = AudioUnitSetProperty(caVoice->audioUnit,
1352 kAudioOutputUnitProperty_EnableIO,
1353 kAudioUnitScope_Output,
1354 0,
1355 &uFlag,
1356 sizeof(uFlag));
1357 if (RT_UNLIKELY(err != noErr))
1358 {
1359 LogRel(("CoreAudio: [Input] Failed to set output I/O mode disabled (%RI32)\n", err));
1360 return -1;
1361 }
1362
1363 /* Set the default audio input device as the device for the new AudioUnit. */
1364 err = AudioUnitSetProperty(caVoice->audioUnit,
1365 kAudioOutputUnitProperty_CurrentDevice,
1366 kAudioUnitScope_Global,
1367 0,
1368 &caVoice->audioDeviceId,
1369 sizeof(caVoice->audioDeviceId));
1370 if (RT_UNLIKELY(err != noErr))
1371 {
1372 LogRel(("CoreAudio: [Input] Failed to set current device (%RI32)\n", err));
1373 return -1;
1374 }
1375
1376 /* CoreAudio will inform us on a second thread about new incoming audio data.
1377 * Therefore register a callback function, which will process the new data.
1378 */
1379 cb.inputProc = caRecordingCallback;
1380 cb.inputProcRefCon = caVoice;
1381
1382 err = AudioUnitSetProperty(caVoice->audioUnit,
1383 kAudioOutputUnitProperty_SetInputCallback,
1384 kAudioUnitScope_Global,
1385 0,
1386 &cb,
1387 sizeof(cb));
1388 if (RT_UNLIKELY(err != noErr))
1389 {
1390 LogRel(("CoreAudio: [Input] Failed to set callback (%RI32)\n", err));
1391 return -1;
1392 }
1393
1394 /* Fetch the current stream format of the device. */
1395 uSize = sizeof(caVoice->deviceFormat);
1396 err = AudioUnitGetProperty(caVoice->audioUnit,
1397 kAudioUnitProperty_StreamFormat,
1398 kAudioUnitScope_Input,
1399 1,
1400 &caVoice->deviceFormat,
1401 &uSize);
1402 if (RT_UNLIKELY(err != noErr))
1403 {
1404 LogRel(("CoreAudio: [Input] Failed to get device format (%RI32)\n", err));
1405 return -1;
1406 }
1407
1408 /* Create an AudioStreamBasicDescription based on the audio settings of
1409 * VirtualBox. */
1410 caAudioSettingsToAudioStreamBasicDescription(as, &caVoice->streamFormat);
1411
1412#if DEBUG
1413 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Input] device", &caVoice->deviceFormat);
1414 caDebugOutputAudioStreamBasicDescription("CoreAudio: [Input] input", &caVoice->streamFormat);
1415#endif /* DEBUG */
1416
1417 /* If the frequency of the device is different from the requested one we
1418 * need a converter. The same applies if the number of channels is different. */
1419 if ( caVoice->deviceFormat.mSampleRate != caVoice->streamFormat.mSampleRate
1420 || caVoice->deviceFormat.mChannelsPerFrame != caVoice->streamFormat.mChannelsPerFrame)
1421 {
1422 err = AudioConverterNew(&caVoice->deviceFormat,
1423 &caVoice->streamFormat,
1424 &caVoice->converter);
1425 if (RT_UNLIKELY(err != noErr))
1426 {
1427 LogRel(("CoreAudio: [Input] Failed to create the audio converter (%RI32)\n", err));
1428 return -1;
1429 }
1430
1431 if (caVoice->deviceFormat.mChannelsPerFrame == 1 &&
1432 caVoice->streamFormat.mChannelsPerFrame == 2)
1433 {
1434 /* If the channel count is different we have to tell the converter
1435 and supply a channel mapping. For now we only support mapping
1436 from mono to stereo. For all other cases the core audio defaults
1437 are used, which means dropping additional channels in most
1438 cases. */
1439 err = AudioConverterSetProperty(caVoice->converter,
1440 kAudioConverterChannelMap,
1441 sizeof(channelMap),
1442 channelMap);
1443 if (RT_UNLIKELY(err != noErr))
1444 {
1445 LogRel(("CoreAudio: [Input] Failed to add a channel mapper to the audio converter (%RI32)\n", err));
1446 return -1;
1447 }
1448 }
1449 /* Set sample rate converter quality to maximum */
1450/* uFlag = kAudioConverterQuality_Max;*/
1451/* err = AudioConverterSetProperty(caVoice->converter,*/
1452/* kAudioConverterSampleRateConverterQuality,*/
1453/* sizeof(uFlag),*/
1454/* &uFlag);*/
1455 /* Not fatal */
1456/* if (RT_UNLIKELY(err != noErr))*/
1457/* LogRel(("CoreAudio: [Input] Failed to set the audio converter quality to the maximum (%RI32)\n", err));*/
1458
1459 Log(("CoreAudio: [Input] Converter in use\n"));
1460 /* Set the new format description for the stream. */
1461 err = AudioUnitSetProperty(caVoice->audioUnit,
1462 kAudioUnitProperty_StreamFormat,
1463 kAudioUnitScope_Output,
1464 1,
1465 &caVoice->deviceFormat,
1466 sizeof(caVoice->deviceFormat));
1467 if (RT_UNLIKELY(err != noErr))
1468 {
1469 LogRel(("CoreAudio: [Input] Failed to set stream format (%RI32)\n", err));
1470 return -1;
1471 }
1472 err = AudioUnitSetProperty(caVoice->audioUnit,
1473 kAudioUnitProperty_StreamFormat,
1474 kAudioUnitScope_Input,
1475 1,
1476 &caVoice->deviceFormat,
1477 sizeof(caVoice->deviceFormat));
1478 if (RT_UNLIKELY(err != noErr))
1479 {
1480 LogRel(("CoreAudio: [Input] Failed to set stream format (%RI32)\n", err));
1481 return -1;
1482 }
1483 }
1484 else
1485 {
1486 /* Set the new format description for the stream. */
1487 err = AudioUnitSetProperty(caVoice->audioUnit,
1488 kAudioUnitProperty_StreamFormat,
1489 kAudioUnitScope_Output,
1490 1,
1491 &caVoice->streamFormat,
1492 sizeof(caVoice->streamFormat));
1493 if (RT_UNLIKELY(err != noErr))
1494 {
1495 LogRel(("CoreAudio: [Input] Failed to set stream format (%RI32)\n", err));
1496 return -1;
1497 }
1498 }
1499
1500 /* Also set the frame buffer size of the device on our AudioUnit. This
1501 should make sure that the frame count we receive in the render
1502 thread is as we expect. */
1503 err = AudioUnitSetProperty(caVoice->audioUnit,
1504 kAudioUnitProperty_MaximumFramesPerSlice,
1505 kAudioUnitScope_Global,
1506 1,
1507 &cFrames,
1508 sizeof(cFrames));
1509 if (RT_UNLIKELY(err != noErr))
1510 {
1511 LogRel(("CoreAudio: [Input] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
1512 return -1;
1513 }
1514
1515 /* Finally initialize the new AudioUnit. */
1516 err = AudioUnitInitialize(caVoice->audioUnit);
1517 if (RT_UNLIKELY(err != noErr))
1518 {
1519 LogRel(("CoreAudio: [Input] Failed to initialize the AudioUnit (%RI32)\n", err));
1520 return -1;
1521 }
1522
1523 uSize = sizeof(caVoice->deviceFormat);
1524 err = AudioUnitGetProperty(caVoice->audioUnit,
1525 kAudioUnitProperty_StreamFormat,
1526 kAudioUnitScope_Output,
1527 1,
1528 &caVoice->deviceFormat,
1529 &uSize);
1530 if (RT_UNLIKELY(err != noErr))
1531 {
1532 LogRel(("CoreAudio: [Input] Failed to get device format (%RI32)\n", err));
1533 return -1;
1534 }
1535
1536 /* There are buggy devices (e.g. my bluetooth headset) which don't honor
1537 * the frame buffer size set in the previous calls. So finally get the
1538 * frame buffer size after the AudioUnit was initialized. */
1539 uSize = sizeof(cFrames);
1540 err = AudioUnitGetProperty(caVoice->audioUnit,
1541 kAudioUnitProperty_MaximumFramesPerSlice,
1542 kAudioUnitScope_Global,
1543 0,
1544 &cFrames,
1545 &uSize);
1546 if (RT_UNLIKELY(err != noErr))
1547 {
1548 LogRel(("CoreAudio: [Input] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
1549 return -1;
1550 }
1551
1552 /* Calculate the ratio between the device and the stream sample rate. */
1553 caVoice->sampleRatio = caVoice->streamFormat.mSampleRate / caVoice->deviceFormat.mSampleRate;
1554
1555 /* Set to zero first */
1556 caVoice->pBuf = NULL;
1557 /* Create the AudioBufferList structure with one buffer. */
1558 caVoice->bufferList.mNumberBuffers = 1;
1559 /* Initialize the buffer to nothing. */
1560 caVoice->bufferList.mBuffers[0].mNumberChannels = caVoice->streamFormat.mChannelsPerFrame;
1561 caVoice->bufferList.mBuffers[0].mDataByteSize = 0;
1562 caVoice->bufferList.mBuffers[0].mData = NULL;
1563
1564 /* Make sure that the ring buffer is big enough to hold the recording
1565 * data. Compare the maximum frames per slice value with the frames
1566 * necessary when using the converter where the sample rate could differ.
1567 * The result is always multiplied by the channels per frame to get the
1568 * samples count. */
1569 hw->samples = RT_MAX( cFrames,
1570 (cFrames * caVoice->deviceFormat.mBytesPerFrame * caVoice->sampleRatio) / caVoice->streamFormat.mBytesPerFrame)
1571 * caVoice->streamFormat.mChannelsPerFrame;
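 /* Illustrative example (hypothetical values, not from the code above): with
  * cFrames = 512, a mono Float32 device format (mBytesPerFrame = 4), a stereo
  * 16-bit stream format (mBytesPerFrame = 4) and sampleRatio = 1, this yields
  * RT_MAX(512, (512 * 4 * 1) / 4) * 2 = 1024 samples. */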
1572 /* Create the internal ring buffer. */
1573 IORingBufferCreate(&caVoice->pBuf, hw->samples << hw->info.shift);
1574 if (VALID_PTR(caVoice->pBuf))
1575 rc = 0;
1576 else
1577 LogRel(("CoreAudio: [Input] Failed to create internal ring buffer\n"));
1578
1579 if (rc != 0)
1580 {
1581 if (caVoice->pBuf)
1582 IORingBufferDestroy(caVoice->pBuf);
1583 AudioUnitUninitialize(caVoice->audioUnit);
1584 }
1585
1586 Log(("CoreAudio: [Input] HW samples: %d; Frame count: %RU32\n", hw->samples, cFrames));
1587
1588 return rc;
1589}
1590
1591static void coreaudio_fini_in(HWVoiceIn *hw)
1592{
1593 int rc = 0;
1594 OSStatus err = noErr;
1595 caVoiceIn *caVoice = (caVoiceIn *) hw;
1596
1597 rc = coreaudio_ctl_in(hw, VOICE_DISABLE);
1598 if (RT_LIKELY(rc == 0))
1599 {
1600 if (caVoice->converter)
1601 AudioConverterDispose(caVoice->converter);
1602 err = AudioUnitUninitialize(caVoice->audioUnit);
1603 if (RT_LIKELY(err == noErr))
1604 {
1605 err = CloseComponent(caVoice->audioUnit);
1606 if (RT_LIKELY(err == noErr))
1607 {
1608 caVoice->audioUnit = NULL;
1609 caVoice->audioDeviceId = kAudioDeviceUnknown;
1610 IORingBufferDestroy(caVoice->pBuf);
1611 }
1612 else
1613 LogRel(("CoreAudio: [Input] Failed to close the AudioUnit (%RI32)\n", err));
1614 }
1615 else
1616 LogRel(("CoreAudio: [Input] Failed to uninitialize the AudioUnit (%RI32)\n", err));
1617 }
1618 else
1619 LogRel(("CoreAudio: [Input] Failed to stop recording (%RI32)\n", err));
1620}
1621
1622/*******************************************************************************
1623 *
1624 * CoreAudio global section
1625 *
1626 ******************************************************************************/
1627
1628static void *coreaudio_audio_init(void)
1629{
1630 return &conf;
1631}
1632
1633static void coreaudio_audio_fini(void *opaque)
1634{
1635 NOREF(opaque);
1636}
1637
1638static struct audio_option coreaudio_options[] =
1639{
1640 {"BUFFER_SIZE", AUD_OPT_INT, &conf.cBufferFrames,
1641 "Size of the buffer in frames", NULL, 0},
1642 {NULL, 0, NULL, NULL, NULL, 0}
1643};
1644
1645static struct audio_pcm_ops coreaudio_pcm_ops =
1646{
1647 coreaudio_init_out,
1648 coreaudio_fini_out,
1649 coreaudio_run_out,
1650 coreaudio_write,
1651 coreaudio_ctl_out,
1652
1653 coreaudio_init_in,
1654 coreaudio_fini_in,
1655 coreaudio_run_in,
1656 coreaudio_read,
1657 coreaudio_ctl_in
1658};
1659
1660struct audio_driver coreaudio_audio_driver =
1661{
1662 INIT_FIELD(name =) "coreaudio",
1663 INIT_FIELD(descr =)
1664 "CoreAudio http://developer.apple.com/audio/coreaudio.html",
1665 INIT_FIELD(options =) coreaudio_options,
1666 INIT_FIELD(init =) coreaudio_audio_init,
1667 INIT_FIELD(fini =) coreaudio_audio_fini,
1668 INIT_FIELD(pcm_ops =) &coreaudio_pcm_ops,
1669 INIT_FIELD(can_be_default =) 1,
1670 INIT_FIELD(max_voices_out =) 1,
1671 INIT_FIELD(max_voices_in =) 1,
1672 INIT_FIELD(voice_size_out =) sizeof(caVoiceOut),
1673 INIT_FIELD(voice_size_in =) sizeof(caVoiceIn)
1674};
1675