Tristan Matthews | 0a329cc | 2013-07-17 13:20:14 -0400 | [diff] [blame] | 1 | /* $Id$ */ |
| 2 | /* |
| 3 | * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com) |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by |
| 7 | * the Free Software Foundation; either version 2 of the License, or |
| 8 | * (at your option) any later version. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software |
| 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 18 | */ |
| 19 | #include <pjmedia-audiodev/audiodev_imp.h> |
| 20 | #include <pj/assert.h> |
| 21 | #include <pj/log.h> |
| 22 | #include <pj/os.h> |
| 23 | |
| 24 | #if PJMEDIA_AUDIO_DEV_HAS_COREAUDIO |
| 25 | |
| 26 | #include "TargetConditionals.h" |
| 27 | #if TARGET_OS_IPHONE |
| 28 | #define COREAUDIO_MAC 0 |
| 29 | #else |
| 30 | #define COREAUDIO_MAC 1 |
| 31 | #endif |
| 32 | |
| 33 | #include <AudioUnit/AudioUnit.h> |
| 34 | #include <AudioToolbox/AudioConverter.h> |
| 35 | #if COREAUDIO_MAC |
| 36 | #include <CoreAudio/CoreAudio.h> |
| 37 | #else |
| 38 | #include <AudioToolbox/AudioServices.h> |
| 39 | |
| 40 | #define AudioDeviceID unsigned |
| 41 | |
| 42 | /** |
| 43 | * As in iOS SDK 4 or later, audio route change property listener is |
 * no longer necessary. Just make sure that your application can receive
| 45 | * remote control events by adding the code: |
| 46 | * [[UIApplication sharedApplication] |
| 47 | * beginReceivingRemoteControlEvents]; |
| 48 | * Otherwise audio route change (such as headset plug/unplug) will not be |
| 49 | * processed while your application is in the background mode. |
| 50 | */ |
| 51 | #define USE_AUDIO_ROUTE_CHANGE_PROP_LISTENER 0 |
| 52 | |
| 53 | #endif |
| 54 | |
| 55 | /* For Mac OS 10.5.x and earlier */ |
| 56 | #if AUDIO_UNIT_VERSION < 1060 |
| 57 | #define AudioComponent Component |
| 58 | #define AudioComponentDescription ComponentDescription |
| 59 | #define AudioComponentInstance ComponentInstance |
| 60 | #define AudioComponentFindNext FindNextComponent |
| 61 | #define AudioComponentInstanceNew OpenAComponent |
| 62 | #define AudioComponentInstanceDispose CloseComponent |
| 63 | #endif |
| 64 | |
| 65 | |
| 66 | #define THIS_FILE "coreaudio_dev.c" |
| 67 | |
| 68 | /* coreaudio device info */ |
/* coreaudio device info.
 * Pairs the generic pjmedia device description with the native
 * Core Audio device identifier it was built from.
 */
struct coreaudio_dev_info
{
    pjmedia_aud_dev_info info;     /**< Generic device info (name, driver,
					channel counts, caps, ...).       */
    AudioDeviceID        dev_id;   /**< Native Core Audio device ID; on
					iOS this is the 'unsigned' alias
					defined above and is always 0.    */
};
| 74 | |
| 75 | /* linked list of streams */ |
/* Linked list node of active streams, so the factory can track every
 * stream it has created (see coreaudio_factory.streams).
 */
struct stream_list
{
    PJ_DECL_LIST_MEMBER(struct stream_list);  /* prev/next links */
    struct coreaudio_stream   *stream;        /* the stream this node wraps */
};
| 81 | |
| 82 | /* coreaudio factory */ |
/* coreaudio factory.
 * One instance manages the device list and all streams created from it.
 */
struct coreaudio_factory
{
    pjmedia_aud_dev_factory      base;       /**< Base factory (must be
						  first for the casts used
						  throughout this file).   */
    pj_pool_t                   *base_pool;  /**< Pool owning the factory
						  itself; lives until
						  ca_factory_destroy().    */
    pj_pool_t                   *pool;       /**< Pool for the device list;
						  released and recreated on
						  every refresh.           */
    pj_pool_factory             *pf;         /**< Pool factory used to
						  create the pools above.  */
    pj_mutex_t                  *mutex;      /**< Protects cf_instance and
						  the stream list.         */

    unsigned                     dev_count;  /**< Number of entries in
						  dev_info[].              */
    struct coreaudio_dev_info   *dev_info;   /**< Device info array,
						  allocated from 'pool'.   */

    AudioComponent               io_comp;    /**< HAL (Mac) or RemoteIO
						  (iOS) audio component.   */
    struct stream_list           streams;    /**< List head of all streams
						  created by this factory. */
};
| 97 | |
| 98 | /* Sound stream. */ |
| 99 | struct coreaudio_stream |
| 100 | { |
| 101 | pjmedia_aud_stream base; /**< Base stream */ |
| 102 | pjmedia_aud_param param; /**< Settings */ |
| 103 | pj_pool_t *pool; /**< Memory pool. */ |
| 104 | struct coreaudio_factory *cf; |
| 105 | struct stream_list list_entry; |
| 106 | |
| 107 | pjmedia_aud_rec_cb rec_cb; /**< Capture callback. */ |
| 108 | pjmedia_aud_play_cb play_cb; /**< Playback callback. */ |
| 109 | void *user_data; /**< Application data. */ |
| 110 | |
| 111 | pj_timestamp play_timestamp; |
| 112 | pj_timestamp rec_timestamp; |
| 113 | |
| 114 | pj_int16_t *rec_buf; |
| 115 | unsigned rec_buf_count; |
| 116 | pj_int16_t *play_buf; |
| 117 | unsigned play_buf_count; |
| 118 | |
| 119 | pj_bool_t interrupted; |
| 120 | pj_bool_t quit_flag; |
| 121 | pj_bool_t running; |
| 122 | |
| 123 | pj_bool_t rec_thread_initialized; |
| 124 | pj_thread_desc rec_thread_desc; |
| 125 | pj_thread_t *rec_thread; |
| 126 | |
| 127 | pj_bool_t play_thread_initialized; |
| 128 | pj_thread_desc play_thread_desc; |
| 129 | pj_thread_t *play_thread; |
| 130 | |
| 131 | AudioUnit io_units[2]; |
| 132 | AudioStreamBasicDescription streamFormat; |
| 133 | AudioBufferList *audio_buf; |
| 134 | |
| 135 | AudioConverterRef resample; |
| 136 | pj_int16_t *resample_buf; |
| 137 | void *resample_buf_ptr; |
| 138 | unsigned resample_buf_count; |
| 139 | unsigned resample_buf_size; |
| 140 | }; |
| 141 | |
| 142 | /* Static variable */ |
| 143 | static struct coreaudio_factory *cf_instance = NULL; |
| 144 | |
| 145 | /* Prototypes */ |
| 146 | static pj_status_t ca_factory_init(pjmedia_aud_dev_factory *f); |
| 147 | static pj_status_t ca_factory_destroy(pjmedia_aud_dev_factory *f); |
| 148 | static pj_status_t ca_factory_refresh(pjmedia_aud_dev_factory *f); |
| 149 | static unsigned ca_factory_get_dev_count(pjmedia_aud_dev_factory *f); |
| 150 | static pj_status_t ca_factory_get_dev_info(pjmedia_aud_dev_factory *f, |
| 151 | unsigned index, |
| 152 | pjmedia_aud_dev_info *info); |
| 153 | static pj_status_t ca_factory_default_param(pjmedia_aud_dev_factory *f, |
| 154 | unsigned index, |
| 155 | pjmedia_aud_param *param); |
| 156 | static pj_status_t ca_factory_create_stream(pjmedia_aud_dev_factory *f, |
| 157 | const pjmedia_aud_param *param, |
| 158 | pjmedia_aud_rec_cb rec_cb, |
| 159 | pjmedia_aud_play_cb play_cb, |
| 160 | void *user_data, |
| 161 | pjmedia_aud_stream **p_aud_strm); |
| 162 | |
| 163 | static pj_status_t ca_stream_get_param(pjmedia_aud_stream *strm, |
| 164 | pjmedia_aud_param *param); |
| 165 | static pj_status_t ca_stream_get_cap(pjmedia_aud_stream *strm, |
| 166 | pjmedia_aud_dev_cap cap, |
| 167 | void *value); |
| 168 | static pj_status_t ca_stream_set_cap(pjmedia_aud_stream *strm, |
| 169 | pjmedia_aud_dev_cap cap, |
| 170 | const void *value); |
| 171 | static pj_status_t ca_stream_start(pjmedia_aud_stream *strm); |
| 172 | static pj_status_t ca_stream_stop(pjmedia_aud_stream *strm); |
| 173 | static pj_status_t ca_stream_destroy(pjmedia_aud_stream *strm); |
| 174 | static pj_status_t create_audio_unit(AudioComponent io_comp, |
| 175 | AudioDeviceID dev_id, |
| 176 | pjmedia_dir dir, |
| 177 | struct coreaudio_stream *strm, |
| 178 | AudioUnit *io_unit); |
| 179 | #if !COREAUDIO_MAC |
| 180 | static void interruptionListener(void *inClientData, UInt32 inInterruption); |
| 181 | static void propListener(void * inClientData, |
| 182 | AudioSessionPropertyID inID, |
| 183 | UInt32 inDataSize, |
| 184 | const void * inData); |
| 185 | #endif |
| 186 | |
| 187 | /* Operations */ |
/* Operations.
 * Positional initializer: the entry order must match the member order of
 * pjmedia_aud_dev_factory_op in audiodev_imp.h (note that 'refresh' comes
 * last there) — do not reorder.
 */
static pjmedia_aud_dev_factory_op factory_op =
{
    &ca_factory_init,
    &ca_factory_destroy,
    &ca_factory_get_dev_count,
    &ca_factory_get_dev_info,
    &ca_factory_default_param,
    &ca_factory_create_stream,
    &ca_factory_refresh
};
| 198 | |
/* Stream operations.
 * Positional initializer matching pjmedia_aud_stream_op; order matters.
 */
static pjmedia_aud_stream_op stream_op =
{
    &ca_stream_get_param,
    &ca_stream_get_cap,
    &ca_stream_set_cap,
    &ca_stream_start,
    &ca_stream_stop,
    &ca_stream_destroy
};
| 208 | |
| 209 | |
| 210 | /**************************************************************************** |
| 211 | * Factory operations |
| 212 | */ |
| 213 | /* |
| 214 | * Init coreaudio audio driver. |
| 215 | */ |
| 216 | pjmedia_aud_dev_factory* pjmedia_coreaudio_factory(pj_pool_factory *pf) |
| 217 | { |
| 218 | struct coreaudio_factory *f; |
| 219 | pj_pool_t *pool; |
| 220 | |
| 221 | pool = pj_pool_create(pf, "core audio base", 1000, 1000, NULL); |
| 222 | f = PJ_POOL_ZALLOC_T(pool, struct coreaudio_factory); |
| 223 | f->pf = pf; |
| 224 | f->base_pool = pool; |
| 225 | f->base.op = &factory_op; |
| 226 | |
| 227 | return &f->base; |
| 228 | } |
| 229 | |
| 230 | |
| 231 | /* API: init factory */ |
/* API: init factory.
 * Sets up the factory mutex, locates the platform IO audio component
 * (HAL output on Mac, RemoteIO on iOS), then builds the device list.
 * On iOS it additionally creates a single hard-coded device entry and
 * initializes the Audio Session.
 */
static pj_status_t ca_factory_init(pjmedia_aud_dev_factory *f)
{
    struct coreaudio_factory *cf = (struct coreaudio_factory*)f;
    AudioComponentDescription desc;
    pj_status_t status;
#if !COREAUDIO_MAC
    unsigned i;
    OSStatus ostatus;
#endif

    pj_list_init(&cf->streams);
    /* Recursive mutex: stream code may re-enter factory locking paths. */
    status = pj_mutex_create_recursive(cf->base_pool,
				       "coreaudio",
				       &cf->mutex);
    if (status != PJ_SUCCESS)
	return status;

    /* Describe the IO unit we need and look it up in the system. */
    desc.componentType = kAudioUnitType_Output;
#if COREAUDIO_MAC
    desc.componentSubType = kAudioUnitSubType_HALOutput;
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    cf->io_comp = AudioComponentFindNext(NULL, &desc);
    if (cf->io_comp == NULL)
	return PJMEDIA_EAUD_INIT; // cannot find IO unit;

    /* On Mac this enumerates real devices; on iOS it is a no-op. */
    status = ca_factory_refresh(f);
    if (status != PJ_SUCCESS)
	return status;

#if !COREAUDIO_MAC
    /* iOS: build the single fixed "iPhone IO device" entry by hand. */
    cf->pool = pj_pool_create(cf->pf, "core audio", 1000, 1000, NULL);
    cf->dev_count = 1;
    cf->dev_info = (struct coreaudio_dev_info*)
   		   pj_pool_calloc(cf->pool, cf->dev_count,
   		   sizeof(struct coreaudio_dev_info));
    for (i = 0; i < cf->dev_count; i++) {
	struct coreaudio_dev_info *cdi;

	cdi = &cf->dev_info[i];
	pj_bzero(cdi, sizeof(*cdi));
	cdi->dev_id = 0;
	strcpy(cdi->info.name, "iPhone IO device");
	strcpy(cdi->info.driver, "apple");
	cdi->info.input_count = 1;
	cdi->info.output_count = 1;
	cdi->info.default_samples_per_sec = 8000;

	/* Set the device capabilities here */
	cdi->info.caps = PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY |
			 PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY |
			 PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING |
			 PJMEDIA_AUD_DEV_CAP_INPUT_ROUTE |
			 PJMEDIA_AUD_DEV_CAP_OUTPUT_ROUTE |
			 PJMEDIA_AUD_DEV_CAP_EC;
	cdi->info.routes = PJMEDIA_AUD_DEV_ROUTE_LOUDSPEAKER |
			   PJMEDIA_AUD_DEV_ROUTE_EARPIECE |
			   PJMEDIA_AUD_DEV_ROUTE_BLUETOOTH;

	PJ_LOG(4, (THIS_FILE, " dev_id %d: %s  (in=%d, out=%d) %dHz",
		   i,
		   cdi->info.name,
		   cdi->info.input_count,
		   cdi->info.output_count,
		   cdi->info.default_samples_per_sec));
    }

    /* Initialize the Audio Session.
     * Failure is only logged: the session may already be initialized
     * by the application.
     */
    ostatus = AudioSessionInitialize(NULL, NULL, interruptionListener, NULL);
    if (ostatus != kAudioSessionNoError) {
	PJ_LOG(4, (THIS_FILE,
		   "Warning: cannot initialize audio session services (%i)",
		   ostatus));
    }

    /* Listen for audio routing change notifications. */
#if USE_AUDIO_ROUTE_CHANGE_PROP_LISTENER != 0
    ostatus = AudioSessionAddPropertyListener(
	          kAudioSessionProperty_AudioRouteChange,
	          propListener, cf);
    if (ostatus != kAudioSessionNoError) {
	PJ_LOG(4, (THIS_FILE,
		   "Warning: cannot listen for audio route change "
		   "notifications (%i)", ostatus));
    }
#endif

    /* Publish the singleton used by the iOS listeners. */
    cf_instance = cf;
#endif

    PJ_LOG(4, (THIS_FILE, "core audio initialized"));

    return PJ_SUCCESS;
}
| 331 | |
| 332 | /* API: destroy factory */ |
/* API: destroy factory.
 * Requires that all streams have already been destroyed (asserted via
 * the empty stream list). Tears down in reverse order of creation:
 * iOS listener, device-list pool, mutex (clearing the singleton under
 * lock first), then the base pool that owns the factory itself.
 */
static pj_status_t ca_factory_destroy(pjmedia_aud_dev_factory *f)
{
    struct coreaudio_factory *cf = (struct coreaudio_factory*)f;
    pj_pool_t *pool;

    pj_assert(cf);
    pj_assert(cf->base_pool);
    pj_assert(pj_list_empty(&cf->streams));

#if !COREAUDIO_MAC
#if USE_AUDIO_ROUTE_CHANGE_PROP_LISTENER != 0
    AudioSessionRemovePropertyListenerWithUserData(
        kAudioSessionProperty_AudioRouteChange, propListener, cf);
#endif
#endif

    if (cf->pool) {
	pj_pool_release(cf->pool);
	cf->pool = NULL;
    }

    if (cf->mutex) {
	/* Clear the singleton under the lock so concurrent users of
	 * cf_instance don't see a half-destroyed factory.
	 */
	pj_mutex_lock(cf->mutex);
	cf_instance = NULL;
	pj_mutex_unlock(cf->mutex);
	pj_mutex_destroy(cf->mutex);
	cf->mutex = NULL;
    }

    /* Releasing base_pool frees 'cf' itself, so save the pointer first. */
    pool = cf->base_pool;
    cf->base_pool = NULL;
    pj_pool_release(pool);

    return PJ_SUCCESS;
}
| 368 | |
| 369 | /* API: refresh the device list */ |
| 370 | static pj_status_t ca_factory_refresh(pjmedia_aud_dev_factory *f) |
| 371 | { |
| 372 | #if !COREAUDIO_MAC |
| 373 | /* iPhone doesn't support refreshing the device list */ |
| 374 | PJ_UNUSED_ARG(f); |
| 375 | return PJ_SUCCESS; |
| 376 | #else |
| 377 | struct coreaudio_factory *cf = (struct coreaudio_factory*)f; |
| 378 | unsigned i; |
| 379 | unsigned dev_count; |
| 380 | AudioObjectPropertyAddress addr; |
| 381 | AudioDeviceID *dev_ids; |
| 382 | UInt32 buf_size, dev_size, size = sizeof(AudioDeviceID); |
| 383 | AudioBufferList *buf = NULL; |
| 384 | OSStatus ostatus; |
| 385 | |
| 386 | if (cf->pool != NULL) { |
| 387 | pj_pool_release(cf->pool); |
| 388 | cf->pool = NULL; |
| 389 | } |
| 390 | |
| 391 | cf->dev_count = 0; |
| 392 | cf->pool = pj_pool_create(cf->pf, "core audio", 1000, 1000, NULL); |
| 393 | |
| 394 | /* Find out how many audio devices there are */ |
| 395 | addr.mSelector = kAudioHardwarePropertyDevices; |
| 396 | addr.mScope = kAudioObjectPropertyScopeGlobal; |
| 397 | addr.mElement = kAudioObjectPropertyElementMaster; |
| 398 | ostatus = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &addr, |
| 399 | 0, NULL, &dev_size); |
| 400 | if (ostatus != noErr) { |
| 401 | dev_size = 0; |
| 402 | } |
| 403 | |
| 404 | /* Calculate the number of audio devices available */ |
| 405 | dev_count = dev_size / size; |
| 406 | if (dev_count==0) { |
| 407 | PJ_LOG(4,(THIS_FILE, "core audio found no sound devices")); |
| 408 | /* Enabling this will cause pjsua-lib initialization to fail when |
| 409 | * there is no sound device installed in the system, even when pjsua |
| 410 | * has been run with --null-audio. Moreover, it might be better to |
| 411 | * think that the core audio backend initialization is successful, |
| 412 | * regardless there is no audio device installed, as later application |
| 413 | * can check it using get_dev_count(). |
| 414 | return PJMEDIA_EAUD_NODEV; |
| 415 | */ |
| 416 | return PJ_SUCCESS; |
| 417 | } |
| 418 | PJ_LOG(4, (THIS_FILE, "core audio detected %d devices", |
| 419 | dev_count)); |
| 420 | |
| 421 | /* Get all the audio device IDs */ |
| 422 | dev_ids = (AudioDeviceID *)pj_pool_calloc(cf->pool, dev_size, size); |
| 423 | if (!dev_ids) |
| 424 | return PJ_ENOMEM; |
| 425 | pj_bzero(dev_ids, dev_count); |
| 426 | ostatus = AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, |
| 427 | 0, NULL, |
| 428 | &dev_size, (void *)dev_ids); |
| 429 | if (ostatus != noErr ) { |
| 430 | /* This should not happen since we have successfully retrieved |
| 431 | * the property data size before |
| 432 | */ |
| 433 | return PJMEDIA_EAUD_INIT; |
| 434 | } |
| 435 | |
| 436 | if (dev_size > 1) { |
| 437 | AudioDeviceID dev_id = kAudioObjectUnknown; |
| 438 | unsigned idx = 0; |
| 439 | |
| 440 | /* Find default audio input device */ |
| 441 | addr.mSelector = kAudioHardwarePropertyDefaultInputDevice; |
| 442 | addr.mScope = kAudioObjectPropertyScopeGlobal; |
| 443 | addr.mElement = kAudioObjectPropertyElementMaster; |
| 444 | size = sizeof(dev_id); |
| 445 | |
| 446 | ostatus = AudioObjectGetPropertyData(kAudioObjectSystemObject, |
| 447 | &addr, 0, NULL, |
| 448 | &size, (void *)&dev_id); |
| 449 | if (ostatus == noErr && dev_id != dev_ids[idx]) { |
| 450 | AudioDeviceID temp_id = dev_ids[idx]; |
| 451 | |
| 452 | for (i = idx + 1; i < dev_size; i++) { |
| 453 | if (dev_ids[i] == dev_id) { |
| 454 | dev_ids[idx++] = dev_id; |
| 455 | dev_ids[i] = temp_id; |
| 456 | break; |
| 457 | } |
| 458 | } |
| 459 | } |
| 460 | |
| 461 | /* Find default audio output device */ |
| 462 | addr.mSelector = kAudioHardwarePropertyDefaultOutputDevice; |
| 463 | ostatus = AudioObjectGetPropertyData(kAudioObjectSystemObject, |
| 464 | &addr, 0, NULL, |
| 465 | &size, (void *)&dev_id); |
| 466 | if (ostatus == noErr && dev_id != dev_ids[idx]) { |
| 467 | AudioDeviceID temp_id = dev_ids[idx]; |
| 468 | |
| 469 | for (i = idx + 1; i < dev_size; i++) { |
| 470 | if (dev_ids[i] == dev_id) { |
| 471 | dev_ids[idx] = dev_id; |
| 472 | dev_ids[i] = temp_id; |
| 473 | break; |
| 474 | } |
| 475 | } |
| 476 | } |
| 477 | } |
| 478 | |
| 479 | /* Build the devices' info */ |
| 480 | cf->dev_info = (struct coreaudio_dev_info*) |
| 481 | pj_pool_calloc(cf->pool, dev_count, |
| 482 | sizeof(struct coreaudio_dev_info)); |
| 483 | buf_size = 0; |
| 484 | for (i = 0; i < dev_count; i++) { |
| 485 | struct coreaudio_dev_info *cdi; |
| 486 | Float64 sampleRate; |
| 487 | |
| 488 | cdi = &cf->dev_info[i]; |
| 489 | pj_bzero(cdi, sizeof(*cdi)); |
| 490 | cdi->dev_id = dev_ids[i]; |
| 491 | |
| 492 | /* Get device name */ |
| 493 | addr.mSelector = kAudioDevicePropertyDeviceName; |
| 494 | addr.mScope = kAudioObjectPropertyScopeGlobal; |
| 495 | addr.mElement = kAudioObjectPropertyElementMaster; |
| 496 | size = sizeof(cdi->info.name); |
| 497 | AudioObjectGetPropertyData(cdi->dev_id, &addr, |
| 498 | 0, NULL, |
| 499 | &size, (void *)cdi->info.name); |
| 500 | |
| 501 | strcpy(cdi->info.driver, "core audio"); |
| 502 | |
| 503 | /* Get the number of input channels */ |
| 504 | addr.mSelector = kAudioDevicePropertyStreamConfiguration; |
| 505 | addr.mScope = kAudioDevicePropertyScopeInput; |
| 506 | size = 0; |
| 507 | ostatus = AudioObjectGetPropertyDataSize(cdi->dev_id, &addr, |
| 508 | 0, NULL, &size); |
| 509 | if (ostatus == noErr && size > 0) { |
| 510 | |
| 511 | if (size > buf_size) { |
| 512 | buf = pj_pool_alloc(cf->pool, size); |
| 513 | buf_size = size; |
| 514 | } |
| 515 | if (buf) { |
| 516 | UInt32 idx; |
| 517 | |
| 518 | /* Get the input stream configuration */ |
| 519 | ostatus = AudioObjectGetPropertyData(cdi->dev_id, &addr, |
| 520 | 0, NULL, |
| 521 | &size, buf); |
| 522 | if (ostatus == noErr) { |
| 523 | /* Count the total number of input channels in |
| 524 | * the stream |
| 525 | */ |
| 526 | for (idx = 0; idx < buf->mNumberBuffers; idx++) { |
| 527 | cdi->info.input_count += |
| 528 | buf->mBuffers[idx].mNumberChannels; |
| 529 | } |
| 530 | } |
| 531 | } |
| 532 | } |
| 533 | |
| 534 | /* Get the number of output channels */ |
| 535 | addr.mScope = kAudioDevicePropertyScopeOutput; |
| 536 | size = 0; |
| 537 | ostatus = AudioObjectGetPropertyDataSize(cdi->dev_id, &addr, |
| 538 | 0, NULL, &size); |
| 539 | if (ostatus == noErr && size > 0) { |
| 540 | |
| 541 | if (size > buf_size) { |
| 542 | buf = pj_pool_alloc(cf->pool, size); |
| 543 | buf_size = size; |
| 544 | } |
| 545 | if (buf) { |
| 546 | UInt32 idx; |
| 547 | |
| 548 | /* Get the output stream configuration */ |
| 549 | ostatus = AudioObjectGetPropertyData(cdi->dev_id, &addr, |
| 550 | 0, NULL, |
| 551 | &size, buf); |
| 552 | if (ostatus == noErr) { |
| 553 | /* Count the total number of output channels in |
| 554 | * the stream |
| 555 | */ |
| 556 | for (idx = 0; idx < buf->mNumberBuffers; idx++) { |
| 557 | cdi->info.output_count += |
| 558 | buf->mBuffers[idx].mNumberChannels; |
| 559 | } |
| 560 | } |
| 561 | } |
| 562 | } |
| 563 | |
| 564 | /* Get default sample rate */ |
| 565 | addr.mSelector = kAudioDevicePropertyNominalSampleRate; |
| 566 | addr.mScope = kAudioObjectPropertyScopeGlobal; |
| 567 | size = sizeof(Float64); |
| 568 | ostatus = AudioObjectGetPropertyData (cdi->dev_id, &addr, |
| 569 | 0, NULL, |
| 570 | &size, &sampleRate); |
| 571 | cdi->info.default_samples_per_sec = (ostatus == noErr ? |
| 572 | sampleRate: |
| 573 | 16000); |
| 574 | |
| 575 | /* Set device capabilities here */ |
| 576 | if (cdi->info.input_count > 0) { |
| 577 | cdi->info.caps |= PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY; |
| 578 | } |
| 579 | if (cdi->info.output_count > 0) { |
| 580 | cdi->info.caps |= PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY; |
| 581 | addr.mSelector = kAudioDevicePropertyVolumeScalar; |
| 582 | addr.mScope = kAudioDevicePropertyScopeOutput; |
| 583 | if (AudioObjectHasProperty(cdi->dev_id, &addr)) { |
| 584 | cdi->info.caps |= PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING; |
| 585 | } |
| 586 | } |
| 587 | |
| 588 | cf->dev_count++; |
| 589 | |
| 590 | PJ_LOG(4, (THIS_FILE, " dev_id %d: %s (in=%d, out=%d) %dHz", |
| 591 | i, |
| 592 | cdi->info.name, |
| 593 | cdi->info.input_count, |
| 594 | cdi->info.output_count, |
| 595 | cdi->info.default_samples_per_sec)); |
| 596 | } |
| 597 | |
| 598 | return PJ_SUCCESS; |
| 599 | #endif |
| 600 | } |
| 601 | |
| 602 | /* API: get number of devices */ |
| 603 | static unsigned ca_factory_get_dev_count(pjmedia_aud_dev_factory *f) |
| 604 | { |
| 605 | struct coreaudio_factory *cf = (struct coreaudio_factory*)f; |
| 606 | return cf->dev_count; |
| 607 | } |
| 608 | |
| 609 | /* API: get device info */ |
| 610 | static pj_status_t ca_factory_get_dev_info(pjmedia_aud_dev_factory *f, |
| 611 | unsigned index, |
| 612 | pjmedia_aud_dev_info *info) |
| 613 | { |
| 614 | struct coreaudio_factory *cf = (struct coreaudio_factory*)f; |
| 615 | |
| 616 | PJ_ASSERT_RETURN(index < cf->dev_count, PJMEDIA_EAUD_INVDEV); |
| 617 | |
| 618 | pj_memcpy(info, &cf->dev_info[index].info, sizeof(*info)); |
| 619 | |
| 620 | return PJ_SUCCESS; |
| 621 | } |
| 622 | |
| 623 | /* API: create default device parameter */ |
| 624 | static pj_status_t ca_factory_default_param(pjmedia_aud_dev_factory *f, |
| 625 | unsigned index, |
| 626 | pjmedia_aud_param *param) |
| 627 | { |
| 628 | struct coreaudio_factory *cf = (struct coreaudio_factory*)f; |
| 629 | struct coreaudio_dev_info *di = &cf->dev_info[index]; |
| 630 | |
| 631 | PJ_ASSERT_RETURN(index < cf->dev_count, PJMEDIA_EAUD_INVDEV); |
| 632 | |
| 633 | pj_bzero(param, sizeof(*param)); |
| 634 | if (di->info.input_count && di->info.output_count) { |
| 635 | param->dir = PJMEDIA_DIR_CAPTURE_PLAYBACK; |
| 636 | param->rec_id = index; |
| 637 | param->play_id = index; |
| 638 | } else if (di->info.input_count) { |
| 639 | param->dir = PJMEDIA_DIR_CAPTURE; |
| 640 | param->rec_id = index; |
| 641 | param->play_id = PJMEDIA_AUD_INVALID_DEV; |
| 642 | } else if (di->info.output_count) { |
| 643 | param->dir = PJMEDIA_DIR_PLAYBACK; |
| 644 | param->play_id = index; |
| 645 | param->rec_id = PJMEDIA_AUD_INVALID_DEV; |
| 646 | } else { |
| 647 | return PJMEDIA_EAUD_INVDEV; |
| 648 | } |
| 649 | |
| 650 | /* Set the mandatory settings here */ |
| 651 | param->clock_rate = di->info.default_samples_per_sec; |
| 652 | param->channel_count = 1; |
| 653 | param->samples_per_frame = di->info.default_samples_per_sec * 20 / 1000; |
| 654 | param->bits_per_sample = 16; |
| 655 | |
| 656 | /* Set the param for device capabilities here */ |
| 657 | param->flags = PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY | |
| 658 | PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY; |
| 659 | param->input_latency_ms = PJMEDIA_SND_DEFAULT_REC_LATENCY; |
| 660 | param->output_latency_ms = PJMEDIA_SND_DEFAULT_PLAY_LATENCY; |
| 661 | |
| 662 | return PJ_SUCCESS; |
| 663 | } |
| 664 | |
/* AudioConverter input callback (AudioConverterComplexInputDataProc).
 * Supplies input PCM to AudioConverterFillComplexBuffer() from the
 * stream's current resample cursor (strm->resample_buf_ptr, set by the
 * caller before each conversion). Clamps the packet count to the
 * stream's resample buffer capacity; packet descriptions are not used
 * (PCM only), so outDataPacketDescription is ignored.
 */
OSStatus resampleProc(AudioConverterRef             inAudioConverter,
		      UInt32                       *ioNumberDataPackets,
		      AudioBufferList              *ioData,
		      AudioStreamPacketDescription **outDataPacketDescription,
		      void                         *inUserData)
{
    struct coreaudio_stream *strm = (struct coreaudio_stream*)inUserData;

    /* Never hand the converter more packets than the buffer holds. */
    if (*ioNumberDataPackets > strm->resample_buf_size)
	*ioNumberDataPackets = strm->resample_buf_size;

    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mNumberChannels = strm->streamFormat.mChannelsPerFrame;
    ioData->mBuffers[0].mData = strm->resample_buf_ptr;
    /* bytes = packets * channels * (bits_per_sample / 8) */
    ioData->mBuffers[0].mDataByteSize = *ioNumberDataPackets *
					strm->streamFormat.mChannelsPerFrame *
					strm->param.bits_per_sample >> 3;

    return noErr;
}
| 685 | |
/* Capture-side render callback used when resampling is needed (device
 * rate != stream rate).
 *
 * Renders raw input from the capture unit, accumulates samples in
 * strm->resample_buf until a full resample chunk (resample_buf_size
 * samples) is available, converts each chunk through strm->resample via
 * AudioConverterFillComplexBuffer() into strm->rec_buf, and delivers the
 * converted frames to the application via strm->rec_cb. Leftover samples
 * are carried over in resample_buf/resample_buf_count for the next call.
 *
 * Returns noErr normally, or -1 when rendering/conversion fails (or the
 * rec_cb requested stop through a non-zero status).
 */
static OSStatus resample_callback(void                       *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp       *inTimeStamp,
                                  UInt32                      inBusNumber,
                                  UInt32                      inNumberFrames,
                                  AudioBufferList            *ioData)
{
    struct coreaudio_stream *strm = (struct coreaudio_stream*)inRefCon;
    OSStatus ostatus;
    pj_status_t status = 0;
    unsigned nsamples;
    AudioBufferList *buf = strm->audio_buf;
    pj_int16_t *input;
    UInt32 resampleSize;

    pj_assert(!strm->quit_flag);

    /* Known cases of callback's thread:
     * - The thread may be changed in the middle of a session
     *   it happens when plugging/unplugging headphone.
     * - The same thread may be reused in consecutive sessions. The first
     *   session will leave TLS set, but release the TLS data address,
     *   so the second session must re-register the callback's thread.
     */
    if (strm->rec_thread_initialized == 0 || !pj_thread_is_registered())
    {
	pj_bzero(strm->rec_thread_desc, sizeof(pj_thread_desc));
	status = pj_thread_register("ca_rec", strm->rec_thread_desc,
				    &strm->rec_thread);
	strm->rec_thread_initialized = 1;
	PJ_LOG(5,(THIS_FILE, "Recorder thread started, (%i frames)",
		  inNumberFrames));
    }

    /* NULL mData asks AudioUnitRender to supply its own buffer.
     * NOTE(review): mDataByteSize here is frames * channels, without the
     * bytes-per-sample factor — looks inconsistent with resampleProc's
     * byte math; confirm against AudioUnitRender's requirements.
     */
    buf->mBuffers[0].mData = NULL;
    buf->mBuffers[0].mDataByteSize = inNumberFrames *
				     strm->streamFormat.mChannelsPerFrame;
    /* Render the unit to get input data */
    ostatus = AudioUnitRender(strm->io_units[0],
			      ioActionFlags,
			      inTimeStamp,
			      inBusNumber,
			      inNumberFrames,
			      buf);

    if (ostatus != noErr) {
	PJ_LOG(5, (THIS_FILE, "Core audio unit render error %i", ostatus));
	goto on_break;
    }
    input = (pj_int16_t *)buf->mBuffers[0].mData;

    /* Total device-rate samples available: new render + carry-over. */
    resampleSize = strm->resample_buf_size;
    nsamples = inNumberFrames * strm->param.channel_count +
	       strm->resample_buf_count;

    if (nsamples >= resampleSize) {
	pjmedia_frame frame;
	UInt32 resampleOutput = strm->param.samples_per_frame /
				strm->streamFormat.mChannelsPerFrame;
	AudioBufferList ab;

	frame.type = PJMEDIA_FRAME_TYPE_AUDIO;
	frame.buf = (void*) strm->rec_buf;
	frame.size = strm->param.samples_per_frame *
		     strm->param.bits_per_sample >> 3;
	frame.bit_info = 0;

	/* Converter output always lands in rec_buf. */
	ab.mNumberBuffers = 1;
	ab.mBuffers[0].mNumberChannels = strm->streamFormat.mChannelsPerFrame;
	ab.mBuffers[0].mData = strm->rec_buf;
	ab.mBuffers[0].mDataByteSize = frame.size;

	/* If buffer is not empty, combine the buffer with the just incoming
	 * samples, then call put_frame.
	 */
	if (strm->resample_buf_count) {
	    unsigned chunk_count = resampleSize - strm->resample_buf_count;
	    pjmedia_copy_samples(strm->resample_buf + strm->resample_buf_count,
				 input, chunk_count);

	    /* Do the resample */

	    strm->resample_buf_ptr = strm->resample_buf;
	    ostatus = AudioConverterFillComplexBuffer(strm->resample,
						      resampleProc,
						      strm,
						      &resampleOutput,
						      &ab,
						      NULL);
	    if (ostatus != noErr) {
		goto on_break;
	    }
	    frame.timestamp.u64 = strm->rec_timestamp.u64;

	    status = (*strm->rec_cb)(strm->user_data, &frame);

	    /* Advance past the samples consumed from the fresh input. */
	    input = input + chunk_count;
	    nsamples -= resampleSize;
	    strm->resample_buf_count = 0;
	    strm->rec_timestamp.u64 += strm->param.samples_per_frame /
				       strm->param.channel_count;
	}


	/* Give all frames we have */
	while (nsamples >= resampleSize && status == 0) {
	    frame.timestamp.u64 = strm->rec_timestamp.u64;

	    /* Do the resample */
	    strm->resample_buf_ptr = input;
	    ab.mBuffers[0].mDataByteSize = frame.size;
	    resampleOutput = strm->param.samples_per_frame /
			     strm->streamFormat.mChannelsPerFrame;
	    ostatus = AudioConverterFillComplexBuffer(strm->resample,
						      resampleProc,
						      strm,
						      &resampleOutput,
						      &ab,
						      NULL);
	    if (ostatus != noErr) {
		goto on_break;
	    }

	    status = (*strm->rec_cb)(strm->user_data, &frame);

	    input = (pj_int16_t*) input + resampleSize;
	    nsamples -= resampleSize;
	    strm->rec_timestamp.u64 += strm->param.samples_per_frame /
				       strm->param.channel_count;
	}

	/* Store the remaining samples into the buffer */
	if (nsamples && status == 0) {
	    strm->resample_buf_count = nsamples;
	    pjmedia_copy_samples(strm->resample_buf, input,
			         nsamples);
	}

    } else {
	/* Not enough samples, let's just store them in the buffer */
	pjmedia_copy_samples(strm->resample_buf + strm->resample_buf_count,
			     input,
			     inNumberFrames * strm->param.channel_count);
	strm->resample_buf_count += inNumberFrames *
				    strm->param.channel_count;
    }

    return noErr;

on_break:
    return -1;
}
| 838 | |
/* AudioUnit input callback, invoked by Core Audio whenever captured audio
 * is available on the input bus. It renders the captured samples, slices
 * them into frames of param.samples_per_frame samples and delivers each
 * frame to the application through strm->rec_cb; any leftover samples are
 * kept in strm->rec_buf until the next invocation.
 *
 * Returns noErr on success, or -1 to tell Core Audio that rendering
 * failed for this cycle.
 */
static OSStatus input_callback(void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData)
{
    struct coreaudio_stream *strm = (struct coreaudio_stream*)inRefCon;
    OSStatus ostatus;
    pj_status_t status = 0;
    unsigned nsamples;
    AudioBufferList *buf = strm->audio_buf;
    pj_int16_t *input;

    pj_assert(!strm->quit_flag);

    /* Known cases of callback's thread:
     * - The thread may be changed in the middle of a session
     *   it happens when plugging/unplugging headphone.
     * - The same thread may be reused in consecutive sessions. The first
     *   session will leave TLS set, but release the TLS data address,
     *   so the second session must re-register the callback's thread.
     */
    if (strm->rec_thread_initialized == 0 || !pj_thread_is_registered())
    {
        pj_bzero(strm->rec_thread_desc, sizeof(pj_thread_desc));
        status = pj_thread_register("ca_rec", strm->rec_thread_desc,
                                    &strm->rec_thread);
        strm->rec_thread_initialized = 1;
        PJ_LOG(5,(THIS_FILE, "Recorder thread started, (%i frames)",
                  inNumberFrames));
    }

    /* mData == NULL asks AudioUnitRender() to supply its own buffer.
     * NOTE(review): mDataByteSize is set to a sample count, not a byte
     * count (no bits_per_sample factor) -- presumably tolerated because
     * mData is NULL and the render call sizes the buffer itself; confirm
     * against the AudioUnitRender documentation.
     */
    buf->mBuffers[0].mData = NULL;
    buf->mBuffers[0].mDataByteSize = inNumberFrames *
                                     strm->streamFormat.mChannelsPerFrame;
    /* Render the unit to get input data */
    ostatus = AudioUnitRender(strm->io_units[0],
                              ioActionFlags,
                              inTimeStamp,
                              inBusNumber,
                              inNumberFrames,
                              buf);

    if (ostatus != noErr) {
        PJ_LOG(5, (THIS_FILE, "Core audio unit render error %i", ostatus));
        goto on_break;
    }
    input = (pj_int16_t *)buf->mBuffers[0].mData;

    /* Calculate number of samples we've got: the newly captured samples
     * plus whatever was left over from the previous callback.
     */
    nsamples = inNumberFrames * strm->param.channel_count +
               strm->rec_buf_count;
    if (nsamples >= strm->param.samples_per_frame) {
        pjmedia_frame frame;

        frame.type = PJMEDIA_FRAME_TYPE_AUDIO;
        frame.size = strm->param.samples_per_frame *
                     strm->param.bits_per_sample >> 3;
        frame.bit_info = 0;

        /* If buffer is not empty, combine the buffer with the just incoming
         * samples, then call put_frame.
         */
        if (strm->rec_buf_count) {
            unsigned chunk_count = 0;

            /* Top up the partial frame in rec_buf to a full frame */
            chunk_count = strm->param.samples_per_frame - strm->rec_buf_count;
            pjmedia_copy_samples(strm->rec_buf + strm->rec_buf_count,
                                 input, chunk_count);

            frame.buf = (void*) strm->rec_buf;
            frame.timestamp.u64 = strm->rec_timestamp.u64;

            status = (*strm->rec_cb)(strm->user_data, &frame);

            input = input + chunk_count;
            nsamples -= strm->param.samples_per_frame;
            strm->rec_buf_count = 0;
            /* Timestamp advances in per-channel sample units */
            strm->rec_timestamp.u64 += strm->param.samples_per_frame /
                                       strm->param.channel_count;
        }

        /* Give all frames we have */
        while (nsamples >= strm->param.samples_per_frame && status == 0) {
            frame.buf = (void*) input;
            frame.timestamp.u64 = strm->rec_timestamp.u64;

            status = (*strm->rec_cb)(strm->user_data, &frame);

            input = (pj_int16_t*) input + strm->param.samples_per_frame;
            nsamples -= strm->param.samples_per_frame;
            strm->rec_timestamp.u64 += strm->param.samples_per_frame /
                                       strm->param.channel_count;
        }

        /* Store the remaining samples into the buffer */
        if (nsamples && status == 0) {
            strm->rec_buf_count = nsamples;
            pjmedia_copy_samples(strm->rec_buf, input,
                                 nsamples);
        }

    } else {
        /* Not enough samples, let's just store them in the buffer */
        pjmedia_copy_samples(strm->rec_buf + strm->rec_buf_count,
                             input,
                             inNumberFrames * strm->param.channel_count);
        strm->rec_buf_count += inNumberFrames * strm->param.channel_count;
    }

    return noErr;

on_break:
    return -1;
}
| 955 | |
/* AudioUnit render callback for the playback direction. Core Audio pulls
 * audio from us: we must fill ioData->mBuffers[0] with exactly
 * inNumberFrames frames. Samples left over from the previous cycle
 * (in play_buf) are drained first; then full frames are requested from
 * the application through strm->play_cb. When the device asks for less
 * than a full frame, one whole frame is fetched into play_buf and the
 * surplus is kept there for the next cycle.
 *
 * Returns noErr on success, or -1 when the application callback fails.
 */
static OSStatus output_renderer(void *inRefCon,
                                AudioUnitRenderActionFlags *ioActionFlags,
                                const AudioTimeStamp *inTimeStamp,
                                UInt32 inBusNumber,
                                UInt32 inNumberFrames,
                                AudioBufferList *ioData)
{
    struct coreaudio_stream *stream = (struct coreaudio_stream*)inRefCon;
    pj_status_t status = 0;
    unsigned nsamples_req = inNumberFrames * stream->param.channel_count;
    pj_int16_t *output = ioData->mBuffers[0].mData;

    pj_assert(!stream->quit_flag);

    /* Known cases of callback's thread:
     * - The thread may be changed in the middle of a session
     *   it happens when plugging/unplugging headphone.
     * - The same thread may be reused in consecutive sessions. The first
     *   session will leave TLS set, but release the TLS data address,
     *   so the second session must re-register the callback's thread.
     */
    if (stream->play_thread_initialized == 0 || !pj_thread_is_registered())
    {
        pj_bzero(stream->play_thread_desc, sizeof(pj_thread_desc));
        status = pj_thread_register("coreaudio", stream->play_thread_desc,
                                    &stream->play_thread);
        stream->play_thread_initialized = 1;
        PJ_LOG(5,(THIS_FILE, "Player thread started, (%i frames)",
                  inNumberFrames));
    }


    /* Check if any buffered samples */
    if (stream->play_buf_count) {
        /* samples buffered >= requested by sound device */
        if (stream->play_buf_count >= nsamples_req) {
            pjmedia_copy_samples((pj_int16_t*)output, stream->play_buf,
                                 nsamples_req);
            stream->play_buf_count -= nsamples_req;
            /* Shift the unconsumed remainder to the front of play_buf */
            pjmedia_move_samples(stream->play_buf,
                                 stream->play_buf + nsamples_req,
                                 stream->play_buf_count);
            nsamples_req = 0;

            return noErr;
        }

        /* samples buffered < requested by sound device */
        pjmedia_copy_samples((pj_int16_t*)output, stream->play_buf,
                             stream->play_buf_count);
        nsamples_req -= stream->play_buf_count;
        output = (pj_int16_t*)output + stream->play_buf_count;
        stream->play_buf_count = 0;
    }

    /* Fill output buffer as requested */
    while (nsamples_req && status == 0) {
        pjmedia_frame frame;

        frame.type = PJMEDIA_FRAME_TYPE_AUDIO;
        frame.size = stream->param.samples_per_frame *
                     stream->param.bits_per_sample >> 3;
        frame.timestamp.u64 = stream->play_timestamp.u64;
        frame.bit_info = 0;

        if (nsamples_req >= stream->param.samples_per_frame) {
            /* A whole frame fits into the device buffer: render directly */
            frame.buf = output;
            status = (*stream->play_cb)(stream->user_data, &frame);
            if (status != PJ_SUCCESS)
                goto on_break;

            /* Non-audio frames (e.g. NONE) are played as silence */
            if (frame.type != PJMEDIA_FRAME_TYPE_AUDIO)
                pj_bzero(frame.buf, frame.size);

            nsamples_req -= stream->param.samples_per_frame;
            output = (pj_int16_t*)output + stream->param.samples_per_frame;
        } else {
            /* Less than a frame needed: fetch a full frame into play_buf,
             * copy what the device wants and keep the surplus buffered.
             */
            frame.buf = stream->play_buf;
            status = (*stream->play_cb)(stream->user_data, &frame);
            if (status != PJ_SUCCESS)
                goto on_break;

            if (frame.type != PJMEDIA_FRAME_TYPE_AUDIO)
                pj_bzero(frame.buf, frame.size);

            pjmedia_copy_samples((pj_int16_t*)output, stream->play_buf,
                                 nsamples_req);
            stream->play_buf_count = stream->param.samples_per_frame -
                                     nsamples_req;
            pjmedia_move_samples(stream->play_buf,
                                 stream->play_buf+nsamples_req,
                                 stream->play_buf_count);
            nsamples_req = 0;
        }

        /* Timestamp advances in per-channel sample units */
        stream->play_timestamp.u64 += stream->param.samples_per_frame /
                                      stream->param.channel_count;
    }

    return noErr;

on_break:
    return -1;
}
| 1060 | |
| 1061 | #if !COREAUDIO_MAC |
| 1062 | static void propListener(void *inClientData, |
| 1063 | AudioSessionPropertyID inID, |
| 1064 | UInt32 inDataSize, |
| 1065 | const void * inData) |
| 1066 | { |
| 1067 | struct coreaudio_factory *cf = (struct coreaudio_factory*)inClientData; |
| 1068 | struct stream_list *it, *itBegin; |
| 1069 | CFDictionaryRef routeDictionary; |
| 1070 | CFNumberRef reason; |
| 1071 | SInt32 reasonVal; |
| 1072 | pj_assert(cf); |
| 1073 | |
| 1074 | if (inID != kAudioSessionProperty_AudioRouteChange) |
| 1075 | return; |
| 1076 | |
| 1077 | routeDictionary = (CFDictionaryRef)inData; |
| 1078 | reason = (CFNumberRef) |
| 1079 | CFDictionaryGetValue( |
| 1080 | routeDictionary, |
| 1081 | CFSTR(kAudioSession_AudioRouteChangeKey_Reason)); |
| 1082 | CFNumberGetValue(reason, kCFNumberSInt32Type, &reasonVal); |
| 1083 | |
| 1084 | if (reasonVal != kAudioSessionRouteChangeReason_OldDeviceUnavailable) { |
| 1085 | PJ_LOG(3, (THIS_FILE, "ignoring audio route change...")); |
| 1086 | return; |
| 1087 | } |
| 1088 | |
| 1089 | PJ_LOG(3, (THIS_FILE, "audio route changed")); |
| 1090 | |
| 1091 | pj_mutex_lock(cf->mutex); |
| 1092 | itBegin = &cf->streams; |
| 1093 | for (it = itBegin->next; it != itBegin; it = it->next) { |
| 1094 | if (it->stream->interrupted) |
| 1095 | continue; |
| 1096 | |
| 1097 | /* |
| 1098 | status = ca_stream_stop((pjmedia_aud_stream *)it->stream); |
| 1099 | status = ca_stream_start((pjmedia_aud_stream *)it->stream); |
| 1100 | if (status != PJ_SUCCESS) { |
| 1101 | PJ_LOG(3, (THIS_FILE, |
| 1102 | "Error: failed to restart the audio unit (%i)", |
| 1103 | status)); |
| 1104 | continue; |
| 1105 | } |
| 1106 | PJ_LOG(3, (THIS_FILE, "core audio unit successfully restarted")); |
| 1107 | */ |
| 1108 | } |
| 1109 | pj_mutex_unlock(cf->mutex); |
| 1110 | } |
| 1111 | |
/* Audio session interruption listener (iOS). Called by the OS when the
 * audio session is interrupted (e.g. by an incoming phone call) and again
 * when the interruption ends. On begin: stops every running stream and
 * marks it interrupted. On end: restores the audio session category and
 * restarts every stream that was marked interrupted.
 */
static void interruptionListener(void *inClientData, UInt32 inInterruption)
{
    struct stream_list *it, *itBegin;
    pj_status_t status;
    pj_thread_desc thread_desc;
    pj_thread_t *thread;

    /* Register the thread with PJLIB, this is must for any external threads
     * which need to use the PJLIB framework.
     * NOTE(review): the registration status is stored but never checked,
     * and thread_desc lives on this function's stack -- pjlib normally
     * expects the descriptor to stay valid while the thread is registered;
     * confirm this is acceptable for a transient callback thread.
     */
    if (!pj_thread_is_registered()) {
        pj_bzero(thread_desc, sizeof(pj_thread_desc));
        status = pj_thread_register("intListener", thread_desc, &thread);
    }

    PJ_LOG(3, (THIS_FILE, "Session interrupted! --- %s ---",
               inInterruption == kAudioSessionBeginInterruption ?
               "Begin Interruption" : "End Interruption"));

    /* The factory singleton may already be gone during shutdown */
    if (!cf_instance)
        return;

    pj_mutex_lock(cf_instance->mutex);
    itBegin = &cf_instance->streams;
    for (it = itBegin->next; it != itBegin; it = it->next) {
        if (inInterruption == kAudioSessionEndInterruption &&
            it->stream->interrupted == PJ_TRUE)
        {
            UInt32 audioCategory;
            OSStatus ostatus;

            /* Make sure that your application can receive remote control
             * events by adding the code:
             *     [[UIApplication sharedApplication]
             *      beginReceivingRemoteControlEvents];
             * Otherwise audio unit will fail to restart while your
             * application is in the background mode.
             */
            /* Make sure we set the correct audio category before restarting */
            audioCategory = kAudioSessionCategory_PlayAndRecord;
            ostatus = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                                              sizeof(audioCategory),
                                              &audioCategory);
            if (ostatus != kAudioSessionNoError) {
                PJ_LOG(4, (THIS_FILE,
                           "Warning: cannot set the audio session category (%i)",
                           ostatus));
            }

            /* Restart the stream */
            status = ca_stream_start((pjmedia_aud_stream*)it->stream);
            if (status != PJ_SUCCESS) {
                PJ_LOG(3, (THIS_FILE,
                           "Error: failed to restart the audio unit (%i)",
                           status));
                continue;
            }
            PJ_LOG(3, (THIS_FILE, "core audio unit successfully resumed"
                       " after interruption"));
        } else if (inInterruption == kAudioSessionBeginInterruption &&
                   it->stream->running == PJ_TRUE)
        {
            /* Interruption begins: stop the stream and remember to
             * restart it when the interruption ends.
             */
            status = ca_stream_stop((pjmedia_aud_stream*)it->stream);
            it->stream->interrupted = PJ_TRUE;
        }
    }
    pj_mutex_unlock(cf_instance->mutex);
}
| 1180 | |
| 1181 | #endif |
| 1182 | |
| 1183 | #if COREAUDIO_MAC |
| 1184 | /* Internal: create audio converter for resampling the recorder device */ |
| 1185 | static pj_status_t create_audio_resample(struct coreaudio_stream *strm, |
| 1186 | AudioStreamBasicDescription *desc) |
| 1187 | { |
| 1188 | OSStatus ostatus; |
| 1189 | |
| 1190 | pj_assert(strm->streamFormat.mSampleRate != desc->mSampleRate); |
| 1191 | pj_assert(NULL == strm->resample); |
| 1192 | pj_assert(NULL == strm->resample_buf); |
| 1193 | |
| 1194 | /* Create the audio converter */ |
| 1195 | ostatus = AudioConverterNew(desc, &strm->streamFormat, &strm->resample); |
| 1196 | if (ostatus != noErr) { |
| 1197 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 1198 | } |
| 1199 | |
| 1200 | /* |
| 1201 | * Allocate the buffer required to hold enough input data |
| 1202 | */ |
| 1203 | strm->resample_buf_size = (unsigned)(desc->mSampleRate * |
| 1204 | strm->param.samples_per_frame / |
| 1205 | strm->param.clock_rate); |
| 1206 | strm->resample_buf = (pj_int16_t*) |
| 1207 | pj_pool_alloc(strm->pool, |
| 1208 | strm->resample_buf_size * |
| 1209 | strm->param.bits_per_sample >> 3); |
| 1210 | if (!strm->resample_buf) |
| 1211 | return PJ_ENOMEM; |
| 1212 | strm->resample_buf_count = 0; |
| 1213 | |
| 1214 | return PJ_SUCCESS; |
| 1215 | } |
| 1216 | #endif |
| 1217 | |
/* Internal: create and configure an audio unit for a recorder and/or
 * playback device.
 *
 * Steps (order matters for AUHAL):
 *  1. (iOS) set the PlayAndRecord session category,
 *  2. instantiate the unit and enable/disable its input/output buses,
 *  3. (Mac) bind the unit to the device,
 *  4. set stream formats, install render/input callbacks and allocate
 *     the playback/recording buffers,
 *  5. initialize the unit.
 *
 * On capture (Mac), the device's native sample rate is kept on the input
 * side and a resampler is created if it differs from the stream rate.
 * Returns PJ_SUCCESS or a pjmedia Core Audio error.
 */
static pj_status_t create_audio_unit(AudioComponent io_comp,
                                     AudioDeviceID dev_id,
                                     pjmedia_dir dir,
                                     struct coreaudio_stream *strm,
                                     AudioUnit *io_unit)
{
    OSStatus ostatus;
#if !COREAUDIO_MAC
    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    /* We want to be able to open playback and recording streams */
    ostatus = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                                      sizeof(audioCategory),
                                      &audioCategory);
    if (ostatus != kAudioSessionNoError) {
        PJ_LOG(4, (THIS_FILE,
                   "Warning: cannot set the audio session category (%i)",
                   ostatus));
    }
#endif

    /* Create an audio unit to interface with the device */
    ostatus = AudioComponentInstanceNew(io_comp, io_unit);
    if (ostatus != noErr) {
        return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
    }

    /* Set audio unit's properties for capture device */
    if (dir & PJMEDIA_DIR_CAPTURE) {
        UInt32 enable = 1;

        /* Enable input (element/bus 1 is the input bus on AUHAL) */
        ostatus = AudioUnitSetProperty(*io_unit,
                                       kAudioOutputUnitProperty_EnableIO,
                                       kAudioUnitScope_Input,
                                       1,
                                       &enable,
                                       sizeof(enable));
        if (ostatus != noErr) {
            PJ_LOG(4, (THIS_FILE,
                       "Warning: cannot enable IO of capture device %d",
                       dev_id));
        }

        /* Disable output (bus 0) when this unit is capture-only */
        if (!(dir & PJMEDIA_DIR_PLAYBACK)) {
            enable = 0;
            ostatus = AudioUnitSetProperty(*io_unit,
                                           kAudioOutputUnitProperty_EnableIO,
                                           kAudioUnitScope_Output,
                                           0,
                                           &enable,
                                           sizeof(enable));
            if (ostatus != noErr) {
                PJ_LOG(4, (THIS_FILE,
                           "Warning: cannot disable IO of capture device %d",
                           dev_id));
            }
        }
    }

    /* Set audio unit's properties for playback device */
    if (dir & PJMEDIA_DIR_PLAYBACK) {
        UInt32 enable = 1;

        /* Enable output (element/bus 0 is the output bus on AUHAL) */
        ostatus = AudioUnitSetProperty(*io_unit,
                                       kAudioOutputUnitProperty_EnableIO,
                                       kAudioUnitScope_Output,
                                       0,
                                       &enable,
                                       sizeof(enable));
        if (ostatus != noErr) {
            PJ_LOG(4, (THIS_FILE,
                       "Warning: cannot enable IO of playback device %d",
                       dev_id));
        }

    }

#if COREAUDIO_MAC
    /* Bind the audio unit to the selected hardware device (Mac only;
     * on iOS there is a single implicit device).
     */
    PJ_LOG(5, (THIS_FILE, "Opening device %d", dev_id));
    ostatus = AudioUnitSetProperty(*io_unit,
                                   kAudioOutputUnitProperty_CurrentDevice,
                                   kAudioUnitScope_Global,
                                   0,
                                   &dev_id,
                                   sizeof(dev_id));
    if (ostatus != noErr) {
        return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
    }
#endif

    if (dir & PJMEDIA_DIR_CAPTURE) {
#if COREAUDIO_MAC
        AudioStreamBasicDescription deviceFormat;
        UInt32 size;

        /*
         * Keep the sample rate from the device, otherwise we will confuse
         * AUHAL
         */
        size = sizeof(AudioStreamBasicDescription);
        ostatus = AudioUnitGetProperty(*io_unit,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Input,
                                       1,
                                       &deviceFormat,
                                       &size);
        if (ostatus != noErr) {
            return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
        }
        strm->streamFormat.mSampleRate = deviceFormat.mSampleRate;
#endif

        /* When setting the stream format, we have to make sure the sample
         * rate is supported. Setting an unsupported sample rate will cause
         * AudioUnitRender() to fail later.
         */
        ostatus = AudioUnitSetProperty(*io_unit,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Output,
                                       1,
                                       &strm->streamFormat,
                                       sizeof(strm->streamFormat));
        if (ostatus != noErr) {
            return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
        }

#if COREAUDIO_MAC
        /* Restore the stream rate and check what rate AUHAL actually
         * delivers; if it differs, set up a resampler.
         */
        strm->streamFormat.mSampleRate = strm->param.clock_rate;
        size = sizeof(AudioStreamBasicDescription);
        ostatus = AudioUnitGetProperty (*io_unit,
                                        kAudioUnitProperty_StreamFormat,
                                        kAudioUnitScope_Output,
                                        1,
                                        &deviceFormat,
                                        &size);
        if (ostatus == noErr) {
            if (strm->streamFormat.mSampleRate != deviceFormat.mSampleRate) {
                pj_status_t rc = create_audio_resample(strm, &deviceFormat);
                if (PJ_SUCCESS != rc)
                    return rc;
            }
        } else {
            return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
        }
#endif
    }

    if (dir & PJMEDIA_DIR_PLAYBACK) {
        AURenderCallbackStruct output_cb;

        /* Set the stream format */
        ostatus = AudioUnitSetProperty(*io_unit,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Input,
                                       0,
                                       &strm->streamFormat,
                                       sizeof(strm->streamFormat));
        if (ostatus != noErr) {
            PJ_LOG(4, (THIS_FILE,
                       "Warning: cannot set playback stream format of dev %d",
                       dev_id));
        }

        /* Set render callback */
        output_cb.inputProc = output_renderer;
        output_cb.inputProcRefCon = strm;
        ostatus = AudioUnitSetProperty(*io_unit,
                                       kAudioUnitProperty_SetRenderCallback,
                                       kAudioUnitScope_Input,
                                       0,
                                       &output_cb,
                                       sizeof(output_cb));
        if (ostatus != noErr) {
            return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
        }

        /* Allocate playback buffer (one frame's worth of samples) */
        strm->play_buf = (pj_int16_t*)pj_pool_alloc(strm->pool,
                         strm->param.samples_per_frame *
                         strm->param.bits_per_sample >> 3);
        if (!strm->play_buf)
            return PJ_ENOMEM;
        strm->play_buf_count = 0;
    }

    if (dir & PJMEDIA_DIR_CAPTURE) {
        AURenderCallbackStruct input_cb;
#if COREAUDIO_MAC
        AudioBuffer *ab;
        UInt32 size, buf_size;
#endif

        /* Set input callback; the resampling variant is used when a
         * converter was created above.
         */
        input_cb.inputProc = strm->resample ? resample_callback :
                             input_callback;
        input_cb.inputProcRefCon = strm;
        ostatus = AudioUnitSetProperty(
                      *io_unit,
                      kAudioOutputUnitProperty_SetInputCallback,
                      kAudioUnitScope_Global,
                      0,
                      &input_cb,
                      sizeof(input_cb));
        if (ostatus != noErr) {
            return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
        }

#if COREAUDIO_MAC
        /* Get device's buffer frame size */
        size = sizeof(UInt32);
        ostatus = AudioUnitGetProperty(*io_unit,
                                       kAudioDevicePropertyBufferFrameSize,
                                       kAudioUnitScope_Global,
                                       0,
                                       &buf_size,
                                       &size);
        if (ostatus != noErr)
        {
            return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
        }

        /* Allocate audio buffer */
        strm->audio_buf = (AudioBufferList*)pj_pool_alloc(strm->pool,
                          sizeof(AudioBufferList) + sizeof(AudioBuffer));
        if (!strm->audio_buf)
            return PJ_ENOMEM;

        strm->audio_buf->mNumberBuffers = 1;
        ab = &strm->audio_buf->mBuffers[0];
        ab->mNumberChannels = strm->streamFormat.mChannelsPerFrame;
        ab->mDataByteSize = buf_size * ab->mNumberChannels *
                            strm->param.bits_per_sample >> 3;
        ab->mData = pj_pool_alloc(strm->pool,
                                  ab->mDataByteSize);
        if (!ab->mData)
            return PJ_ENOMEM;

#else
        /* We will let AudioUnitRender() to allocate the buffer
         * for us later
         */
        strm->audio_buf = (AudioBufferList*)pj_pool_alloc(strm->pool,
                          sizeof(AudioBufferList) + sizeof(AudioBuffer));
        if (!strm->audio_buf)
            return PJ_ENOMEM;

        strm->audio_buf->mNumberBuffers = 1;
        strm->audio_buf->mBuffers[0].mNumberChannels =
                strm->streamFormat.mChannelsPerFrame;

#endif

        /* Allocate recording buffer (one frame's worth of samples) */
        strm->rec_buf = (pj_int16_t*)pj_pool_alloc(strm->pool,
                        strm->param.samples_per_frame *
                        strm->param.bits_per_sample >> 3);
        if (!strm->rec_buf)
            return PJ_ENOMEM;
        strm->rec_buf_count = 0;
    }

    /* Initialize the audio unit */
    ostatus = AudioUnitInitialize(*io_unit);
    if (ostatus != noErr) {
        return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
    }

    return PJ_SUCCESS;
}
| 1490 | |
/* API: create stream.
 * Builds a coreaudio_stream from the requested parameters: sets up the
 * 16-bit signed linear PCM stream format, applies route/EC settings
 * (which must happen before the audio units exist), creates one shared
 * audio unit when capture and playback use the same device or one unit
 * per direction otherwise, then registers the stream with the factory.
 * On failure the partially-built stream is torn down via
 * ca_stream_destroy().
 */
static pj_status_t ca_factory_create_stream(pjmedia_aud_dev_factory *f,
                                            const pjmedia_aud_param *param,
                                            pjmedia_aud_rec_cb rec_cb,
                                            pjmedia_aud_play_cb play_cb,
                                            void *user_data,
                                            pjmedia_aud_stream **p_aud_strm)
{
    struct coreaudio_factory *cf = (struct coreaudio_factory*)f;
    pj_pool_t *pool;
    struct coreaudio_stream *strm;
    pj_status_t status;

    /* Create and Initialize stream descriptor */
    pool = pj_pool_create(cf->pf, "coreaudio-dev", 1000, 1000, NULL);
    PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);

    strm = PJ_POOL_ZALLOC_T(pool, struct coreaudio_stream);
    pj_list_init(&strm->list_entry);
    strm->list_entry.stream = strm;
    strm->cf = cf;
    pj_memcpy(&strm->param, param, sizeof(*param));
    strm->pool = pool;
    strm->rec_cb = rec_cb;
    strm->play_cb = play_cb;
    strm->user_data = user_data;

    /* Set the stream format: packed signed-integer linear PCM derived
     * from the requested clock rate, channel count and sample size.
     */
    strm->streamFormat.mSampleRate       = param->clock_rate;
    strm->streamFormat.mFormatID         = kAudioFormatLinearPCM;
    strm->streamFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger
                                           | kLinearPCMFormatFlagIsPacked;
    strm->streamFormat.mBitsPerChannel   = strm->param.bits_per_sample;
    strm->streamFormat.mChannelsPerFrame = param->channel_count;
    strm->streamFormat.mBytesPerFrame    = strm->streamFormat.mChannelsPerFrame
                                           * strm->param.bits_per_sample >> 3;
    strm->streamFormat.mFramesPerPacket  = 1;
    strm->streamFormat.mBytesPerPacket   = strm->streamFormat.mBytesPerFrame *
                                           strm->streamFormat.mFramesPerPacket;

    /* Apply input/output routes settings before we create the audio units */
    if (param->flags & PJMEDIA_AUD_DEV_CAP_INPUT_ROUTE) {
        ca_stream_set_cap(&strm->base,
                          PJMEDIA_AUD_DEV_CAP_INPUT_ROUTE,
                          &param->input_route);
    }
    if (param->flags & PJMEDIA_AUD_DEV_CAP_OUTPUT_ROUTE) {
        ca_stream_set_cap(&strm->base,
                          PJMEDIA_AUD_DEV_CAP_OUTPUT_ROUTE,
                          &param->output_route);
    }
    if (param->flags & PJMEDIA_AUD_DEV_CAP_EC) {
        ca_stream_set_cap(&strm->base,
                          PJMEDIA_AUD_DEV_CAP_EC,
                          &param->ec_enabled);
    } else {
        /* EC defaults to disabled when the caller did not specify it */
        pj_bool_t ec = PJ_FALSE;
        ca_stream_set_cap(&strm->base,
                          PJMEDIA_AUD_DEV_CAP_EC, &ec);
    }

    strm->io_units[0] = strm->io_units[1] = NULL;
    if (param->dir == PJMEDIA_DIR_CAPTURE_PLAYBACK &&
        param->rec_id == param->play_id)
    {
        /* If both input and output are on the same device, only create
         * one audio unit to interface with the device.
         */
        status = create_audio_unit(cf->io_comp,
                                   cf->dev_info[param->rec_id].dev_id,
                                   param->dir, strm, &strm->io_units[0]);
        if (status != PJ_SUCCESS)
            goto on_error;
    } else {
        unsigned nunits = 0;

        if (param->dir & PJMEDIA_DIR_CAPTURE) {
            status = create_audio_unit(cf->io_comp,
                                       cf->dev_info[param->rec_id].dev_id,
                                       PJMEDIA_DIR_CAPTURE,
                                       strm, &strm->io_units[nunits++]);
            if (status != PJ_SUCCESS)
                goto on_error;
        }
        if (param->dir & PJMEDIA_DIR_PLAYBACK) {

            status = create_audio_unit(cf->io_comp,
                                       cf->dev_info[param->play_id].dev_id,
                                       PJMEDIA_DIR_PLAYBACK,
                                       strm, &strm->io_units[nunits++]);
            if (status != PJ_SUCCESS)
                goto on_error;
        }
    }

    /* Apply the remaining settings */
    if (param->flags & PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY) {
        ca_stream_get_cap(&strm->base,
                          PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY,
                          &strm->param.input_latency_ms);
    }
    if (param->flags & PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY) {
        ca_stream_get_cap(&strm->base,
                          PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY,
                          &strm->param.output_latency_ms);
    }
    if (param->flags & PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING) {
        ca_stream_set_cap(&strm->base,
                          PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING,
                          &param->output_vol);
    }

    /* Register the stream with the factory so session-wide events
     * (interruption, route change) can reach it.
     */
    pj_mutex_lock(strm->cf->mutex);
    pj_assert(pj_list_empty(&strm->list_entry));
    pj_list_insert_after(&strm->cf->streams, &strm->list_entry);
    pj_mutex_unlock(strm->cf->mutex);

    /* Done */
    strm->base.op = &stream_op;
    *p_aud_strm = &strm->base;

    return PJ_SUCCESS;

on_error:
    ca_stream_destroy((pjmedia_aud_stream *)strm);
    return status;
}
| 1618 | |
| 1619 | /* API: Get stream info. */ |
| 1620 | static pj_status_t ca_stream_get_param(pjmedia_aud_stream *s, |
| 1621 | pjmedia_aud_param *pi) |
| 1622 | { |
| 1623 | struct coreaudio_stream *strm = (struct coreaudio_stream*)s; |
| 1624 | |
| 1625 | PJ_ASSERT_RETURN(strm && pi, PJ_EINVAL); |
| 1626 | |
| 1627 | pj_memcpy(pi, &strm->param, sizeof(*pi)); |
| 1628 | |
| 1629 | /* Update the device capabilities' values */ |
| 1630 | if (ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY, |
| 1631 | &pi->input_latency_ms) == PJ_SUCCESS) |
| 1632 | { |
| 1633 | pi->flags |= PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY; |
| 1634 | } |
| 1635 | if (ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY, |
| 1636 | &pi->output_latency_ms) == PJ_SUCCESS) |
| 1637 | { |
| 1638 | pi->flags |= PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY; |
| 1639 | } |
| 1640 | if (ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING, |
| 1641 | &pi->output_vol) == PJ_SUCCESS) |
| 1642 | { |
| 1643 | pi->flags |= PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING; |
| 1644 | } |
| 1645 | if (ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_INPUT_ROUTE, |
| 1646 | &pi->input_route) == PJ_SUCCESS) |
| 1647 | { |
| 1648 | pi->flags |= PJMEDIA_AUD_DEV_CAP_INPUT_ROUTE; |
| 1649 | } |
| 1650 | if (ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_OUTPUT_ROUTE, |
| 1651 | &pi->output_route) == PJ_SUCCESS) |
| 1652 | { |
| 1653 | pi->flags |= PJMEDIA_AUD_DEV_CAP_OUTPUT_ROUTE; |
| 1654 | } |
| 1655 | if (ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_EC, |
| 1656 | &pi->ec_enabled) == PJ_SUCCESS) |
| 1657 | { |
| 1658 | pi->flags |= PJMEDIA_AUD_DEV_CAP_EC; |
| 1659 | } |
| 1660 | |
| 1661 | return PJ_SUCCESS; |
| 1662 | } |
| 1663 | |
| 1664 | /* API: get capability */ |
| 1665 | static pj_status_t ca_stream_get_cap(pjmedia_aud_stream *s, |
| 1666 | pjmedia_aud_dev_cap cap, |
| 1667 | void *pval) |
| 1668 | { |
| 1669 | struct coreaudio_stream *strm = (struct coreaudio_stream*)s; |
| 1670 | |
| 1671 | PJ_UNUSED_ARG(strm); |
| 1672 | |
| 1673 | PJ_ASSERT_RETURN(s && pval, PJ_EINVAL); |
| 1674 | |
| 1675 | if (cap==PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY && |
| 1676 | (strm->param.dir & PJMEDIA_DIR_CAPTURE)) |
| 1677 | { |
| 1678 | #if COREAUDIO_MAC |
| 1679 | UInt32 latency, size = sizeof(UInt32); |
| 1680 | |
| 1681 | /* Recording latency */ |
| 1682 | if (AudioUnitGetProperty (strm->io_units[0], |
| 1683 | kAudioDevicePropertyLatency, |
| 1684 | kAudioUnitScope_Input, |
| 1685 | 1, |
| 1686 | &latency, |
| 1687 | &size) == noErr) |
| 1688 | { |
| 1689 | UInt32 latency2; |
| 1690 | if (AudioUnitGetProperty (strm->io_units[0], |
| 1691 | kAudioDevicePropertyBufferFrameSize, |
| 1692 | kAudioUnitScope_Input, |
| 1693 | 1, |
| 1694 | &latency2, |
| 1695 | &size) == noErr) |
| 1696 | { |
| 1697 | strm->param.input_latency_ms = (latency + latency2) * 1000 / |
| 1698 | strm->param.clock_rate; |
| 1699 | strm->param.input_latency_ms++; |
| 1700 | } |
| 1701 | } |
| 1702 | #else |
| 1703 | Float32 latency, latency2; |
| 1704 | UInt32 size = sizeof(Float32); |
| 1705 | |
| 1706 | if ((AudioSessionGetProperty( |
| 1707 | kAudioSessionProperty_CurrentHardwareInputLatency, |
| 1708 | &size, &latency) == kAudioSessionNoError) && |
| 1709 | (AudioSessionGetProperty( |
| 1710 | kAudioSessionProperty_CurrentHardwareIOBufferDuration, |
| 1711 | &size, &latency2) == kAudioSessionNoError)) |
| 1712 | { |
| 1713 | strm->param.input_latency_ms = (unsigned) |
| 1714 | ((latency + latency2) * 1000); |
| 1715 | strm->param.input_latency_ms++; |
| 1716 | } |
| 1717 | #endif |
| 1718 | |
| 1719 | *(unsigned*)pval = strm->param.input_latency_ms; |
| 1720 | return PJ_SUCCESS; |
| 1721 | } else if (cap==PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY && |
| 1722 | (strm->param.dir & PJMEDIA_DIR_PLAYBACK)) |
| 1723 | { |
| 1724 | #if COREAUDIO_MAC |
| 1725 | UInt32 latency, size = sizeof(UInt32); |
| 1726 | AudioUnit *io_unit = strm->io_units[1] ? &strm->io_units[1] : |
| 1727 | &strm->io_units[0]; |
| 1728 | |
| 1729 | /* Playback latency */ |
| 1730 | if (AudioUnitGetProperty (*io_unit, |
| 1731 | kAudioDevicePropertyLatency, |
| 1732 | kAudioUnitScope_Output, |
| 1733 | 0, |
| 1734 | &latency, |
| 1735 | &size) == noErr) |
| 1736 | { |
| 1737 | UInt32 latency2; |
| 1738 | if (AudioUnitGetProperty (*io_unit, |
| 1739 | kAudioDevicePropertyBufferFrameSize, |
| 1740 | kAudioUnitScope_Output, |
| 1741 | 0, |
| 1742 | &latency2, |
| 1743 | &size) == noErr) |
| 1744 | { |
| 1745 | strm->param.output_latency_ms = (latency + latency2) * 1000 / |
| 1746 | strm->param.clock_rate; |
| 1747 | strm->param.output_latency_ms++; |
| 1748 | } |
| 1749 | } |
| 1750 | #else |
| 1751 | Float32 latency, latency2; |
| 1752 | UInt32 size = sizeof(Float32); |
| 1753 | |
| 1754 | if ((AudioSessionGetProperty( |
| 1755 | kAudioSessionProperty_CurrentHardwareOutputLatency, |
| 1756 | &size, &latency) == kAudioSessionNoError) && |
| 1757 | (AudioSessionGetProperty( |
| 1758 | kAudioSessionProperty_CurrentHardwareIOBufferDuration, |
| 1759 | &size, &latency2) == kAudioSessionNoError)) |
| 1760 | { |
| 1761 | strm->param.output_latency_ms = (unsigned) |
| 1762 | ((latency + latency2) * 1000); |
| 1763 | strm->param.output_latency_ms++; |
| 1764 | } |
| 1765 | #endif |
| 1766 | *(unsigned*)pval = (++strm->param.output_latency_ms * 2); |
| 1767 | return PJ_SUCCESS; |
| 1768 | } else if (cap==PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING && |
| 1769 | (strm->param.dir & PJMEDIA_DIR_PLAYBACK)) |
| 1770 | { |
| 1771 | OSStatus ostatus; |
| 1772 | Float32 volume; |
| 1773 | UInt32 size = sizeof(Float32); |
| 1774 | |
| 1775 | /* Output volume setting */ |
| 1776 | #if COREAUDIO_MAC |
| 1777 | ostatus = AudioUnitGetProperty (strm->io_units[1] ? strm->io_units[1] : |
| 1778 | strm->io_units[0], |
| 1779 | kAudioDevicePropertyVolumeScalar, |
| 1780 | kAudioUnitScope_Output, |
| 1781 | 0, |
| 1782 | &volume, |
| 1783 | &size); |
| 1784 | if (ostatus != noErr) |
| 1785 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 1786 | #else |
| 1787 | ostatus = AudioSessionGetProperty( |
| 1788 | kAudioSessionProperty_CurrentHardwareOutputVolume, |
| 1789 | &size, &volume); |
| 1790 | if (ostatus != kAudioSessionNoError) { |
| 1791 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 1792 | } |
| 1793 | #endif |
| 1794 | |
| 1795 | *(unsigned*)pval = (unsigned)(volume * 100); |
| 1796 | return PJ_SUCCESS; |
| 1797 | #if !COREAUDIO_MAC |
| 1798 | } else if (cap==PJMEDIA_AUD_DEV_CAP_INPUT_ROUTE && |
| 1799 | (strm->param.dir & PJMEDIA_DIR_CAPTURE)) |
| 1800 | { |
| 1801 | UInt32 btooth, size = sizeof(UInt32); |
| 1802 | OSStatus ostatus; |
| 1803 | |
| 1804 | ostatus = AudioSessionGetProperty ( |
| 1805 | kAudioSessionProperty_OverrideCategoryEnableBluetoothInput, |
| 1806 | &size, &btooth); |
| 1807 | if (ostatus != kAudioSessionNoError) { |
| 1808 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 1809 | } |
| 1810 | |
| 1811 | *(pjmedia_aud_dev_route*)pval = btooth? |
| 1812 | PJMEDIA_AUD_DEV_ROUTE_BLUETOOTH: |
| 1813 | PJMEDIA_AUD_DEV_ROUTE_DEFAULT; |
| 1814 | return PJ_SUCCESS; |
| 1815 | } else if (cap==PJMEDIA_AUD_DEV_CAP_OUTPUT_ROUTE && |
| 1816 | (strm->param.dir & PJMEDIA_DIR_PLAYBACK)) |
| 1817 | { |
| 1818 | CFStringRef route; |
| 1819 | UInt32 size = sizeof(CFStringRef); |
| 1820 | OSStatus ostatus; |
| 1821 | |
| 1822 | ostatus = AudioSessionGetProperty (kAudioSessionProperty_AudioRoute, |
| 1823 | &size, &route); |
| 1824 | if (ostatus != kAudioSessionNoError) { |
| 1825 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 1826 | } |
| 1827 | |
| 1828 | if (!route) { |
| 1829 | *(pjmedia_aud_dev_route*)pval = PJMEDIA_AUD_DEV_ROUTE_DEFAULT; |
| 1830 | } else if (CFStringHasPrefix(route, CFSTR("Headset"))) { |
| 1831 | *(pjmedia_aud_dev_route*)pval = PJMEDIA_AUD_DEV_ROUTE_EARPIECE; |
| 1832 | } else { |
| 1833 | *(pjmedia_aud_dev_route*)pval = PJMEDIA_AUD_DEV_ROUTE_DEFAULT; |
| 1834 | } |
| 1835 | |
| 1836 | CFRelease(route); |
| 1837 | |
| 1838 | return PJ_SUCCESS; |
| 1839 | } else if (cap==PJMEDIA_AUD_DEV_CAP_EC) { |
| 1840 | AudioComponentDescription desc; |
| 1841 | OSStatus ostatus; |
| 1842 | |
| 1843 | ostatus = AudioComponentGetDescription(strm->cf->io_comp, &desc); |
| 1844 | if (ostatus != noErr) { |
| 1845 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 1846 | } |
| 1847 | |
| 1848 | *(pj_bool_t*)pval = (desc.componentSubType == |
| 1849 | kAudioUnitSubType_VoiceProcessingIO); |
| 1850 | return PJ_SUCCESS; |
| 1851 | #endif |
| 1852 | } else { |
| 1853 | return PJMEDIA_EAUD_INVCAP; |
| 1854 | } |
| 1855 | } |
| 1856 | |
/* API: set capability.
 *
 * Apply a device capability setting to the stream.
 *
 * Mac build:  only PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING is settable.
 * iOS build:  input/output latency (preferred HW I/O buffer duration),
 *             input route (Bluetooth on/off), output route (loudspeaker
 *             override) and EC (audio unit selection) are settable via
 *             the AudioSession API.
 *
 * Returns PJ_SUCCESS on success, PJMEDIA_EAUD_INVCAP if the capability
 * is not settable (or not applicable to the stream direction), or a
 * Core Audio error translated by PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO().
 */
static pj_status_t ca_stream_set_cap(pjmedia_aud_stream *s,
                                     pjmedia_aud_dev_cap cap,
                                     const void *pval)
{
    struct coreaudio_stream *strm = (struct coreaudio_stream*)s;

    PJ_ASSERT_RETURN(s && pval, PJ_EINVAL);

#if COREAUDIO_MAC
    if (cap==PJMEDIA_AUD_DEV_CAP_OUTPUT_VOLUME_SETTING &&
	(strm->param.dir & PJMEDIA_DIR_PLAYBACK))
    {
	OSStatus ostatus;
	Float32 volume = *(unsigned*)pval;

	/* Output volume setting: convert the caller's 0..100 percentage
	 * to the 0.0..1.0 scalar Core Audio expects, and apply it to the
	 * playback unit (the second I/O unit when present, otherwise the
	 * first).
	 */
	volume /= 100.0;
	ostatus = AudioUnitSetProperty (strm->io_units[1] ? strm->io_units[1] :
			                strm->io_units[0],
	                                kAudioDevicePropertyVolumeScalar,
	                                kAudioUnitScope_Output,
	                                0,
	                                &volume,
	                                sizeof(Float32));
	if (ostatus != noErr) {
	    return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
	}
	strm->param.output_vol = *(unsigned*)pval;
	return PJ_SUCCESS;
    }

#else

    if ((cap==PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY &&
        (strm->param.dir & PJMEDIA_DIR_CAPTURE)) ||
        (cap==PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY &&
        (strm->param.dir & PJMEDIA_DIR_PLAYBACK)))
    {
	Float32 bufferDuration = *(unsigned *)pval;
	OSStatus ostatus;
	unsigned latency;

	/* For low-latency audio streaming, you can set this value to
	 * as low as 5 ms (the default is 23ms). However, lowering the
	 * latency may cause a decrease in audio quality.
	 */
	bufferDuration /= 1000;	/* caller passes ms; the session property
				 * is expressed in seconds */
	ostatus = AudioSessionSetProperty(
		      kAudioSessionProperty_PreferredHardwareIOBufferDuration,
		      sizeof(bufferDuration), &bufferDuration);
	if (ostatus != kAudioSessionNoError) {
	    PJ_LOG(4, (THIS_FILE,
		       "Error: cannot set the preferred buffer duration (%i)",
		       ostatus));
	    return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
	}

	/* Refresh the latency values cached in strm->param by re-querying
	 * the hardware; the value written into 'latency' itself is
	 * intentionally discarded, as are the return codes.
	 */
	ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_INPUT_LATENCY, &latency);
	ca_stream_get_cap(s, PJMEDIA_AUD_DEV_CAP_OUTPUT_LATENCY, &latency);

	return PJ_SUCCESS;
    } else if (cap==PJMEDIA_AUD_DEV_CAP_INPUT_ROUTE &&
	       (strm->param.dir & PJMEDIA_DIR_CAPTURE))
    {
	/* Enable Bluetooth input only when the requested route is
	 * BLUETOOTH; any other route value disables it.
	 */
	UInt32 btooth = *(pjmedia_aud_dev_route*)pval ==
		        PJMEDIA_AUD_DEV_ROUTE_BLUETOOTH ? 1 : 0;
	OSStatus ostatus;

	ostatus = AudioSessionSetProperty (
		      kAudioSessionProperty_OverrideCategoryEnableBluetoothInput,
		      sizeof(btooth), &btooth);
	if (ostatus != kAudioSessionNoError) {
	    return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
	}
	strm->param.input_route = *(pjmedia_aud_dev_route*)pval;
	return PJ_SUCCESS;
    } else if (cap==PJMEDIA_AUD_DEV_CAP_OUTPUT_ROUTE &&
	       (strm->param.dir & PJMEDIA_DIR_PLAYBACK))
    {
	OSStatus ostatus;
	/* Force the loudspeaker when requested; otherwise remove any
	 * override and fall back to the session's default route.
	 */
	UInt32 route = *(pjmedia_aud_dev_route*)pval ==
		       PJMEDIA_AUD_DEV_ROUTE_LOUDSPEAKER ?
		       kAudioSessionOverrideAudioRoute_Speaker :
		       kAudioSessionOverrideAudioRoute_None;

	ostatus = AudioSessionSetProperty (
		      kAudioSessionProperty_OverrideAudioRoute,
		      sizeof(route), &route);
	if (ostatus != kAudioSessionNoError) {
	    return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
	}
	strm->param.output_route = *(pjmedia_aud_dev_route*)pval;
	return PJ_SUCCESS;
    } else if (cap==PJMEDIA_AUD_DEV_CAP_EC) {
	AudioComponentDescription desc;
	AudioComponent io_comp;

	/* Select the I/O audio unit used from now on: VoiceProcessingIO
	 * (which has built-in echo cancellation) when EC is enabled,
	 * plain RemoteIO otherwise. Note that this only updates the
	 * factory's component (strm->cf->io_comp) and the stream param;
	 * it does not rebuild this stream's already-created audio units.
	 */
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = (*(pj_bool_t*)pval)?
				kAudioUnitSubType_VoiceProcessingIO :
				kAudioUnitSubType_RemoteIO;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;

	io_comp = AudioComponentFindNext(NULL, &desc);
	if (io_comp == NULL)
	    return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(-1);
	strm->cf->io_comp = io_comp;
	strm->param.ec_enabled = *(pj_bool_t*)pval;

        PJ_LOG(4, (THIS_FILE, "Using %s audio unit",
                   (desc.componentSubType ==
                    kAudioUnitSubType_RemoteIO? "RemoteIO":
                    "VoiceProcessingIO")));

	return PJ_SUCCESS;
    }
#endif

    return PJMEDIA_EAUD_INVCAP;
}
| 1980 | |
| 1981 | /* API: Start stream. */ |
| 1982 | static pj_status_t ca_stream_start(pjmedia_aud_stream *strm) |
| 1983 | { |
| 1984 | struct coreaudio_stream *stream = (struct coreaudio_stream*)strm; |
| 1985 | OSStatus ostatus; |
| 1986 | UInt32 i; |
| 1987 | |
| 1988 | if (stream->running) |
| 1989 | return PJ_SUCCESS; |
| 1990 | |
| 1991 | stream->quit_flag = 0; |
| 1992 | stream->interrupted = PJ_FALSE; |
| 1993 | stream->rec_buf_count = 0; |
| 1994 | stream->play_buf_count = 0; |
| 1995 | stream->resample_buf_count = 0; |
| 1996 | |
| 1997 | if (stream->resample) { |
| 1998 | ostatus = AudioConverterReset(stream->resample); |
| 1999 | if (ostatus != noErr) |
| 2000 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 2001 | } |
| 2002 | |
| 2003 | #if !COREAUDIO_MAC |
| 2004 | AudioSessionSetActive(true); |
| 2005 | #endif |
| 2006 | |
| 2007 | for (i = 0; i < 2; i++) { |
| 2008 | if (stream->io_units[i] == NULL) break; |
| 2009 | ostatus = AudioOutputUnitStart(stream->io_units[i]); |
| 2010 | if (ostatus != noErr) { |
| 2011 | if (i == 1) |
| 2012 | AudioOutputUnitStop(stream->io_units[0]); |
| 2013 | return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus); |
| 2014 | } |
| 2015 | } |
| 2016 | |
| 2017 | stream->running = PJ_TRUE; |
| 2018 | |
| 2019 | PJ_LOG(4, (THIS_FILE, "core audio stream started")); |
| 2020 | |
| 2021 | return PJ_SUCCESS; |
| 2022 | } |
| 2023 | |
/* API: Stop stream.
 *
 * Stops the I/O audio unit(s), marks the stream as no longer running
 * and, on iOS, deactivates the shared audio session when no other
 * stream of the same factory is still running. Idempotent: returns
 * PJ_SUCCESS immediately when the stream is already stopped.
 */
static pj_status_t ca_stream_stop(pjmedia_aud_stream *strm)
{
    struct coreaudio_stream *stream = (struct coreaudio_stream*)strm;
    OSStatus ostatus;
    unsigned i;
    int should_deactivate;
    struct stream_list *it, *itBegin;

    if (!stream->running)
	return PJ_SUCCESS;

    /* Stop the I/O unit(s). If stopping the first unit fails while a
     * second unit exists, still attempt to stop the second one before
     * returning the error.
     */
    for (i = 0; i < 2; i++) {
	if (stream->io_units[i] == NULL) break;
	ostatus = AudioOutputUnitStop(stream->io_units[i]);
	if (ostatus != noErr) {
	    if (i == 0 && stream->io_units[1])
		AudioOutputUnitStop(stream->io_units[1]);
	    return PJMEDIA_AUDIODEV_ERRNO_FROM_COREAUDIO(ostatus);
	}
    }

    /* Check whether we need to deactivate the audio session.
     * Mark this stream as stopped first -- under the factory mutex, so
     * the scan below and concurrent stops see a consistent view -- then
     * walk the factory's stream list: the session may only be
     * deactivated once no stream at all is still running.
     */
    pj_mutex_lock(stream->cf->mutex);
    pj_assert(!pj_list_empty(&stream->cf->streams));
    pj_assert(!pj_list_empty(&stream->list_entry));
    stream->running = PJ_FALSE;
    should_deactivate = PJ_TRUE;
    itBegin = &stream->cf->streams;
    for (it = itBegin->next; it != itBegin; it = it->next) {
	if (it->stream->running) {
	    should_deactivate = PJ_FALSE;
	    break;
	}
    }
    pj_mutex_unlock(stream->cf->mutex);

#if !COREAUDIO_MAC
    /* iOS only: release the shared audio session when this was the last
     * running stream. (On the Mac build, should_deactivate is computed
     * but never read.)
     */
    if (should_deactivate)
	AudioSessionSetActive(false);
#endif

    /* Signal the callback threads to quit and reset the thread
     * registration bookkeeping so the descriptors can be reused on the
     * next start.
     */
    stream->quit_flag = 1;
    stream->play_thread_initialized = 0;
    stream->rec_thread_initialized = 0;
    pj_bzero(stream->rec_thread_desc, sizeof(pj_thread_desc));
    pj_bzero(stream->play_thread_desc, sizeof(pj_thread_desc));

    PJ_LOG(4, (THIS_FILE, "core audio stream stopped"));

    return PJ_SUCCESS;
}
| 2076 | |
| 2077 | |
| 2078 | /* API: Destroy stream. */ |
| 2079 | static pj_status_t ca_stream_destroy(pjmedia_aud_stream *strm) |
| 2080 | { |
| 2081 | struct coreaudio_stream *stream = (struct coreaudio_stream*)strm; |
| 2082 | unsigned i; |
| 2083 | |
| 2084 | PJ_ASSERT_RETURN(stream != NULL, PJ_EINVAL); |
| 2085 | |
| 2086 | ca_stream_stop(strm); |
| 2087 | |
| 2088 | for (i = 0; i < 2; i++) { |
| 2089 | if (stream->io_units[i]) { |
| 2090 | AudioUnitUninitialize(stream->io_units[i]); |
| 2091 | AudioComponentInstanceDispose(stream->io_units[i]); |
| 2092 | stream->io_units[i] = NULL; |
| 2093 | } |
| 2094 | } |
| 2095 | |
| 2096 | if (stream->resample) |
| 2097 | AudioConverterDispose(stream->resample); |
| 2098 | |
| 2099 | pj_mutex_lock(stream->cf->mutex); |
| 2100 | if (!pj_list_empty(&stream->list_entry)) |
| 2101 | pj_list_erase(&stream->list_entry); |
| 2102 | pj_mutex_unlock(stream->cf->mutex); |
| 2103 | |
| 2104 | pj_pool_release(stream->pool); |
| 2105 | |
| 2106 | return PJ_SUCCESS; |
| 2107 | } |
| 2108 | |
| 2109 | #endif /* PJMEDIA_AUDIO_DEV_HAS_COREAUDIO */ |