This article collects and summarizes typical usage examples of the C++ AudioUnitInitialize function, drawn from open-source projects. If you have been wondering what AudioUnitInitialize does, how to call it, or what real-world uses look like, the examples below should help. In total, 28 code examples of AudioUnitInitialize are shown, sorted by popularity by default.

Example 1: gst_core_audio_open

gboolean
gst_core_audio_open (GstCoreAudio * core_audio)
{
  OSStatus status;

  /* core_audio->osxbuf is already locked at this point */
  core_audio->cached_caps_valid = FALSE;
  gst_caps_replace (&core_audio->cached_caps, NULL);

  if (!gst_core_audio_open_impl (core_audio))
    return FALSE;

  /* Add property listener */
  status = AudioUnitAddPropertyListener (core_audio->audiounit,
      kAudioUnitProperty_AudioChannelLayout,
      _audio_unit_property_listener, core_audio);
  if (status != noErr) {
    GST_ERROR_OBJECT (core_audio, "Failed to add audio channel layout property "
        "listener for AudioUnit: %d", (int) status);
  }
  status = AudioUnitAddPropertyListener (core_audio->audiounit,
      kAudioUnitProperty_StreamFormat,
      _audio_unit_property_listener, core_audio);
  if (status != noErr) {
    GST_ERROR_OBJECT (core_audio, "Failed to add stream format property "
        "listener for AudioUnit: %d", (int) status);
  }

  /* Initialize the AudioUnit. We keep the audio unit initialized early so that
   * we can probe the underlying device. */
  status = AudioUnitInitialize (core_audio->audiounit);
  if (status) {
    GST_ERROR_OBJECT (core_audio, "Failed to initialize AudioUnit: %d",
        (int) status);
    return FALSE;
  }

  return TRUE;
}
Developer ID: thiblahute, Project: gst-plugins-good, Lines: 39
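Nearly every example on this page follows the same skeleton: describe the component you want, instantiate it, set a stream format and a render callback on the input scope of the output bus, call AudioUnitInitialize, and finally AudioOutputUnitStart. For reference, here is a minimal, self-contained sketch of that sequence for the macOS default output unit. It is a simplified composite of the examples below, not code from any one project; the silent render callback and the absence of error recovery are deliberate simplifications.

#include <AudioToolbox/AudioToolbox.h>
#include <string.h>

/* Render callback: fill every output buffer with silence. */
static OSStatus render_silence(void *refcon, AudioUnitRenderActionFlags *flags,
                               const AudioTimeStamp *ts, UInt32 bus,
                               UInt32 frames, AudioBufferList *io_data)
{
  for (UInt32 i = 0; i < io_data->mNumberBuffers; i++)
    memset(io_data->mBuffers[i].mData, 0, io_data->mBuffers[i].mDataByteSize);
  return noErr;
}

int open_default_output(AudioUnit *out_unit)
{
  /* 1. Describe and locate the default output component. */
  AudioComponentDescription desc = { 0 };
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  AudioComponent comp = AudioComponentFindNext(NULL, &desc);
  if (comp == NULL || AudioComponentInstanceNew(comp, out_unit) != noErr)
    return -1;

  /* 2. Describe the PCM we will feed it: packed 16-bit signed stereo, 44.1 kHz. */
  AudioStreamBasicDescription fmt = { 0 };
  fmt.mFormatID = kAudioFormatLinearPCM;
  fmt.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
  fmt.mSampleRate = 44100.0;
  fmt.mChannelsPerFrame = 2;
  fmt.mBitsPerChannel = 16;
  fmt.mFramesPerPacket = 1;
  fmt.mBytesPerFrame = 4;
  fmt.mBytesPerPacket = 4;
  if (AudioUnitSetProperty(*out_unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &fmt, sizeof(fmt)) != noErr)
    return -1;

  /* 3. Install the render callback on the input scope of the output bus. */
  AURenderCallbackStruct cb = { render_silence, NULL };
  if (AudioUnitSetProperty(*out_unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Input, 0, &cb, sizeof(cb)) != noErr)
    return -1;

  /* 4. Initialize, then start rendering. */
  if (AudioUnitInitialize(*out_unit) != noErr)
    return -1;
  return AudioOutputUnitStart(*out_unit) == noErr ? 0 : -1;
}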
Example 2: gst_core_audio_initialize

gboolean
gst_core_audio_initialize (GstCoreAudio * core_audio,
    AudioStreamBasicDescription format, GstCaps * caps, gboolean is_passthrough)
{
  guint32 frame_size;
  OSStatus status;

  GST_DEBUG_OBJECT (core_audio,
      "Initializing: passthrough:%d caps:%" GST_PTR_FORMAT, is_passthrough,
      caps);

  if (!gst_core_audio_initialize_impl (core_audio, format, caps,
          is_passthrough, &frame_size)) {
    goto error;
  }

  if (core_audio->is_src) {
    /* create AudioBufferList needed for recording */
    core_audio->recBufferList =
        buffer_list_alloc (format.mChannelsPerFrame,
        frame_size * format.mBytesPerFrame);
  }

  /* Initialize the AudioUnit */
  status = AudioUnitInitialize (core_audio->audiounit);
  if (status) {
    GST_ERROR_OBJECT (core_audio, "Failed to initialise AudioUnit: %d",
        (int) status);
    goto error;
  }

  return TRUE;

error:
  if (core_audio->is_src && core_audio->recBufferList) {
    buffer_list_free (core_audio->recBufferList);
    core_audio->recBufferList = NULL;
  }
  return FALSE;
}
Developer ID: jcaden, Project: gst-plugins-good, Lines: 39
Example 3: AudioUnitInitialize

bool auLoader::initialize()
{
  /** Algorithm: **/
  /** Call the AU's Initialize method **/
  OSStatus err = AudioUnitInitialize(m_plugin);
  if(err != noErr)
  {
    debug(LOG_ERROR, "Could not initialize plugin");
    return false;
  }

  /** Set up output buffers **/
  m_buffer_list = (AudioBufferList *)malloc(offsetof(AudioBufferList, mBuffers[MAX_CHANNELS]));
  m_buffer_list->mNumberBuffers = MAX_CHANNELS;

  /** Connect input properties **/
  AURenderCallbackStruct callback;
  callback.inputProc = this->inputCallback;
  callback.inputProcRefCon = this;

  /** Set up render notifications **/
  err = AudioUnitSetProperty(m_plugin, kAudioUnitProperty_SetRenderCallback,
                             kAudioUnitScope_Input, 0, &callback, sizeof(callback));
  if(err != noErr)
  {
    debug(LOG_WARN, "Could not configure inputs");
  }

  err = AudioUnitAddRenderNotify(m_plugin, this->renderNotify, NULL);
  if(err != noErr)
  {
    debug(LOG_ERROR, "Could not set up render notification");
  }

  debug(LOG_INFO, "AU initialized");
  return true;
}
Developer ID: Epitek, Project: KickMaker, Lines: 37
Example 4: iOSCoreAudioInit

void iOSCoreAudioInit()
{
    if (!audioInstance) {
        OSErr err;

        // first, grab the default output
        AudioComponentDescription defaultOutputDescription;
        defaultOutputDescription.componentType = kAudioUnitType_Output;
        defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
        defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
        defaultOutputDescription.componentFlags = 0;
        defaultOutputDescription.componentFlagsMask = 0;
        AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);

        // create our instance
        err = AudioComponentInstanceNew(defaultOutput, &audioInstance);
        if (err != noErr) {
            audioInstance = nil;
            return;
        }

        // create our callback so we can give it the audio data
        AURenderCallbackStruct input;
        input.inputProc = iOSCoreAudioCallback;
        input.inputProcRefCon = NULL;
        err = AudioUnitSetProperty(audioInstance,
                                   kAudioUnitProperty_SetRenderCallback,
                                   kAudioUnitScope_Input,
                                   0,
                                   &input,
                                   sizeof(input));
        if (err != noErr) {
            AudioComponentInstanceDispose(audioInstance);
            audioInstance = nil;
            return;
        }

        // setup the audio format we'll be using (stereo pcm)
        AudioStreamBasicDescription streamFormat;
        memset(&streamFormat, 0, sizeof(streamFormat));
        streamFormat.mSampleRate = SAMPLE_RATE;
        streamFormat.mFormatID = kAudioFormatLinearPCM;
        streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        streamFormat.mBitsPerChannel = sizeof(AudioSampleType) * 8;
        streamFormat.mChannelsPerFrame = 2;
        streamFormat.mFramesPerPacket = 1;
        streamFormat.mBytesPerFrame = (streamFormat.mBitsPerChannel / 8) * streamFormat.mChannelsPerFrame;
        streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame * streamFormat.mFramesPerPacket;
        err = AudioUnitSetProperty(audioInstance,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   &streamFormat,
                                   sizeof(AudioStreamBasicDescription));
        if (err != noErr) {
            AudioComponentInstanceDispose(audioInstance);
            audioInstance = nil;
            return;
        }

        // k, all setup, so init
        err = AudioUnitInitialize(audioInstance);
        if (err != noErr) {
            AudioComponentInstanceDispose(audioInstance);
            audioInstance = nil;
            return;
        }

        // finally start playback
        err = AudioOutputUnitStart(audioInstance);
        if (err != noErr) {
            AudioUnitUninitialize(audioInstance);
            AudioComponentInstanceDispose(audioInstance);
            audioInstance = nil;
            return;
        }

        // we're good to go
    }
}
Developer ID: 173210, Project: ppsspp, Lines: 80
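The error paths in Example 4 also sketch the correct teardown order: a started unit must be stopped before it is uninitialized, and uninitialized before the instance is disposed. A minimal shutdown counterpart might look like the following; the function name and pointer-to-global style are illustrative assumptions, not code from the ppsspp project.

#include <AudioToolbox/AudioToolbox.h>

/* Hypothetical counterpart to iOSCoreAudioInit: undo the setup in reverse order. */
void core_audio_shutdown(AudioComponentInstance *instance)
{
    if (*instance == NULL)
        return;
    AudioOutputUnitStop(*instance);           /* stop the render callbacks */
    AudioUnitUninitialize(*instance);         /* undo AudioUnitInitialize */
    AudioComponentInstanceDispose(*instance); /* release the component instance */
    *instance = NULL;
}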
Example 5: AuHAL_open

//......... part of the code omitted here .........

    /* (inside the isInput branch; the start of the function is omitted above) */
    psize = sizeof(AudioDeviceID);
    /* for input, select device AFTER enabling IO */
    AudioUnitSetProperty(cdata->inunit,
                         kAudioOutputUnitProperty_CurrentDevice,
                         kAudioUnitScope_Global, isInput, &dev, psize);
    aunit = &(cdata->inunit);
  }
  else {
    AudioComponentInstanceNew(HALOutput, &(cdata->outunit));
    psize = sizeof(AudioDeviceID);
    /* for output, select device BEFORE enabling IO */
    AudioUnitSetProperty(cdata->outunit,
                         kAudioOutputUnitProperty_CurrentDevice,
                         kAudioUnitScope_Global, isInput, &dev, psize);
    enableIO = 1;
    AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Output, 0, &enableIO, sizeof(enableIO));
    enableIO = 0;
    AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Input, 1, &enableIO, sizeof(enableIO));
    aunit = &(cdata->outunit);
  }

  /* now set the buffer size */
  psize = sizeof(AudioDeviceID);
  AudioUnitGetProperty(*aunit, kAudioOutputUnitProperty_CurrentDevice,
                       kAudioUnitScope_Global, isInput, &dev, &psize);
  prop.mSelector = kAudioDevicePropertyBufferFrameSize;
  psize = 4;
  AudioObjectSetPropertyData(dev, &prop, 0, NULL, psize, &bufframes);

  psize = sizeof(maxFPS);
  AudioUnitGetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
                       kAudioUnitScope_Global, isInput, &maxFPS, &psize);
  AudioUnitSetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
                       kAudioUnitScope_Global, isInput, &bufframes, sizeof(UInt32));

  /* set the stream properties */
  psize = sizeof(AudioStreamBasicDescription);
  AudioUnitGetProperty(*aunit, kAudioUnitProperty_StreamFormat,
                       (isInput ? kAudioUnitScope_Output : kAudioUnitScope_Input),
                       isInput, &format, &psize);
  format.mSampleRate = srate;
  format.mFormatID = kAudioFormatLinearPCM;
  format.mFormatFlags = kAudioFormatFlagsCanonical | kLinearPCMFormatFlagIsNonInterleaved;
  format.mBytesPerPacket = sizeof(AudioUnitSampleType);
  format.mFramesPerPacket = 1;
  format.mBytesPerFrame = sizeof(AudioUnitSampleType);
  format.mChannelsPerFrame = nchnls;
  format.mBitsPerChannel = sizeof(AudioUnitSampleType)*8;
  AudioUnitSetProperty(*aunit, kAudioUnitProperty_StreamFormat,
                       (isInput ? kAudioUnitScope_Output : kAudioUnitScope_Input),
                       isInput, &format, sizeof(AudioStreamBasicDescription));

  /* set the callbacks and open the device */
  if(!isInput) {
    AURenderCallbackStruct output;
    output.inputProc = Csound_Render;
    output.inputProcRefCon = cdata;
    AudioUnitSetProperty(*aunit, kAudioUnitProperty_SetRenderCallback,
                         kAudioUnitScope_Input, isInput, &output, sizeof(output));
    AudioUnitInitialize(*aunit);
    AudioOutputUnitStart(*aunit);
    csound->Message(csound,
                    Str("***** AuHAL module: output device open with %d "
                        "buffer frames\n"), bufframes);
    cdata->disp = 0;
  }
  else {
    AURenderCallbackStruct input;
    AudioBufferList *CAInputData =
      (AudioBufferList*)malloc(sizeof(UInt32) + cdata->inchnls * sizeof(AudioBuffer));
    CAInputData->mNumberBuffers = cdata->inchnls;
    for (i = 0; i < cdata->inchnls; i++) {
      CAInputData->mBuffers[i].mNumberChannels = 1;
      CAInputData->mBuffers[i].mDataByteSize = bufframes * sizeof(AudioUnitSampleType);
      CAInputData->mBuffers[i].mData = calloc(bufframes, sizeof(AudioUnitSampleType));
    }
    cdata->inputdata = CAInputData;
    input.inputProc = Csound_Input;
    input.inputProcRefCon = cdata;
    AudioUnitSetProperty(*aunit, kAudioOutputUnitProperty_SetInputCallback,
                         kAudioUnitScope_Input, isInput, &input, sizeof(input));
    AudioUnitInitialize(*aunit);
    AudioOutputUnitStart(*aunit);
    csound->Message(csound,
                    "***** AuHAL module: input device open with %d buffer frames\n",
                    bufframes);
  }
  if(!cdata->disp)
    csound->Message(csound,
                    "==========================================================\n");
  cdata->disp = 0;
  return 0;
}
Developer ID: BlakeJarvis, Project: csound, Lines: 101
Example 6: memset

int CoreAudioDriver::init( unsigned bufferSize )
{
    OSStatus err = noErr;

    m_pOut_L = new float[ m_nBufferSize ];
    m_pOut_R = new float[ m_nBufferSize ];

    memset ( m_pOut_L, 0, m_nBufferSize * sizeof( float ) );
    memset ( m_pOut_R, 0, m_nBufferSize * sizeof( float ) );

    // Get Component
    AudioComponent compOutput;
    AudioComponentDescription descAUHAL;
    descAUHAL.componentType = kAudioUnitType_Output;
    descAUHAL.componentSubType = kAudioUnitSubType_HALOutput;
    descAUHAL.componentManufacturer = kAudioUnitManufacturer_Apple;
    descAUHAL.componentFlags = 0;
    descAUHAL.componentFlagsMask = 0;

    compOutput = AudioComponentFindNext( NULL, &descAUHAL );
    if ( compOutput == NULL ) {
        ERRORLOG( "Error in FindNextComponent" );
        //exit (-1);
    }

    err = AudioComponentInstanceNew( compOutput, &m_outputUnit );
    if ( err != noErr ) {
        ERRORLOG( "Error Opening Component" );
    }

    // Get Current Output Device
    retrieveDefaultDevice();

    // Set AUHAL to Current Device
    err = AudioUnitSetProperty( m_outputUnit,
                                kAudioOutputUnitProperty_CurrentDevice,
                                kAudioUnitScope_Global,
                                0,
                                &m_outputDevice,
                                sizeof( m_outputDevice ) );
    if ( err != noErr ) {
        ERRORLOG( "Could not set Current Device" );
    }

    AudioStreamBasicDescription asbdesc;
    asbdesc.mSampleRate = ( Float64 )m_nSampleRate;
    asbdesc.mFormatID = kAudioFormatLinearPCM;
    asbdesc.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    asbdesc.mBytesPerPacket = sizeof( Float32 );
    asbdesc.mFramesPerPacket = 1;
    asbdesc.mBytesPerFrame = sizeof( Float32 );
    asbdesc.mChannelsPerFrame = 2;	// comix: was set to 1
    asbdesc.mBitsPerChannel = 32;

    err = AudioUnitSetProperty( m_outputUnit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input,
                                0,
                                &asbdesc,
                                sizeof( AudioStreamBasicDescription ) );

    // Set Render Callback
    AURenderCallbackStruct out;
    out.inputProc = renderProc;
    out.inputProcRefCon = ( void * )this;
    err = AudioUnitSetProperty( m_outputUnit,
                                kAudioUnitProperty_SetRenderCallback,
                                kAudioUnitScope_Global,
                                0,
                                &out,
                                sizeof( out ) );
    if ( err != noErr ) {
        ERRORLOG( "Could not Set Render Callback" );
    }

    // Initialize AUHAL
    err = AudioUnitInitialize( m_outputUnit );
    if ( err != noErr ) {
        ERRORLOG( "Could not Initialize AudioUnit" );
    }

    return 0;
}
Developer ID: AdamFf, Project: hydrogen, Lines: 93
Example 7: RingBuffer

//......... part of the code omitted here .........

		if (audioComponent == NULL) {
			return;
		}

		error = AudioComponentInstanceNew(audioComponent, &_au);
		if (error != noErr) {
			return;
		}
	} else {
		ComponentDescription audioDesc;
		audioDesc.componentType = kAudioUnitType_Output;
		audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
		audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
		audioDesc.componentFlags = 0;
		audioDesc.componentFlagsMask = 0;

		Component audioComponent = FindNextComponent(NULL, &audioDesc);
		if (audioComponent == NULL) {
			return;
		}

		error = OpenAComponent(audioComponent, &_au);
		if (error != noErr) {
			return;
		}
	}
#else
	ComponentDescription audioDesc;
	audioDesc.componentType = kAudioUnitType_Output;
	audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
	audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
	audioDesc.componentFlags = 0;
	audioDesc.componentFlagsMask = 0;

	Component audioComponent = FindNextComponent(NULL, &audioDesc);
	if (audioComponent == NULL) {
		return;
	}

	error = OpenAComponent(audioComponent, &_au);
	if (error != noErr) {
		return;
	}
#endif

	// Set the render callback
	AURenderCallbackStruct callback;
	callback.inputProc = &CoreAudioOutputRenderCallback;
	callback.inputProcRefCon = _buffer;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_SetRenderCallback,
	                             kAudioUnitScope_Input,
	                             0,
	                             &callback,
	                             sizeof(callback) );
	if(error != noErr) {
		return;
	}

	// Set up the audio unit for audio streaming
	AudioStreamBasicDescription outputFormat;
	outputFormat.mSampleRate = SPU_SAMPLE_RATE;
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsPacked;
	outputFormat.mBytesPerPacket = SPU_SAMPLE_SIZE;
	outputFormat.mFramesPerPacket = 1;
	outputFormat.mBytesPerFrame = SPU_SAMPLE_SIZE;
	outputFormat.mChannelsPerFrame = SPU_NUMBER_CHANNELS;
	outputFormat.mBitsPerChannel = SPU_SAMPLE_RESOLUTION;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_StreamFormat,
	                             kAudioUnitScope_Input,
	                             0,
	                             &outputFormat,
	                             sizeof(outputFormat) );
	if(error != noErr) {
		return;
	}

	// Initialize our new audio unit
	error = AudioUnitInitialize(_au);
	if(error != noErr) {
		return;
	}
}
Developer ID: MoochMcGee, Project: desmume-plus, Lines: 101
Example 8: sa_stream_open

int
sa_stream_open(sa_stream_t *s) {
  ComponentDescription desc;
  Component comp;
  AURenderCallbackStruct input;
  AudioStreamBasicDescription fmt;

  if (s == NULL) {
    return SA_ERROR_NO_INIT;
  }
  if (s->output_unit != NULL) {
    return SA_ERROR_INVALID;
  }

  /*
   * Open the default audio output unit.
   */
  desc.componentType         = kAudioUnitType_Output;
  desc.componentSubType      = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags        = 0;
  desc.componentFlagsMask    = 0;

  comp = FindNextComponent(NULL, &desc);
  if (comp == NULL) {
    return SA_ERROR_NO_DEVICE;
  }

  if (OpenAComponent(comp, &s->output_unit) != noErr) {
    return SA_ERROR_NO_DEVICE;
  }

  /*
   * Set up the render callback used to feed audio data into the output unit.
   */
  input.inputProc = audio_callback;
  input.inputProcRefCon = s;
  if (AudioUnitSetProperty(s->output_unit, kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input, 0, &input, sizeof(input)) != 0) {
    return SA_ERROR_SYSTEM;
  }

  /*
   * Set up the format description for our audio data. Apple uses the
   * following terminology:
   *
   *   sample = a single data value for one channel
   *   frame  = a set of samples that includes one sample for each channel
   *   packet = the smallest indivisible block of audio data; for uncompressed
   *            audio (which is what we have), this is one frame
   *   rate   = the number of complete frames per second
   *
   * Note that this definition of frame differs from, well, pretty much everyone
   * else's. See this really long link for more info:
   *
   *   http://developer.apple.com/documentation/MusicAudio/Reference/CoreAudioDataTypesRef/Reference/reference.html#//apple_ref/c/tdef/AudioStreamBasicDescription
   */
  fmt.mFormatID    = kAudioFormatLinearPCM;
  fmt.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
#ifdef __BIG_ENDIAN__
                     kLinearPCMFormatFlagIsBigEndian |
#endif
                     kLinearPCMFormatFlagIsPacked;
  fmt.mSampleRate       = s->rate;
  fmt.mChannelsPerFrame = s->n_channels;
  fmt.mBitsPerChannel   = s->bytes_per_ch * 8;
  fmt.mFramesPerPacket  = 1;  /* uncompressed audio */
  fmt.mBytesPerFrame    = fmt.mChannelsPerFrame * fmt.mBitsPerChannel / 8;
  fmt.mBytesPerPacket   = fmt.mBytesPerFrame * fmt.mFramesPerPacket;

  /*
   * We're feeding data in to the output bus of the audio system, so we set
   * the format description on the input scope of the device, using the very
   * obvious element value of 0 to indicate the output bus.
   *
   *   http://developer.apple.com/technotes/tn2002/tn2091.html
   */
  if (AudioUnitSetProperty(s->output_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &fmt, sizeof(AudioStreamBasicDescription)) != 0) {
    return SA_ERROR_NOT_SUPPORTED;
  }

  if (AudioUnitInitialize(s->output_unit) != 0) {
    return SA_ERROR_SYSTEM;
  }

  return SA_SUCCESS;
}
Developer ID: AshishNamdev, Project: mozilla-central, Lines: 89
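The terminology comment in Example 8 is worth internalizing: in Core Audio a frame is one sample per channel, and for uncompressed PCM a packet is exactly one frame. Once the rate, channel count, and sample width are fixed, the remaining AudioStreamBasicDescription fields are pure arithmetic. The helper below sketches that arithmetic for packed, interleaved, signed-integer PCM; make_lpcm_asbd is a hypothetical name, not part of any API used on this page.

#include <CoreAudio/CoreAudioTypes.h>

static AudioStreamBasicDescription
make_lpcm_asbd(Float64 rate, UInt32 channels, UInt32 bytes_per_channel)
{
  AudioStreamBasicDescription fmt = { 0 };
  fmt.mFormatID         = kAudioFormatLinearPCM;
  fmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
                          kLinearPCMFormatFlagIsPacked;
  fmt.mSampleRate       = rate;
  fmt.mChannelsPerFrame = channels;
  fmt.mBitsPerChannel   = bytes_per_channel * 8;
  fmt.mFramesPerPacket  = 1;                            /* uncompressed audio */
  fmt.mBytesPerFrame    = channels * bytes_per_channel; /* one sample per channel */
  fmt.mBytesPerPacket   = fmt.mBytesPerFrame * fmt.mFramesPerPacket;
  return fmt;
}

/* e.g. make_lpcm_asbd(44100.0, 2, 2) describes 16-bit stereo at 44.1 kHz:
 * 4 bytes per frame and, since mFramesPerPacket is 1, 4 bytes per packet. */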
Example 9: audiounit_stream_init

//......... part of the code omitted here .........

    /* (the CUBEB_SAMPLE_S16LE case label falls in the omitted part) */
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
                       kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
                       kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  comp = FindNextComponent(NULL, &desc);
#else
  comp = AudioComponentFindNext(NULL, &desc);
#endif
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;

#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  r = OpenAComponent(comp, &stm->unit);
#else
  r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  input.inputProc = audiounit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
  if (buffer_size % ss.mBytesPerFrame != 0) {
    buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
  }
  assert(buffer_size % ss.mBytesPerFrame == 0);

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  *stream = stm;

  return CUBEB_OK;
}
Developer ID: BrunoReX, Project: palemoon, Lines: 101
Example 10: zeromem

//......... part of the code omitted here .........

#endif

	AudioStreamBasicDescription strdesc;

	zeromem(&strdesc, sizeof(strdesc));
	UInt32 size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 2: // Stereo
		case 4: // Surround 3.1
		case 6: // Surround 5.1
		case 8: // Surround 7.1
			channels = strdesc.mChannelsPerFrame;
			break;

		default:
			// Unknown number of channels, default to stereo
			channels = 2;
			break;
	}

	zeromem(&strdesc, sizeof(strdesc));
	size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 1: // Mono
			capture_channels = 1;
			break;

		case 2: // Stereo
			capture_channels = 2;
			break;

		default:
			// Unknown number of channels, default to stereo
			capture_channels = 2;
			break;
	}

	mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);

	zeromem(&strdesc, sizeof(strdesc));
	strdesc.mFormatID = kAudioFormatLinearPCM;
	strdesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
	strdesc.mChannelsPerFrame = channels;
	strdesc.mSampleRate = mix_rate;
	strdesc.mFramesPerPacket = 1;
	strdesc.mBitsPerChannel = 16;
	strdesc.mBytesPerFrame = strdesc.mBitsPerChannel * strdesc.mChannelsPerFrame / 8;
	strdesc.mBytesPerPacket = strdesc.mBytesPerFrame * strdesc.mFramesPerPacket;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	strdesc.mChannelsPerFrame = capture_channels;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
	// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
	buffer_frames = closest_power_of_2(latency * mix_rate / 1000);

#ifdef OSX_ENABLED
	result = AudioUnitSetProperty(audio_unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, kOutputBus, &buffer_frames, sizeof(UInt32));
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

	unsigned int buffer_size = buffer_frames * channels;
	samples_in.resize(buffer_size);
	input_buf.resize(buffer_size);
	input_buffer.resize(buffer_size * 8);
	input_position = 0;
	input_size = 0;

	print_verbose("CoreAudio: detected " + itos(channels) + " channels");
	print_verbose("CoreAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");

	AURenderCallbackStruct callback;
	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::output_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::input_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	result = AudioUnitInitialize(audio_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	return OK;
}
Developer ID: DSeanLaw, Project: godot, Lines: 101
Example 11: FindNextComponent

void *runPluginLoop(void *plug) {
  AudioUnit outputUnit;
  OSStatus err = noErr;

  // Open the default output unit
  ComponentDescription desc;
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  Component comp = FindNextComponent(NULL, &desc);
  if(comp == NULL) {
    debug(LOG_ERROR, "FindNextComponent failed");
    return NULL;
  }

  err = OpenAComponent(comp, &outputUnit);
  if(err != noErr) {  // (the original rechecked comp here; the error code is what matters)
    debug(LOG_ERROR, "OpenAComponent failed with error code %ld\n", err);
    return NULL;
  }

  // Set up a callback function to generate output to the output unit
  AURenderCallbackStruct input;
  input.inputProc = processData;
  input.inputProcRefCon = plug;
  err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_SetRenderCallback,
                             kAudioUnitScope_Input, 0, &input, sizeof(input));

  AudioStreamBasicDescription streamFormat;
  streamFormat.mSampleRate = DEF_SAMPLE_RATE;
  streamFormat.mFormatID = kAudioFormatLinearPCM;
  streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian |
                              kLinearPCMFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
  streamFormat.mBytesPerPacket = 2;
  streamFormat.mFramesPerPacket = 1;
  streamFormat.mBytesPerFrame = 2;
  streamFormat.mChannelsPerFrame = 2;
  streamFormat.mBitsPerChannel = 16;

  err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Input, 0, &streamFormat,
                             sizeof(AudioStreamBasicDescription));
  if(err) {
    debug(LOG_ERROR, "AudioUnitSetProperty-SF failed with code %4.4s, %ld\n", (char*)&err, err);
    return NULL;
  }

  // Initialize unit
  err = AudioUnitInitialize(outputUnit);
  if(err) {
    debug(LOG_ERROR, "AudioUnitInitialize failed with code %ld\n", err);
    return NULL;
  }

  Float64 outSampleRate;
  UInt32 size = sizeof(Float64);
  err = AudioUnitGetProperty(outputUnit, kAudioUnitProperty_SampleRate,
                             kAudioUnitScope_Output, 0, &outSampleRate, &size);
  if(err) {
    debug(LOG_ERROR, "AudioUnitSetProperty-GF failed with code %4.4s, %ld\n", (char*)&err, err);
    return NULL;
  }

  // Start the rendering
  // The DefaultOutputUnit will do any format conversions to the format of the default device
  err = AudioOutputUnitStart(outputUnit);
  if(err) {
    debug(LOG_ERROR, "AudioOutputUnitStart failed with code %ld\n", err);
    return NULL;
  }

  // Loop until this thread is killed
  CFRunLoopRun();

  // REALLY after you're finished playing STOP THE AUDIO OUTPUT UNIT!!!!!!
  // but we never get here because we're running until the process is nuked...
  AudioOutputUnitStop(outputUnit);
  err = AudioUnitUninitialize(outputUnit);
  if(err) {
    debug(LOG_ERROR, "AudioUnitUninitialize failed with code %ld\n", err);
    return NULL;
  }

  return NULL;
}
Developer ID: Epitek, Project: KickMaker, Lines: 92
Example 12: defined

void InputImplAudioUnit::setup()
{
	if( mIsSetup ) return;

	OSStatus err = noErr;

	//get default input device
	if( ! mDevice ) {
		mDevice = InputImplAudioUnit::getDefaultDevice();
	}

	//create AudioOutputUnit
	AudioComponent component;
	AudioComponentDescription description;

	description.componentType = kAudioUnitType_Output;
#if defined( CINDER_MAC )
	description.componentSubType = kAudioUnitSubType_HALOutput;
#elif defined( CINDER_COCOA_TOUCH )
	description.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
	description.componentManufacturer = kAudioUnitManufacturer_Apple;
	description.componentFlags = 0;
	description.componentFlagsMask = 0;

	component = AudioComponentFindNext( NULL, &description );
	if( ! component ) {
		std::cout << "Error finding next component" << std::endl;
		throw;
	}

	err = AudioComponentInstanceNew( component, &mInputUnit );
	if( err != noErr ) {
		mInputUnit = NULL;
		std::cout << "Error getting output unit" << std::endl;
		throw;
	}

	// Initialize the AU
	/*err = AudioUnitInitialize( mInputUnit );
	if(err != noErr) {
		std::cout << "failed to initialize HAL Output AU" << std::endl;
		throw;
	}*/

	UInt32 param;
	//enable IO on AudioUnit's input scope
	param = 1;
	err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &param, sizeof( UInt32 ) );
	if( err != noErr ) {
		std::cout << "Error enable IO on Output unit input" << std::endl;
		throw;
	}

	//disable IO on AudioUnit's output scope
	param = 0;
	err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &param, sizeof( UInt32 ) );
	if( err != noErr ) {
		std::cout << "Error disabling IO on Output unit output" << std::endl;
		throw;
	}

#if defined( CINDER_MAC )
	AudioDeviceID nativeDeviceId = static_cast<AudioDeviceID>( mDevice->getDeviceId() );

	// Set the current device to the default input unit.
	err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &nativeDeviceId, sizeof(AudioDeviceID) );
	if( err != noErr ) {
		std::cout << "failed to set AU input device" << std::endl;
		throw;
	}
#endif

	AURenderCallbackStruct callback;
	callback.inputProc = InputImplAudioUnit::inputCallback;
	callback.inputProcRefCon = this;
	err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(AURenderCallbackStruct) );

	//Don't setup buffers until you know what the
	//input and output device audio streams look like.

	// Initialize the AudioUnit
	err = AudioUnitInitialize( mInputUnit );
	if(err != noErr) {
		std::cout << "failed to initialize HAL Output AU" << std::endl;
		throw;
	}

	//Get Size of IO Buffers
	uint32_t sampleCount;
	param = sizeof(UInt32);
#if defined( CINDER_MAC )
	err = AudioUnitGetProperty( mInputUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &sampleCount, &param );
	if( err != noErr ) {
		std::cout << "Error getting buffer frame size" << std::endl;
		throw;
	}
#elif defined( CINDER_COCOA_TOUCH )
	AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &sampleCount, &param );

//......... part of the code omitted here .........
Developer ID: AaronMeyers, Project: Cinder, Lines: 101
Example 13: input_init

//......... part of the code omitted here .........

    size = sizeof(AudioDeviceID);
    AudioDeviceID inputDevice;
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
                                   &size, &inputDevice);
    if (err)
        exit(err);

    err = AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_CurrentDevice,
                               kAudioUnitScope_Global, 0,
                               &inputDevice, sizeof(inputDevice));
    if (err)
        exit(err);

    AudioStreamBasicDescription DeviceFormat;
    AudioStreamBasicDescription DesiredFormat;
    //Use CAStreamBasicDescriptions instead of 'naked'
    //AudioStreamBasicDescriptions to minimize errors.
    //CAStreamBasicDescription.h can be found in the CoreAudio SDK.

    size = sizeof(AudioStreamBasicDescription);

    //Get the input device format
    AudioUnitGetProperty(auHAL, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input, 1,
                         &DeviceFormat, &size);

    //set the desired format to the device's sample rate
    memcpy(&DesiredFormat, &DeviceFormat, sizeof(AudioStreamBasicDescription));
    sampling_rate = DeviceFormat.mSampleRate; // for laser-emulating filters
    DesiredFormat.mSampleRate = DeviceFormat.mSampleRate;
    DesiredFormat.mChannelsPerFrame = 4;
    DesiredFormat.mBitsPerChannel = 16;
    DesiredFormat.mBytesPerPacket = DesiredFormat.mBytesPerFrame = DesiredFormat.mChannelsPerFrame * 2;
    DesiredFormat.mFramesPerPacket = 1;
    DesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;

    //set format to output scope
    err = AudioUnitSetProperty(auHAL, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Output, 1,
                               &DesiredFormat, sizeof(AudioStreamBasicDescription));
    if (err)
        exit(err);

    SInt32 *channelMap = NULL;
    UInt32 numOfChannels = DesiredFormat.mChannelsPerFrame; //2 channels
    UInt32 mapSize = numOfChannels * sizeof(SInt32);
    channelMap = (SInt32 *)malloc(mapSize);

    //for each channel of desired input, map the channel from
    //the device's output channel.
    for(i = 0; i < numOfChannels; i++) {
        channelMap[i] = i;
    }

    err = AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_ChannelMap,
                               kAudioUnitScope_Output, 1,
                               channelMap, size);
    if (err)
        exit(err);
    free(channelMap);

    AURenderCallbackStruct input;
    input.inputProc = callback;
    input.inputProcRefCon = 0;

    err = AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_SetInputCallback,
                               kAudioUnitScope_Global, 0,
                               &input, sizeof(input));
    if (err)
        exit(err);

    err = AudioUnitInitialize(auHAL);
    if (err)
        exit(err);

    err = AudioOutputUnitStart(auHAL);
    if (err)
        exit(err);
}
Developer ID: abrasive, Project: lazer, Lines: 101
Example 14: cubeb_stream_init

//......... part of the code omitted here .........

      latency < 1 || latency > 2000) {
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  memset(&ss, 0, sizeof(ss));
  ss.mFormatFlags = 0;

  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
                       kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
                       kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
  comp = FindNextComponent(NULL, &desc);
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;

  r = OpenAComponent(comp, &stm->unit);
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: OpenAComponent returned %ld\n", (long) r);
  }
  assert(r == 0);

  input.inputProc = audio_unit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(SetRenderCallback) returned %ld\n", (long) r);
  }
  assert(r == 0);

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(StreamFormat) returned %ld\n", (long) r);
  }
  assert(r == 0);

  buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
  if (buffer_size % ss.mBytesPerFrame != 0) {
    buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
  }
  assert(buffer_size % ss.mBytesPerFrame == 0);

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitInitialize returned %ld\n", (long) r);
  }
  assert(r == 0);

  *stream = stm;

  return CUBEB_OK;
}
Developer ID: sachilaRanawaka, Project: integration-mozilla-inbound, Lines: 101
Example 15: audiounit_stream_init

//......... part of the code omitted here .........

#endif
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;
  stm->current_latency_frames = 0;
  stm->hw_latency_frames = UINT64_MAX;

#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  r = OpenAComponent(comp, &stm->unit);
#else
  r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  input.inputProc = audiounit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  buffer_size = latency / 1000.0 * ss.mSampleRate;

  /* Get the range of latency this particular device can work with, and clamp
   * the requested latency to this acceptable range. */
  if (audiounit_get_acceptable_latency_range(&latency_range) != CUBEB_OK) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  if (buffer_size < (unsigned int) latency_range.mMinimum) {
    buffer_size = (unsigned int) latency_range.mMinimum;
  } else if (buffer_size > (unsigned int) latency_range.mMaximum) {
    buffer_size = (unsigned int) latency_range.mMaximum;
  }

  /**
   * Get the default buffer size. If our latency request is below the default,
   * set it. Otherwise, use the default latency.
   **/
  size = sizeof(default_buffer_size);
  r = AudioUnitGetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
                           kAudioUnitScope_Output, 0, &default_buffer_size, &size);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  // Setting the latency doesn't work well for USB headsets (eg. plantronics).
  // Keep the default latency for now.
#if 0
  if (buffer_size < default_buffer_size) {
    /* Set the maximum number of frame that the render callback will ask for,
     * effectively setting the latency of the stream. This is process-wide. */
    r = AudioUnitSetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
                             kAudioUnitScope_Output, 0, &buffer_size, sizeof(buffer_size));
    if (r != 0) {
      audiounit_stream_destroy(stm);
      return CUBEB_ERROR;
    }
  }
#endif

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  *stream = stm;

  return CUBEB_OK;
}
Developer ID: JuannyWang, Project: gecko-dev, Lines: 101
Example 16: Core_OpenAudio

int Core_OpenAudio(_THIS, SDL_AudioSpec *spec)
{
    OSStatus result = noErr;
    Component comp;
    ComponentDescription desc;
    struct AURenderCallbackStruct callback;
    AudioStreamBasicDescription requestedDesc;

    /* Setup a AudioStreamBasicDescription with the requested format */
    requestedDesc.mFormatID = kAudioFormatLinearPCM;
    requestedDesc.mFormatFlags = kLinearPCMFormatFlagIsPacked;
    requestedDesc.mChannelsPerFrame = spec->channels;
    requestedDesc.mSampleRate = spec->freq;

    requestedDesc.mBitsPerChannel = spec->format & 0xFF;
    if (spec->format & 0x8000)
        requestedDesc.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
    if (spec->format & 0x1000)
        requestedDesc.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;

    requestedDesc.mFramesPerPacket = 1;
    requestedDesc.mBytesPerFrame = requestedDesc.mBitsPerChannel * requestedDesc.mChannelsPerFrame / 8;
    requestedDesc.mBytesPerPacket = requestedDesc.mBytesPerFrame * requestedDesc.mFramesPerPacket;

    /* Locate the default output audio unit */
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    comp = FindNextComponent (NULL, &desc);
    if (comp == NULL) {
        SDL_SetError ("Failed to start CoreAudio: FindNextComponent returned NULL");
        return -1;
    }

    /* Open & initialize the default output audio unit */
    result = OpenAComponent (comp, &outputAudioUnit);
    CHECK_RESULT("OpenAComponent")

    result = AudioUnitInitialize (outputAudioUnit);
    CHECK_RESULT("AudioUnitInitialize")

    /* Set the input format of the audio unit. */
    result = AudioUnitSetProperty (outputAudioUnit,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   &requestedDesc,
                                   sizeof (requestedDesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)")

    /* Set the audio callback */
    callback.inputProc = audioCallback;
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty (outputAudioUnit,
                                   kAudioUnitProperty_SetRenderCallback,
                                   kAudioUnitScope_Input,
                                   0,
                                   &callback,
                                   sizeof(callback));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_SetInputCallback)")

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(spec);

    /* Allocate a sample buffer */
    bufferOffset = bufferSize = this->spec.size;
    buffer = SDL_malloc(bufferSize);

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart (outputAudioUnit);
    CHECK_RESULT("AudioOutputUnitStart")

    /* We're running! */
    return(1);
}
Developer ID: ahpho, Project: wowmapviewer, Lines: 80
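Example 16 leans on a CHECK_RESULT macro whose definition lies outside the excerpt. Judging only from how it is used, a plausible reconstruction is the following; this is an assumption, not the verbatim SDL source.

/* Hypothetical reconstruction of CHECK_RESULT: report the failing step via
 * SDL_SetError and abort Core_OpenAudio with an error code. */
#define CHECK_RESULT(msg)                                 \
    if (result != noErr) {                                \
        SDL_SetError("Failed to start CoreAudio: " msg);  \
        return -1;                                        \
    }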
Example 17: audio_unit_open

//......... part of the code omitted here .........

    // Set the current device
    CHECK_AURESULT(AudioUnitSetProperty(d->au,
                                        kAudioOutputUnitProperty_CurrentDevice,
                                        kAudioUnitScope_Global,
                                        output_bus,
                                        &d->dev,
                                        sizeof(AudioDeviceID)));
  }

  param = 0;
  CHECK_AURESULT(AudioUnitSetProperty(d->au,
                                      kAudioUnitProperty_ShouldAllocateBuffer,
                                      is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output,
                                      is_read ? input_bus : output_bus,
                                      &param, sizeof(param)));

  UInt32 asbdsize = sizeof(AudioStreamBasicDescription);
  memset((char *)&asbd, 0, asbdsize);

  CHECK_AURESULT(AudioUnitGetProperty(d->au,
                                      kAudioUnitProperty_StreamFormat,
                                      is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output,
                                      is_read ? input_bus : output_bus,
                                      &asbd, &asbdsize));

  show_format(is_read ? "Input audio unit" : "Output audio unit", &asbd);

  asbd.mSampleRate = d->rate;
  asbd.mBytesPerPacket = asbd.mBytesPerFrame = 2 * d->nchannels;
  asbd.mChannelsPerFrame = d->nchannels;
  asbd.mBitsPerChannel = 16;
  asbd.mFormatID = kAudioFormatLinearPCM;
  asbd.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;

  CHECK_AURESULT(AudioUnitSetProperty(d->au,
                                      kAudioUnitProperty_StreamFormat,
                                      is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
                                      is_read ? input_bus : output_bus,
                                      &asbd, sizeof(AudioStreamBasicDescription)));

  CHECK_AURESULT(AudioUnitGetProperty(d->au,
                                      kAudioUnitProperty_StreamFormat,
                                      is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
                                      is_read ? input_bus : output_bus,
                                      &asbd, &asbdsize));

  show_format(is_read ? "Input audio unit after configuration"
                      : "Output audio unit after configuration", &asbd);

  // Get the number of frames in the IO buffer(s)
  param = sizeof(UInt32);
  UInt32 numFrames;
  CHECK_AURESULT(AudioUnitGetProperty(d->au,
                                      kAudioDevicePropertyBufferFrameSize,
                                      kAudioUnitScope_Input,
                                      input_bus,
                                      &numFrames, &param));
  ms_message("Number of frames per buffer = %i", numFrames);

  AURenderCallbackStruct cbs;
  cbs.inputProcRefCon = d;
  if (is_read) {
    cbs.inputProc = readRenderProc;
    CHECK_AURESULT(AudioUnitSetProperty(d->au,
                                        kAudioOutputUnitProperty_SetInputCallback,
                                        kAudioUnitScope_Global,
                                        input_bus,
                                        &cbs, sizeof(AURenderCallbackStruct)));
  } else {
    cbs.inputProc = writeRenderProc;
    CHECK_AURESULT(AudioUnitSetProperty(d->au,
                                        kAudioUnitProperty_SetRenderCallback,
                                        kAudioUnitScope_Global,
                                        output_bus,
                                        &cbs, sizeof(AURenderCallbackStruct)));
  }

  result = AudioUnitInitialize(d->au);
  if(result != noErr) {
    ms_error("failed to AudioUnitInitialize %i , is_read=%i", result, (int)is_read);
    return -1;
  }

  CHECK_AURESULT(AudioOutputUnitStart(d->au));

  return 0;
}
Developer ID: flybird119, Project: meetphone, Lines: 101
Example 18: FindNextComponent

//_______________________________________________
//
//
//_______________________________________________
uint8_t coreAudioDevice::init(uint8_t channels, uint32_t fq)
{
    _channels = channels;

    OSStatus err;
    ComponentDescription desc;
    AudioUnitInputCallback input;
    AudioStreamBasicDescription streamFormat;
    AudioDeviceID theDevice;
    UInt32 sz = 0;
    UInt32 kFramesPerSlice = 512;

    desc.componentType = 'aunt';
    desc.componentSubType = kAudioUnitSubType_Output;
    desc.componentManufacturer = kAudioUnitID_DefaultOutput;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    comp = FindNextComponent(NULL, &desc);
    if (comp == NULL) {
        printf("coreAudio: Cannot find component\n");
        return 0;
    }
    err = OpenAComponent(comp, &theOutputUnit);
    if (err) {
        printf("coreAudio: Cannot open component\n");
        return 0;
    }

    // Initialize it
    verify_noerr(AudioUnitInitialize(theOutputUnit));

    // Set up a callback function to generate output to the output unit
#if 1
    input.inputProc = MyRenderer;
    input.inputProcRefCon = NULL;
    verify_noerr(AudioUnitSetProperty(theOutputUnit,
                                      kAudioUnitProperty_SetInputCallback,
                                      kAudioUnitScope_Global,
                                      0,
                                      &input,
                                      sizeof(input)));
#endif

    streamFormat.mSampleRate = fq;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
                              | kLinearPCMFormatFlagIsBigEndian
                              | kLinearPCMFormatFlagIsPacked;
    streamFormat.mBytesPerPacket = channels * sizeof (UInt16);
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = channels * sizeof (UInt16);
    streamFormat.mChannelsPerFrame = channels;
    streamFormat.mBitsPerChannel = sizeof (UInt16) * 8;

    verify_noerr(AudioUnitSetProperty(theOutputUnit,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      0,
                                      &streamFormat,
                                      sizeof(AudioStreamBasicDescription)));

    printf("Rendering source:\n\t");
    printf("SampleRate=%f,", streamFormat.mSampleRate);
    printf("BytesPerPacket=%ld,", streamFormat.mBytesPerPacket);
    printf("FramesPerPacket=%ld,", streamFormat.mFramesPerPacket);
    printf("BytesPerFrame=%ld,", streamFormat.mBytesPerFrame);
    printf("BitsPerChannel=%ld,", streamFormat.mBitsPerChannel);
    printf("ChannelsPerFrame=%ld\n", streamFormat.mChannelsPerFrame);

    sz = sizeof (theDevice);
    verify_noerr(AudioUnitGetProperty(theOutputUnit,
                                      kAudioOutputUnitProperty_CurrentDevice,
                                      0,
                                      0,
                                      &theDevice,
                                      &sz));

    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceSetProperty(theDevice, 0, 0, false,
                                        kAudioDevicePropertyBufferFrameSize,
                                        sz, &kFramesPerSlice));

    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceGetProperty(theDevice, 0, false,
                                        kAudioDevicePropertyBufferFrameSize,
                                        &sz, &kFramesPerSlice));

    verify_noerr(AudioDeviceAddPropertyListener(theDevice, 0, false,
                                                kAudioDeviceProcessorOverload,
                                                OverloadListenerProc, 0));

    printf("size of the device's buffer = %ld frames\n", kFramesPerSlice);

    frameCount = 0;
    audioBuffer = new int16_t[BUFFER_SIZE]; // between half a sec and a sec should be enough :)
    return 1;
}
Developer ID: BackupTheBerlios, Project: avidemux-svn, Lines: 99
Example 19: macosx_audio_open

//......... part of the code omitted here .........

	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty1 with error %ld: %s\n",
		          err, GetMacOSStatusErrorString(err));
		return 0;
	}

	// Define the Mash stream description. Mash puts 20ms of data into each read
	// and write call. 20ms at 8000Hz equals 160 samples. Each sample is a u_char,
	// so that's 160 bytes. Mash uses 8-bit mu-law internally, so we need to convert
	// to 16-bit linear before using the audio data.
	devices[ad].mashStreamBasicDescription_.mSampleRate = 8000.0;
	//devices[ad].mashStreamBasicDescription_.mSampleRate = ifmt->sample_rate;
	devices[ad].mashStreamBasicDescription_.mFormatID = kAudioFormatLinearPCM;
#ifdef WORDS_BIGENDIAN
	devices[ad].mashStreamBasicDescription_.mFormatFlags =
	    kLinearPCMFormatFlagIsSignedInteger |
	    kLinearPCMFormatFlagIsBigEndian |
	    kLinearPCMFormatFlagIsPacked;
#else
	devices[ad].mashStreamBasicDescription_.mFormatFlags =
	    kLinearPCMFormatFlagIsSignedInteger |
	    kLinearPCMFormatFlagIsPacked;
#endif
	devices[ad].mashStreamBasicDescription_.mBytesPerPacket = 2;
	devices[ad].mashStreamBasicDescription_.mFramesPerPacket = 1;
	devices[ad].mashStreamBasicDescription_.mBytesPerFrame = 2;
	devices[ad].mashStreamBasicDescription_.mChannelsPerFrame = 1;
	devices[ad].mashStreamBasicDescription_.mBitsPerChannel = 16;

	// Inform the default output unit of our source format.
	err = AudioUnitSetProperty(devices[ad].outputUnit_,
	                           kAudioUnitProperty_StreamFormat,
	                           kAudioUnitScope_Input,
	                           0,
	                           &(devices[ad].mashStreamBasicDescription_),
	                           sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty2");
		printf("error setting output unit source format\n");
		return 0;
	}

	// check the stream format
	err = AudioUnitGetPropertyInfo(devices[ad].outputUnit_,
	                               kAudioUnitProperty_StreamFormat,
	                               kAudioUnitScope_Input, 0,
	                               &propertySize, &writable);
	if (err != noErr)
		debug_msg("err getting property info for kAudioUnitProperty_StreamFormat\n");

	err = AudioUnitGetProperty(devices[ad].outputUnit_,
	                           kAudioUnitProperty_StreamFormat,
	                           kAudioUnitScope_Input, 0,
	                           &streamdesc_, &propertySize);
	if (err != noErr)
		debug_msg("err getting values for kAudioUnitProperty_StreamFormat\n");

	char name[128];
	audio_format_name(ifmt, name, 128);
	debug_msg("Requested ifmt %s\n", name);
	debug_msg("ifmt bytes per block: %d\n", ifmt->bytes_per_block);

	// handle the requested format
	if (ifmt->encoding != DEV_S16) {
		audio_format_change_encoding(ifmt, DEV_S16);
		debug_msg("Requested ifmt changed to %s\n", name);
		debug_msg("ifmt bytes per block: %d\n", ifmt->bytes_per_block);
	}

	audio_format_name(ofmt, name, 128);
	debug_msg("Requested ofmt %s\n", name);
	debug_msg("ofmt bytes per block: %d\n", ofmt->bytes_per_block);

	// Allocate the read buffer and Z delay line.
	//readBufferSize_ = 8192;
	readBufferSize_ = ifmt->bytes_per_block * ringBufferFactor_;
	//readBufferSize_ = 320;
	//printf("readBufferSize_ %d\n", readBufferSize_);
	readBuffer_ = malloc(sizeof(u_char) * readBufferSize_);
	bzero(readBuffer_, readBufferSize_ * sizeof(u_char));
	//memset(readBuffer_, PCMU_AUDIO_ZERO, readBufferSize_);
	//inputReadIndex_ = -1;
	inputReadIndex_ = 0;
	inputWriteIndex_ = 0;
	zLine_ = malloc(sizeof(double) * DECIM441_LENGTH / 80);
	availableInput_ = 0;

	// Allocate the write buffer.
	//writeBufferSize_ = 8000;
	writeBufferSize_ = ofmt->bytes_per_block * ringBufferFactor_;
	writeBuffer_ = malloc(sizeof(SInt16) * writeBufferSize_);
	bzero(writeBuffer_, writeBufferSize_ * sizeof(SInt16));
	outputReadIndex_ = 0;
	outputWriteIndex_ = 0;
	//outputWriteIndex_ = -1;

	// Start audio processing.
	err = AudioUnitInitialize(devices[ad].outputUnit_);
	if (err != noErr) {
		debug_msg("error AudioUnitInitialize\n");
		return 0;
	}

	err = AudioDeviceStart(devices[ad].inputDeviceID_, audioIOProc);
	if (err != noErr) {
		fprintf(stderr, "Input device error: AudioDeviceStart\n");
		return 0;
	}

	err = AudioOutputUnitStart(devices[ad].outputUnit_);
	if (err != noErr) {
		fprintf(stderr, "Output device error: AudioOutputUnitStart\n");
		return 0;
	}

	// Inform the default output unit of our source format.
	/*
	err = AudioUnitSetProperty(devices[ad].outputUnit_,
	                           kAudioUnitProperty_StreamFormat,
	                           kAudioUnitScope_Input,
	                           0,
	                           &(devices[ad].mashStreamBasicDescription_),
	                           sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty3");
		return 0;
	}
	*/

	return 1;
};
Developer ID: JensenSung, Project: rat, Lines: 101
Example 20: configure

    void configure(const AudioStreamBasicDescription& outDesc, UInt32 bufferSize)
    {
        // enable IO on input
        UInt32 param = 1;
        OSErr result = AudioUnitSetProperty(m_inputUnit,
                                            kAudioOutputUnitProperty_EnableIO,
                                            kAudioUnitScope_Input,
                                            1,
                                            &param,
                                            sizeof(UInt32));
        ASSERT(!result);

        // disable IO on output
        param = 0;
        result = AudioUnitSetProperty(m_inputUnit,
                                      kAudioOutputUnitProperty_EnableIO,
                                      kAudioUnitScope_Output,
                                      0,
                                      &param,
                                      sizeof(UInt32));
        ASSERT(!result);

#if !TARGET_OS_IPHONE
        // set to use default device
        AudioDeviceID deviceId = kAudioObjectUnknown;
        param = sizeof(AudioDeviceID);

        AudioObjectPropertyAddress property_address = {
            kAudioHardwarePropertyDefaultInputDevice,  // mSelector
            kAudioObjectPropertyScopeGlobal,           // mScope
            kAudioObjectPropertyElementMaster          // mElement
        };
        UInt32 deviceIdSize = sizeof(deviceId);
        result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                            &property_address,
                                            0,     // inQualifierDataSize
                                            NULL,  // inQualifierData
                                            &deviceIdSize,
                                            &deviceId);
        ASSERT(!result);

        result = AudioUnitSetProperty(m_inputUnit,
                                      kAudioOutputUnitProperty_CurrentDevice,
                                      kAudioUnitScope_Global,
                                      0,
                                      &deviceId,
                                      sizeof(AudioDeviceID));
        ASSERT(!result);
#endif

        // configure the callback
        AURenderCallbackStruct callback;
        callback.inputProc = inputCallback;
        callback.inputProcRefCon = this;
        result = AudioUnitSetProperty(m_inputUnit,
                                      kAudioOutputUnitProperty_SetInputCallback,
                                      kAudioUnitScope_Global,
                                      0,
                                      &callback,
                                      sizeof(AURenderCallbackStruct));
        ASSERT(!result);

        // make the input buffer size match the output buffer size
        UInt32 bufferSizeVal = bufferSize;
#if TARGET_OS_IPHONE
        result = AudioUnitSetProperty(m_inputUnit,
                                      kAudioUnitProperty_MaximumFramesPerSlice,
                                      kAudioUnitScope_Global,
                                      0,
                                      &bufferSizeVal,
                                      sizeof(bufferSizeVal));
#else
        result = AudioUnitSetProperty(m_inputUnit,
                                      kAudioDevicePropertyBufferFrameSize,
                                      kAudioUnitScope_Global,
                                      0,
                                      &bufferSizeVal,
                                      sizeof(bufferSizeVal));
#endif
        ASSERT(!result);

        // Initialize the AudioUnit
        result = AudioUnitInitialize(m_inputUnit);
        ASSERT(!result);

        // get Size of IO Buffers
        UInt32 sampleCount;
        param = sizeof(UInt32);
#if TARGET_OS_IPHONE
        result = AudioUnitGetProperty(m_inputUnit,
                                      kAudioUnitProperty_MaximumFramesPerSlice,
                                      kAudioUnitScope_Global,
                                      0,
                                      &sampleCount,
                                      &param);
#else
        result = AudioUnitGetProperty(m_inputUnit,
                                      kAudioDevicePropertyBufferFrameSize,
                                      kAudioUnitScope_Global,
                                      0,
                                      &sampleCount,
                                      &param);
#endif
        ASSERT(!result);

        // The AudioUnit can do format conversions, so match the input configuration to the output.
        //// if this doesn't work try it the other way around - set up the input desc and force the output to match
        param = sizeof(AudioStreamBasicDescription);
        result = AudioUnitSetProperty(m_inputUnit,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Output,
                                      1,
                                      &outDesc,
                                      param);
        ASSERT(!result);

        m_audioBus = new AudioBus(2, bufferSize, true);
        m_buffers = (AudioBufferList*) malloc(offsetof(AudioBufferList, mBuffers[0]) +
                                              sizeof(AudioBuffer) * outDesc.mChannelsPerFrame);
        m_buffers->mNumberBuffers = outDesc.mChannelsPerFrame;

        for (uint32_t i = 0; i < m_buffers->mNumberBuffers; ++i) {
            m_buffers->mBuffers[i].mNumberChannels = 1;
            m_buffers->mBuffers[i].mDataByteSize = bufferSize * outDesc.mBytesPerFrame;
            m_buffers->mBuffers[i].mData = m_audioBus->channel(i)->mutableData();
        }
    }
Developer ID: cor3ntin, Project: LabSound, Lines: 80
Example 21: ca_open_capture

static ALCenum ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    AudioStreamBasicDescription requestedFormat;  // The application requested format
    AudioStreamBasicDescription hardwareFormat;   // The hardware format
    AudioStreamBasicDescription outputFormat;     // The AudioUnit output format
    AURenderCallbackStruct input;
    ComponentDescription desc;
    AudioDeviceID inputDevice;
    UInt32 outputFrameCount;
    UInt32 propertySize;
    UInt32 enableIO;
    Component comp;
    ca_data *data;
    OSStatus err;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    data = calloc(1, sizeof(*data));
    device->ExtraData = data;

    // Open the component
    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        goto error;
    }

    // Turn off AudioUnit output
    enableIO = 0;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO,
                               kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Turn on AudioUnit input
    enableIO = 1;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO,
                               kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Get the default input device
    propertySize = sizeof(AudioDeviceID);
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
                                   &propertySize, &inputDevice);
    if(err != noErr)
    {
        ERR("AudioHardwareGetProperty failed\n");
        goto error;
    }
    if(inputDevice == kAudioDeviceUnknown)
    {
        ERR("No input device found\n");
        goto error;
    }

    // Track the input device
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice,
                               kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // set capture callback
    input.inputProc = ca_capture_callback;
    input.inputProcRefCon = device;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback,
                               kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Initialize the device
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        goto error;
    }

//......... part of the code omitted here .........
Developer ID: carriercomm, Project: openal-soft-chowdren, Lines: 101
Example 22: OFXAU_RET_FALSE

// ----------------------------------------------------------
bool ofxAudioUnitInput::configureInputDevice()
// ----------------------------------------------------------
{
	UInt32 on = 1;
	OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit,
	                                     kAudioOutputUnitProperty_EnableIO,
	                                     kAudioUnitScope_Input,
	                                     1,
	                                     &on,
	                                     sizeof(on)),
	                "enabling input on HAL unit");

	UInt32 off = 0;
	OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit,
	                                     kAudioOutputUnitProperty_EnableIO,
	                                     kAudioUnitScope_Output,
	                                     0,
	                                     &off,
	                                     sizeof(off)),
	                "disabling output on HAL unit");

	UInt32 deviceIDSize = sizeof(AudioDeviceID);
	OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit,
	                                     kAudioOutputUnitProperty_CurrentDevice,
	                                     kAudioUnitScope_Global,
	                                     0,
	                                     &_impl->inputDeviceID,
	                                     deviceIDSize),
	                "setting HAL unit's device ID");

	AudioStreamBasicDescription deviceASBD = {0};
	UInt32 ASBDSize = sizeof(deviceASBD);
	OFXAU_RET_FALSE(AudioUnitGetProperty(*_unit,
	                                     kAudioUnitProperty_StreamFormat,
	                                     kAudioUnitScope_Output,
	                                     1,
	                                     &deviceASBD,
	                                     &ASBDSize),
	                "getting hardware stream format");

	deviceASBD.mSampleRate = 44100;

	OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit,
	                                     kAudioUnitProperty_StreamFormat,
	                                     kAudioUnitScope_Output,
	                                     1,
	                                     &deviceASBD,
	                                     sizeof(deviceASBD)),
	                "setting input sample rate to 44100");

	AURenderCallbackStruct inputCallback = {RenderCallback, &_impl->ctx};

	OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit,
	                                     kAudioOutputUnitProperty_SetInputCallback,
	                                     kAudioUnitScope_Global,
	                                     0,
	                                     &inputCallback,
	                                     sizeof(inputCallback)),
	                "setting hardware input callback");

	OFXAU_RET_BOOL(AudioUnitInitialize(*_unit),
	               "initializing hardware input unit after setting it to input mode");
}
Author: microcosm, Project: ofxAudioUnit, Lines: 64
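The element (bus) numbers in Example 22 follow the fixed convention of Apple's I/O units: element 0 faces the hardware output (speaker), element 1 faces the hardware input (microphone), and kAudioOutputUnitProperty_EnableIO must be set on the matching scope of the matching element. A standalone sketch of that convention, with constant and function names of our own choosing rather than ofxAudioUnit's:

#include <AudioUnit/AudioUnit.h>

enum { kOutputBus = 0, kInputBus = 1 };  /* fixed bus numbering on I/O units */

static OSStatus enable_input_only(AudioUnit unit)
{
    UInt32 on = 1, off = 0;
    /* Capture side: input scope of the input element. */
    OSStatus err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO,
                                        kAudioUnitScope_Input, kInputBus,
                                        &on, sizeof(on));
    if (err != noErr) return err;
    /* Playback side: output scope of the output element. */
    return AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Output, kOutputBus,
                                &off, sizeof(off));
}

Getting either the scope or the element wrong here typically yields kAudioUnitErr_InvalidProperty rather than a silent failure, which makes this one of the easier setup mistakes to diagnose.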
Example 23: FindNextComponent

bool CoreAudioSound::Start()
{
    OSStatus err;
    AURenderCallbackStruct callback_struct;
    AudioStreamBasicDescription format;
    ComponentDescription desc;
    Component component;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    component = FindNextComponent(nullptr, &desc);
    if (component == nullptr)
    {
        ERROR_LOG(AUDIO, "error finding audio component");
        return false;
    }

    err = OpenAComponent(component, &audioUnit);
    if (err != noErr)
    {
        ERROR_LOG(AUDIO, "error opening audio component");
        return false;
    }

    FillOutASBDForLPCM(format, m_mixer->GetSampleRate(),
                       2, 16, 16, false, false, false);
    err = AudioUnitSetProperty(audioUnit,
                               kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input, 0, &format,
                               sizeof(AudioStreamBasicDescription));
    if (err != noErr)
    {
        ERROR_LOG(AUDIO, "error setting audio format");
        return false;
    }

    callback_struct.inputProc = callback;
    callback_struct.inputProcRefCon = this;
    err = AudioUnitSetProperty(audioUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input, 0, &callback_struct,
                               sizeof callback_struct);
    if (err != noErr)
    {
        ERROR_LOG(AUDIO, "error setting audio callback");
        return false;
    }

    err = AudioUnitSetParameter(audioUnit,
                                kHALOutputParam_Volume,
                                kAudioUnitParameterFlag_Output, 0,
                                m_volume / 100., 0);
    if (err != noErr)
        ERROR_LOG(AUDIO, "error setting volume");

    err = AudioUnitInitialize(audioUnit);
    if (err != noErr)
    {
        ERROR_LOG(AUDIO, "error initializing audiounit");
        return false;
    }

    err = AudioOutputUnitStart(audioUnit);
    if (err != noErr)
    {
        ERROR_LOG(AUDIO, "error starting audiounit");
        return false;
    }

    return true;
}
Author: Chiri23, Project: dolphin, Lines: 68
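The render callback installed in Example 23 is not part of the excerpt. For reference, every callback registered via kAudioUnitProperty_SetRenderCallback must match the AURenderCallback signature shown below. This sketch merely emits silence in the 16-bit interleaved stereo layout the example negotiates with FillOutASBDForLPCM; Dolphin's real callback pulls samples from its mixer instead:

#include <AudioUnit/AudioUnit.h>

static OSStatus render_silence(void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber, UInt32 inNumberFrames,
                               AudioBufferList *ioData)
{
    /* 16-bit interleaved stereo: two SInt16 samples per frame. */
    SInt16 *out = (SInt16 *)ioData->mBuffers[0].mData;
    for (UInt32 i = 0; i < inNumberFrames * 2; i++)
        out[i] = 0;  /* a real implementation would mix audio here */
    return noErr;
}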
Example 24: ca_reset_playback

// ... (earlier part of this function omitted) ...
    }

    /* FIXME: How to tell what channels are what in the output device, and how
     * to specify what we're giving? eg, 6.0 vs 5.1 */
    switch(streamFormat.mChannelsPerFrame)
    {
        case 1: device->FmtChans = DevFmtMono; break;
        case 2: device->FmtChans = DevFmtStereo; break;
        case 4: device->FmtChans = DevFmtQuad; break;
        case 6: device->FmtChans = DevFmtX51; break;
        case 7: device->FmtChans = DevFmtX61; break;
        case 8: device->FmtChans = DevFmtX71; break;
        default:
            ERR("Unhandled channel count (%d), using Stereo\n",
                streamFormat.mChannelsPerFrame);
            device->FmtChans = DevFmtStereo;
            streamFormat.mChannelsPerFrame = 2;
            break;
    }
    SetDefaultWFXChannelOrder(device);

    /* use channel count and sample rate from the default output unit's current
     * parameters, but reset everything else */
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mFormatFlags = 0;
    switch(device->FmtType)
    {
        case DevFmtUByte:
            device->FmtType = DevFmtByte;
            /* fall-through */
        case DevFmtByte:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 8;
            break;
        case DevFmtUShort:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 16;
            break;
        case DevFmtUInt:
            device->FmtType = DevFmtInt;
            /* fall-through */
        case DevFmtInt:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 32;
            break;
        case DevFmtFloat:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
            streamFormat.mBitsPerChannel = 32;
            break;
    }
    streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame *
                                  streamFormat.mBitsPerChannel / 8;
    streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags |= kAudioFormatFlagsNativeEndian |
                                 kLinearPCMFormatFlagIsPacked;

    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input, 0, &streamFormat,
                               sizeof(AudioStreamBasicDescription));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    /* setup callback */
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    input.inputProc = ca_callback;
    input.inputProcRefCon = device;

    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input, 0, &input,
                               sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    /* init the default audio unit... */
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        return ALC_FALSE;
    }

    return ALC_TRUE;
}
Author: carriercomm, Project: openal-soft-chowdren, Lines: 101
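The byte-size arithmetic at the heart of Example 24 generalizes: for packed linear PCM, mBytesPerFrame is always channels × bits ÷ 8, and mBytesPerPacket equals mBytesPerFrame whenever mFramesPerPacket is 1. A minimal sketch that fills an AudioStreamBasicDescription under those rules for interleaved signed 16-bit stereo (the function name is ours, for illustration only):

#include <AudioUnit/AudioUnit.h>

static void make_pcm16_stereo(AudioStreamBasicDescription *fmt, Float64 rate)
{
    fmt->mSampleRate       = rate;
    fmt->mFormatID         = kAudioFormatLinearPCM;
    fmt->mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
                             kAudioFormatFlagsNativeEndian |
                             kLinearPCMFormatFlagIsPacked;
    fmt->mChannelsPerFrame = 2;
    fmt->mBitsPerChannel   = 16;
    fmt->mFramesPerPacket  = 1;
    /* packed PCM: bytes per frame = channels * bits / 8 */
    fmt->mBytesPerFrame    = fmt->mChannelsPerFrame * fmt->mBitsPerChannel / 8;
    fmt->mBytesPerPacket   = fmt->mBytesPerFrame * fmt->mFramesPerPacket;
    fmt->mReserved         = 0;
}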
Example 25: new_fluid_core_audio_driver2

// ... (earlier part of this function omitted) ...
      pa.mElement = kAudioObjectPropertyElementMaster;
      if (OK (AudioObjectGetPropertyDataSize (kAudioObjectSystemObject, &pa, 0, 0, &size))) {
        int num = size / (int) sizeof (AudioDeviceID);
        AudioDeviceID devs [num];
        if (OK (AudioObjectGetPropertyData (kAudioObjectSystemObject, &pa, 0, 0, &size, devs))) {
          for (i = 0; i < num; ++i) {
            char name [1024];
            size = sizeof (name);
            pa.mSelector = kAudioDevicePropertyDeviceName;
            if (OK (AudioObjectGetPropertyData (devs[i], &pa, 0, 0, &size, name))) {
              if (get_num_outputs (devs[i]) > 0 && strcasecmp(devname, name) == 0) {
                AudioDeviceID selectedID = devs[i];
                status = AudioUnitSetProperty (dev->outputUnit,
                                               kAudioOutputUnitProperty_CurrentDevice,
                                               kAudioUnitScope_Global, 0,
                                               &selectedID, sizeof(AudioDeviceID));
                if (status != noErr) {
                  FLUID_LOG (FLUID_ERR, "Error setting the selected output device. Status=%ld\n", (long int)status);
                  goto error_recovery;
                }
              }
            }
          }
        }
      }
  }

  if (devname)
    FLUID_FREE (devname); /* free device name */

  dev->buffer_size = period_size * periods;

  // The DefaultOutputUnit should do any format conversions
  // necessary from our format to the device's format.
  dev->format.mSampleRate = sample_rate;         // sample rate of the audio stream
  dev->format.mFormatID = kAudioFormatLinearPCM; // encoding type of the audio stream
  dev->format.mFormatFlags = kLinearPCMFormatFlagIsFloat;
  dev->format.mBytesPerPacket = 2*sizeof(float);
  dev->format.mFramesPerPacket = 1;
  dev->format.mBytesPerFrame = 2*sizeof(float);
  dev->format.mChannelsPerFrame = 2;
  dev->format.mBitsPerChannel = 8*sizeof(float);

  FLUID_LOG (FLUID_DBG, "mSampleRate %g", dev->format.mSampleRate);
  FLUID_LOG (FLUID_DBG, "mFormatFlags %08X", dev->format.mFormatFlags);
  FLUID_LOG (FLUID_DBG, "mBytesPerPacket %d", dev->format.mBytesPerPacket);
  FLUID_LOG (FLUID_DBG, "mFramesPerPacket %d", dev->format.mFramesPerPacket);
  FLUID_LOG (FLUID_DBG, "mChannelsPerFrame %d", dev->format.mChannelsPerFrame);
  FLUID_LOG (FLUID_DBG, "mBytesPerFrame %d", dev->format.mBytesPerFrame);
  FLUID_LOG (FLUID_DBG, "mBitsPerChannel %d", dev->format.mBitsPerChannel);

  status = AudioUnitSetProperty (dev->outputUnit, kAudioUnitProperty_StreamFormat,
                                 kAudioUnitScope_Input, 0, &dev->format,
                                 sizeof(AudioStreamBasicDescription));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error setting the audio format. Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  status = AudioUnitSetProperty (dev->outputUnit, kAudioUnitProperty_MaximumFramesPerSlice,
                                 kAudioUnitScope_Input, 0, &dev->buffer_size,
                                 sizeof(unsigned int));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Failed to set the MaximumFramesPerSlice. Status=%ld\n", (long int)status);
    goto error_recovery;
  }
  FLUID_LOG (FLUID_DBG, "MaximumFramesPerSlice = %d", dev->buffer_size);

  dev->buffers[0] = FLUID_ARRAY(float, dev->buffer_size);
  dev->buffers[1] = FLUID_ARRAY(float, dev->buffer_size);

  // Initialize the audio unit
  status = AudioUnitInitialize(dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error calling AudioUnitInitialize(). Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  // Start the rendering
  status = AudioOutputUnitStart (dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error calling AudioOutputUnitStart(). Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  return (fluid_audio_driver_t*) dev;

error_recovery:
  delete_fluid_core_audio_driver((fluid_audio_driver_t*) dev);
  return NULL;
}
Author: RangelReale, Project: fluidsynth-fromsvn, Lines: 101
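Example 25 ends with AudioUnitInitialize followed immediately by AudioOutputUnitStart; the matching teardown lives in delete_fluid_core_audio_driver, which is not shown. The usual shutdown mirrors the start-up in reverse order, roughly as in this sketch (assuming the unit was created with AudioComponentInstanceNew; the older Component Manager path would end with CloseComponent instead, and this is our sketch rather than FluidSynth's code):

#include <AudioUnit/AudioUnit.h>

static void shutdown_output_unit(AudioUnit unit)
{
    AudioOutputUnitStop(unit);            /* undo AudioOutputUnitStart */
    AudioUnitUninitialize(unit);          /* undo AudioUnitInitialize */
    AudioComponentInstanceDispose(unit);  /* undo AudioComponentInstanceNew */
}

Keeping the order strict matters: disposing of a unit that is still initialized and rendering risks tearing down state the render thread is actively using.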
Example 26: return

JNIEXPORT jint JNICALL Java_com_apple_audio_units_AudioUnit_AudioUnitInitialize
  (JNIEnv *, jclass, jint ci)
{
    return (jint)AudioUnitInitialize((AudioUnit)ci);
}
Author: fruitsamples, Project: Java, Lines: 5
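Example 26 round-trips the AudioUnit pointer through a 32-bit jint, which worked for the era's 32-bit Java bridge but truncates pointers on 64-bit platforms. A hypothetical 64-bit-safe variant (our naming and signature, not Apple's) would carry the handle in a jlong:

#include <jni.h>
#include <stdint.h>
#include <AudioUnit/AudioUnit.h>

JNIEXPORT jint JNICALL Java_com_apple_audio_units_AudioUnit_AudioUnitInitialize64
  (JNIEnv *env, jclass clazz, jlong unitPtr)
{
    /* jlong is 64 bits on every platform, so the pointer survives LP64. */
    return (jint) AudioUnitInitialize((AudioUnit)(intptr_t) unitPtr);
}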
Example 27: aubio_audio_unit_init

// ... (earlier part of this function omitted) ...
  if (err) {
    AUBIO_ERR("audio_unit: could not set preferred latency (%d)\n", (int)err);
    goto fail;
  }

#if 0 // only for iphone OS >= 3.1
  UInt32 val = 1; // set to 0 (default) to use ear speaker in voice application
  err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
      sizeof(UInt32), &val);
  if (err) {
    AUBIO_ERR("audio_unit: could not set session property to default to speaker\n");
  }
#endif

  /* setting up audio unit */
  AudioComponentDescription desc;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentSubType = kAudioUnitSubType_RemoteIO;
  desc.componentType = kAudioUnitType_Output;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  AudioStreamBasicDescription audioFormat;

  /* look for a component that match the description */
  AudioComponent comp = AudioComponentFindNext(NULL, &desc);

  /* create the audio component */
  AudioUnit *audio_unit = &(o->audio_unit);
  err = AudioComponentInstanceNew(comp, &(o->audio_unit));
  if (err) {
    AUBIO_ERR("audio_unit: failed creating the audio unit\n");
    goto fail;
  }

  /* enable IO */
  UInt32 enabled = 1;
  err = AudioUnitSetProperty (*audio_unit, kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Input, 1, &enabled, sizeof(enabled));
  if (err) {
    AUBIO_ERR("audio_unit: failed enabling input of audio unit\n");
    goto fail;
  }

  /* set max fps */
  UInt32 max_fps = MIN(o->blocksize, MAX_FPS);
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_MaximumFramesPerSlice,
      kAudioUnitScope_Global, 0, &max_fps, sizeof(max_fps));
  if (err) {
    AUBIO_ERR("audio_unit: could not set maximum frames per slice property (%d)\n", (int)err);
    goto fail;
  }

  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input, 0, &(o->au_ios_cb_struct), sizeof(o->au_ios_cb_struct));
  if (err) {
    AUBIO_ERR("audio_unit: failed setting audio unit render callback\n");
    goto fail;
  }

#if 0
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Input, 0, &samplerate, sizeof(Float64));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio input sample rate\n");
    goto fail;
  }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Output, 1, &samplerate, sizeof(Float64));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio input sample rate\n");
    goto fail;
  }
#endif

  audioFormat.mSampleRate = (Float64)samplerate;
  audioFormat.mChannelsPerFrame = 2;
  audioFormat.mFormatID = kAudioFormatLinearPCM;
  audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
    | kAudioFormatFlagsNativeEndian
    | kAudioFormatFlagIsPacked;
  audioFormat.mFramesPerPacket = 1;
  audioFormat.mBitsPerChannel = 8 * sizeof(SInt16);
#if 1 // interleaving
  audioFormat.mBytesPerFrame = 2 * sizeof(SInt16);
  audioFormat.mBytesPerPacket = 2 * sizeof(SInt16);
#else
  audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame = sizeof(SInt32);
  audioFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
#endif

  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio output format\n");
    goto fail;
  }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio input format\n");
    goto fail;
  }

#if 0
  AudioStreamBasicDescription thruFormat;
  thissize = sizeof(thruFormat);
  err = AudioUnitGetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &thruFormat, &thissize);
  if (err) {
    AUBIO_ERR("audio_unit: could not get speaker output format, err: %d\n", (int)err);
    goto fail;
  }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &thruFormat, sizeof(thruFormat));
  if (err) {
    AUBIO_ERR("audio_unit: could not set input audio format, err: %d\n", (int)err);
    goto fail;
  }
#endif

  /* time to initialize the unit */
  err = AudioUnitInitialize(*audio_unit);
  if (err) {
    AUBIO_ERR("audio_unit: failed initializing audio, err: %d\n", (int)err);
    goto fail;
  }

  return 0;

fail:
  return err;
}
Author: Craig-J, Project: RhythMIR, Lines: 101
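The render callback referenced through o->au_ios_cb_struct in Example 27 is outside the excerpt. On a RemoteIO unit configured like this, the render callback typically pulls the freshly captured microphone block by calling AudioUnitRender on the input element (bus 1) before producing its output. A simplified pass-through sketch of that pattern, not aubio's actual code (the refCon layout is an assumption):

#include <AudioUnit/AudioUnit.h>

static OSStatus au_ios_render(void *inRefCon,
                              AudioUnitRenderActionFlags *ioActionFlags,
                              const AudioTimeStamp *inTimeStamp,
                              UInt32 inBusNumber, UInt32 inNumberFrames,
                              AudioBufferList *ioData)
{
    AudioUnit unit = *(AudioUnit *)inRefCon;  /* assume refCon carries the unit */

    /* Pull the captured frames from the input element (bus 1) straight
     * into the output buffers: a simple mic-to-speaker pass-through. */
    OSStatus err = AudioUnitRender(unit, ioActionFlags, inTimeStamp,
                                   1, inNumberFrames, ioData);
    /* A real implementation would analyze or process ioData here. */
    return err;
}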
Example 28: prepare_audiounit

static int
prepare_audiounit(_THIS, const char *devname, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
    AudioComponentDescription desc;
    AudioComponent comp = NULL;
    UInt32 enableIO = 0;
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

    SDL_memset(&desc, '\0', sizeof(AudioComponentDescription));
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    comp = AudioComponentFindNext(NULL, &desc);
    if (comp == NULL) {
        fprintf(stderr, "Couldn't find requested CoreAudio component");
        return 0;
    }

    /* Open & initialize the audio unit */
    /*
       AudioComponentInstanceNew only available on iPhone OS 2.0 and Mac OS X 10.6
       We can't use OpenAComponent on iPhone because it is not present
     */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");
    this->hidden->audioUnitOpened = 1;

    // !!! FIXME: this is wrong?
    enableIO = ((iscapture) ? 1 : 0);
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input, input_bus,
                                  &enableIO, sizeof(enableIO));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_EnableIO input)");

    // !!! FIXME: this is wrong?
    enableIO = ((iscapture) ? 0 : 1);
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output, output_bus,
                                  &enableIO, sizeof(enableIO));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_EnableIO output)");

    /*result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));
    CHECK_RESULT("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
    */

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    SDL_memset(&callback, '\0', sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_SetInputCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

    /* We're running! */
    return 1;
}
Author: arcanon, Project: ipadflash, Lines: 93
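CHECK_RESULT is used throughout Example 28 but defined outside the excerpt. Judging from the call sites, it consults the local result variable and makes the enclosing function return 0 on failure. A plausible reconstruction, not necessarily SDL's exact macro (SDL would more likely route the message through its own error reporting than stderr):

#include <stdio.h>

#define CHECK_RESULT(msg)                                                  \
    if (result != noErr) {                                                 \
        fprintf(stderr, "CoreAudio error (%s): %d\n", msg, (int)result);   \
        return 0;                                                          \
    }

Funneling every OSStatus check through one macro keeps the setup sequence readable, at the cost of hiding an early return inside each CHECK_RESULT line.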
Note: The AudioUnitInitialize examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets come from open-source projects contributed by their authors; copyright remains with the original owners, and any redistribution or use should follow each project's license. Please do not repost without permission.