
Tutorial: C++ AudioDeviceGetProperty Function Code Examples

51自学网 | C++ | 2021-06-01 19:48:20
This tutorial on C++ AudioDeviceGetProperty code examples is quite practical; we hope you find it helpful.

This article collects typical usage examples of the C++ AudioDeviceGetProperty function from open-source code. If you have been wondering how AudioDeviceGetProperty is used in C++, or what real calls to it look like, the selected examples below should help.

A total of 25 AudioDeviceGetProperty code examples are shown, sorted by popularity. Each example is taken from a real project; the developer and project are credited after the code.
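Before diving into the examples, here is a minimal sketch of the basic call pattern, for orientation. AudioDeviceGetProperty takes a device ID, a channel number (0 for the master channel), an input/output flag, a property selector, and an in/out size plus an output buffer. It is part of the legacy CoreAudio property API and has been deprecated since Mac OS X 10.6 in favor of AudioObjectGetPropertyData (as used in Example 4). The snippet below is not taken from any of the projects that follow; it is an illustrative sketch that assumes macOS and linking with -framework CoreAudio.

#include <CoreAudio/CoreAudio.h>
#include <stdio.h>

int main(void)
{
    // Ask the HAL for the default output device.
    AudioDeviceID device = kAudioDeviceUnknown;
    UInt32 size = sizeof(device);
    OSStatus err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
                                            &size, &device);
    if (err != noErr || device == kAudioDeviceUnknown)
        return 1;

    // Query its name: channel 0 (master), isInput = false.
    // size is passed in as the buffer capacity and comes back as the actual length.
    char name[256];
    size = sizeof(name);
    err = AudioDeviceGetProperty(device, 0, false,
                                 kAudioDevicePropertyDeviceName, &size, name);
    if (err != noErr)
        return 1;

    printf("Default output device: %s\n", name);
    return 0;
}

Compile with something like cc demo.c -framework CoreAudio (the file name is arbitrary); on current macOS SDKs this builds with deprecation warnings, which is expected for this API.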

Example 1: InitializeDeviceInfo

static PaError InitializeDeviceInfo(PaMacCoreDeviceInfo *macCoreDeviceInfo,
                                    AudioDeviceID macCoreDeviceId,
                                    PaHostApiIndex hostApiIndex)
{
    PaDeviceInfo *deviceInfo = &macCoreDeviceInfo->inheritedDeviceInfo;
    deviceInfo->structVersion = 2;
    deviceInfo->hostApi = hostApiIndex;

    PaError err = paNoError;
    UInt32 propSize;

    err = conv_err(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, NULL));

    // FIXME: this allocation should be part of the allocations group
    char *name = PaUtil_AllocateMemory(propSize);
    err = conv_err(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, name));
    if (!err) {
        deviceInfo->name = name;
    }

    Float64 sampleRate;
    propSize = sizeof(Float64);
    err = conv_err(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyNominalSampleRate, &propSize, &sampleRate));
    if (!err) {
        deviceInfo->defaultSampleRate = sampleRate;
    }

    // Get channel info
    err = GetChannelInfo(deviceInfo, macCoreDeviceId, 1);
    err = GetChannelInfo(deviceInfo, macCoreDeviceId, 0);

    return err;
}
Developer: 0ryuO | Project: dolphin-avsync | Lines: 31


Example 2: getVolume

static int getVolume(int dir, double *left, double *right)
{
  UInt32        sz;
  AudioDeviceID id;
  Float32       chan1, chan2;

  if (!getDefaultDevice(&id, dir))
    return 0;

  sz = sizeof(chan1);
  if (checkError(AudioDeviceGetProperty(id, 1, // left
                                        dir, kAudioDevicePropertyVolumeScalar,
                                        &sz, &chan1),
                 "GetProperty", "VolumeScalar"))
    return 0;

  sz = sizeof(chan2);
  if (checkError(AudioDeviceGetProperty(id, 2, // right
                                        dir, kAudioDevicePropertyVolumeScalar,
                                        &sz, &chan2),
                 "GetProperty", "VolumeScalar"))
    chan2 = chan1;

  *left  = chan1;
  *right = chan2;
  return 1;
}
Developer: bhatti | Project: RoarVM | Lines: 27


Example 3: m_bIsRunning

CoreAudioDriver::CoreAudioDriver( audioProcessCallback processCallback )
		: H2Core::AudioOutput( __class_name )
		, m_bIsRunning( false )
		, mProcessCallback( processCallback )
		, m_pOut_L( NULL )
		, m_pOut_R( NULL )
{
	//INFOLOG( "INIT" );
	m_nSampleRate = Preferences::get_instance()->m_nSampleRate;
	//  m_nBufferSize = Preferences::get_instance()->m_nBufferSize;
	//  BufferSize is currently set to match the default audio device.

	OSStatus err;
	UInt32 size = sizeof( AudioDeviceID );
	err = AudioHardwareGetProperty(
			  kAudioHardwarePropertyDefaultOutputDevice,
			  &size,
			  &m_outputDevice
		  );
	if ( err != noErr ) {
		ERRORLOG( "Could not get Default Output Device" );
	}

	UInt32 dataSize = sizeof( m_nBufferSize );
	err = AudioDeviceGetProperty(
			  m_outputDevice,
			  0,
			  false,
			  kAudioDevicePropertyBufferFrameSize,
			  &dataSize,
			  ( void * )&m_nBufferSize
		  );
	if ( err != noErr ) {
		ERRORLOG( "get BufferSize error" );
	}
	INFOLOG( QString( "Buffersize: %1" ).arg( m_nBufferSize ) );

	// print some info
	AudioStreamBasicDescription outputStreamBasicDescription;
	UInt32 propertySize = sizeof( outputStreamBasicDescription );
	err = AudioDeviceGetProperty( m_outputDevice, 0, 0, kAudioDevicePropertyStreamFormat, &propertySize, &outputStreamBasicDescription );
	if ( err ) {
		printf( "AudioDeviceGetProperty: returned %d when getting kAudioDevicePropertyStreamFormat", err );
	}

	INFOLOG( QString("SampleRate: %1").arg( outputStreamBasicDescription.mSampleRate ) );
	INFOLOG( QString("BytesPerPacket: %1").arg( outputStreamBasicDescription.mBytesPerPacket ) );
	INFOLOG( QString("FramesPerPacket: %1").arg( outputStreamBasicDescription.mFramesPerPacket ) );
	INFOLOG( QString("BytesPerFrame: %1").arg( outputStreamBasicDescription.mBytesPerFrame ) );
	INFOLOG( QString("ChannelsPerFrame: %1").arg( outputStreamBasicDescription.mChannelsPerFrame ) );
	INFOLOG( QString("BitsPerChannel: %1").arg( outputStreamBasicDescription.mBitsPerChannel ) );
}
Developer: Cesmith2 | Project: hydrogen | Lines: 55


Example 4: AudioOutputGetVolume

OSStatus AudioOutputGetVolume(AudioDeviceID device, Float32 *left, Float32 *right) {
  UInt32 size = (UInt32)sizeof(Float32);
  OSStatus err = AudioObjectGetPropertyData(device, &kAudioOutputVolumeProperty, 0, NULL, &size, left);
  if (noErr == err) {
    *right = *left;
  } else if (kAudioHardwareUnknownPropertyError == err) {
    UInt32 channels[2];
    size = (UInt32)sizeof(Float32);
    err = AudioOutputGetStereoChannels(device, &channels[0], &channels[1]);
    if (noErr == err) err = AudioDeviceGetProperty(device, channels[0], FALSE, kAudioDevicePropertyVolumeScalar, &size, left);
    if (noErr == err) err = AudioDeviceGetProperty(device, channels[1], FALSE, kAudioDevicePropertyVolumeScalar, &size, right);
  }
  return err;
}
Developer: lujianmei | Project: Spark | Lines: 14


Example 5: check_card_capability

static bool_t check_card_capability(AudioDeviceID id, bool_t is_input, char *devname, char *uidname, size_t name_len) {
    unsigned int slen = name_len;
    Boolean writable = 0;
    CFStringRef dUID = NULL;
    bool_t ret = FALSE;

    int err = AudioDeviceGetProperty(id, 0, is_input, kAudioDevicePropertyDeviceName, &slen, devname);
    if (err != kAudioHardwareNoError) {
        ms_error("get kAudioDevicePropertyDeviceName error %ld", err);
        return FALSE;
    }

    err = AudioDeviceGetPropertyInfo(id, 0, is_input, kAudioDevicePropertyStreamConfiguration, &slen, &writable);
    if (err != kAudioHardwareNoError) {
        ms_error("get kAudioDevicePropertyDeviceName error %ld", err);
        return FALSE;
    }

    AudioBufferList *buflist = ms_malloc(slen);

    err = AudioDeviceGetProperty(id, 0, is_input, kAudioDevicePropertyStreamConfiguration, &slen, buflist);
    if (err != kAudioHardwareNoError) {
        ms_error("get kAudioDevicePropertyDeviceName error %ld", err);
        ms_free(buflist);
        return FALSE;
    }

    UInt32 j;
    for (j = 0; j < buflist->mNumberBuffers; j++) {
        if (buflist->mBuffers[j].mNumberChannels > 0) {
            ret = TRUE;
            break;
        }
    }
    ms_free(buflist);

    if (ret == FALSE) return FALSE;

    slen = sizeof(CFStringRef);
    err = AudioDeviceGetProperty(id, 0, is_input, kAudioDevicePropertyDeviceUID, &slen, &dUID);
    if (err != kAudioHardwareNoError) {
        ms_error("get kAudioHardwarePropertyDevices error %ld", err);
        return FALSE;
    }
    CFStringGetCString(dUID, uidname, sizeof(uidname), CFStringGetSystemEncoding());
    ms_message("CA: devname:%s uidname:%s", devname, uidname);
    return ret;
}
Developer: flybird119 | Project: meetphone | Lines: 49


Example 6: audio_cap_ca_help

static void audio_cap_ca_help(const char *driver_name)
{
        UNUSED(driver_name);
        OSErr ret;
        AudioDeviceID *dev_ids;
        int dev_items;
        int i;
        UInt32 size;

        printf("\tcoreaudio : default CoreAudio input\n");

        ret = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDevices, &size, NULL);
        if(ret) goto error;
        dev_ids = malloc(size);
        dev_items = size / sizeof(AudioDeviceID);
        ret = AudioHardwareGetProperty(kAudioHardwarePropertyDevices, &size, dev_ids);
        if(ret) goto error;

        for(i = 0; i < dev_items; ++i)
        {
                char name[128];

                size = sizeof(name);
                ret = AudioDeviceGetProperty(dev_ids[i], 0, 0, kAudioDevicePropertyDeviceName, &size, name);
                fprintf(stderr, "\tcoreaudio:%d : %s\n", (int) dev_ids[i], name);
        }
        free(dev_ids);

        return;

error:
        fprintf(stderr, "[CoreAudio] error obtaining device list.\n");
}
Developer: k4rtik | Project: ultragrid | Lines: 32


Example 7: SetFramesPerBuffer

static PaError SetFramesPerBuffer(AudioDeviceID device, unsigned long framesPerBuffer, int isInput)
{
    PaError result = paNoError;
    UInt32 preferredFramesPerBuffer = framesPerBuffer;
    //    while (preferredFramesPerBuffer > UINT32_MAX) {
    //        preferredFramesPerBuffer /= 2;
    //    }

    UInt32 actualFramesPerBuffer;
    UInt32 propSize = sizeof(UInt32);
    result = conv_err(AudioDeviceSetProperty(device, NULL, 0, isInput, kAudioDevicePropertyBufferFrameSize, propSize, &preferredFramesPerBuffer));

    result = conv_err(AudioDeviceGetProperty(device, 0, isInput, kAudioDevicePropertyBufferFrameSize, &propSize, &actualFramesPerBuffer));

    if (result != paNoError) {
        // do nothing
    }
    else if (actualFramesPerBuffer > framesPerBuffer) {
        result = paBufferTooSmall;
    }
    else if (actualFramesPerBuffer < framesPerBuffer) {
        result = paBufferTooBig;
    }

    return result;
}
Developer: 0ryuO | Project: dolphin-avsync | Lines: 26


Example 8: AudioDeviceGetPropertyInfo

std::vector<UInt32> CoreAudioUtilities::dataSourceList(AudioDeviceID id, bool isInput) {
	OSStatus status = noErr;
	std::vector<UInt32> result;
	Boolean input = (isInput ? TRUE : FALSE);
	UInt32 size = 0;

	status = AudioDeviceGetPropertyInfo(id, 0, input, kAudioDevicePropertyDataSources, &size, NULL);
	if (status) {
		LOG_ERROR("Can't get device property info: kAudioDevicePropertyDataSources");
		return result;
	}
	if (!size) {
		return result;
	}

	UInt32 * ids = (UInt32 *) malloc(size);
	status = AudioDeviceGetProperty(id, 0, input, kAudioDevicePropertyDataSources, &size, ids);
	if (status) {
		LOG_ERROR("Can't get device property: kAudioDevicePropertyDataSources");
	} else {
		for (unsigned i = 0; i < (size / sizeof(UInt32)); i++) {
			result.push_back(ids[i]);
		}
	}
	free(ids);
	return result;
}
Developer: gabrieldelsaint | Project: UIM | Lines: 30


Example 9: m_deviceID

PlexAudioDevice::PlexAudioDevice(AudioDeviceID deviceID)
  : m_deviceID(deviceID)
  , m_isValid(false)
  , m_supportsDigital(false)
{
  UInt32   paramSize = 0;
  OSStatus err = noErr;

  // Retrieve the length of the device name.
  SAFELY(AudioDeviceGetPropertyInfo(deviceID, 0, false, kAudioDevicePropertyDeviceName, &paramSize, NULL));
  if (err == noErr)
  {
    // Retrieve the name of the device.
    char* pStrName = new char[paramSize];
    pStrName[0] = '\0';

    SAFELY(AudioDeviceGetProperty(deviceID, 0, false, kAudioDevicePropertyDeviceName, &paramSize, pStrName));
    if (err == noErr)
    {
      m_deviceName = pStrName;

      // See if the device is writable (can output).
      m_hasOutput = computeHasOutput();

      // If the device does have output, see if it supports digital.
      if (m_hasOutput)
        m_supportsDigital = computeDeviceSupportsDigital();

      m_isValid = true;
    }

    delete[] pStrName;
  }
}
Developer: Castlecard | Project: plex | Lines: 34


Example 10: deviceIDsArraySize

QList<AudioDeviceID> UBAudioQueueRecorder::inputDeviceIDs()
{
    QList<AudioDeviceID> inputDeviceIDs;

    UInt32 deviceIDsArraySize(0);
    AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDevices, &deviceIDsArraySize, 0);

    AudioDeviceID deviceIDs[deviceIDsArraySize / sizeof(AudioDeviceID)];
    AudioHardwareGetProperty(kAudioHardwarePropertyDevices, &deviceIDsArraySize, deviceIDs);

    int deviceIDsCount = deviceIDsArraySize / sizeof(AudioDeviceID);

    for (int i = 0; i < deviceIDsCount; i ++)
    {
        AudioStreamBasicDescription sf;
        UInt32 size = sizeof(AudioStreamBasicDescription);

        if (noErr == AudioDeviceGetProperty(deviceIDs[i], 0, true, kAudioDevicePropertyStreamFormat, &size, &sf))
        {
            inputDeviceIDs << deviceIDs[i];
        }
    }

    /*
    foreach(AudioDeviceID id, inputDeviceIDs)
    {
        qDebug() << "Device" << id << deviceNameFromDeviceID(id) << deviceUIDFromDeviceID(id);
    }
    */

    return inputDeviceIDs;
}
Developer: coachal | Project: Sankore-3.1 | Lines: 33


Example 11: Stream_setFormat

// setup conversion from Squeak to device frame format, or vice-versa.
// requires: stereo for output, stereo or mono for input.
//
static int Stream_setFormat(Stream *s, int frameCount, int sampleRate, int stereo)
{
  int nChannels = 1 + stereo;
  AudioStreamBasicDescription imgFmt, devFmt;
  UInt32 sz = sizeof(devFmt);

  if (0 == s->direction) nChannels = 2;	// insist

  if (checkError(AudioDeviceGetProperty(s->id, 0, s->direction,
                                        kAudioDevicePropertyStreamFormat,
                                        &sz, &devFmt),
                 "GetProperty", "StreamFormat"))
    return 0;

  debugf("stream %p[%d] device format:\n", s, s->direction);
  dumpFormat(&devFmt);

  imgFmt.mSampleRate       = sampleRate;
  imgFmt.mFormatID         = kAudioFormatLinearPCM;
#if defined(WORDS_BIGENDIAN)
  imgFmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian;
#else
  imgFmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger;
#endif
  imgFmt.mBytesPerPacket   = SqueakFrameSize / (3 - nChannels);
  imgFmt.mFramesPerPacket  = 1;
  imgFmt.mBytesPerFrame    = SqueakFrameSize / (3 - nChannels);
  imgFmt.mChannelsPerFrame = nChannels;
  imgFmt.mBitsPerChannel   = 16;

  debugf("stream %p[%d] image format:\n", s, s->direction);
  dumpFormat(&imgFmt);

  if (s->direction) // input
    {
      if (checkError(AudioConverterNew(&devFmt, &imgFmt, &s->converter), "AudioConverter", "New"))
        return 0;
      sz = sizeof(s->cvtBufSize);
      s->cvtBufSize = 512 * devFmt.mBytesPerFrame;
      if (checkError(AudioConverterGetProperty(s->converter, kAudioConverterPropertyCalculateOutputBufferSize,
                                               &sz, &s->cvtBufSize),
                     "GetProperty", "OutputBufferSize"))
        return 0;
    }
  else // output
    {
      if (checkError(AudioConverterNew(&imgFmt, &devFmt, &s->converter), "AudioConverter", "New"))
        return 0;
    }

  s->channels   = nChannels;
  s->sampleRate = sampleRate;
  s->imgBufSize = SqueakFrameSize * nChannels * frameCount;
  frameCount = max(frameCount, 512 * sampleRate / devFmt.mSampleRate);
  s->buffer = Buffer_new((s->direction ? DeviceFrameSize : SqueakFrameSize) * nChannels * frameCount * 2);

  debugf("stream %p[%d] sound buffer size %d/%d (%d)\n", s, s->direction, s->imgBufSize, s->buffer->size, frameCount);

  return 1;
}
Developer: lsehub | Project: Handle | Lines: 63


Example 12: SAFELY

bool PlexAudioDevice::computeDeviceSupportsDigital()
{
  bool ret = false;

  OSStatus err = noErr;
  UInt32   paramSize = 0;

  // Retrieve all the output streams.
  SAFELY(AudioDeviceGetPropertyInfo(m_deviceID, 0, FALSE, kAudioDevicePropertyStreams, &paramSize, NULL));
  if (err == noErr)
  {
    int numStreams = paramSize / sizeof(AudioStreamID);
    AudioStreamID* pStreams = (AudioStreamID *)malloc(paramSize);

    SAFELY(AudioDeviceGetProperty(m_deviceID, 0, FALSE, kAudioDevicePropertyStreams, &paramSize, pStreams));
    if (err == noErr)
    {
      for (int i=0; i<numStreams && ret == false; i++)
      {
        if (computeStreamSupportsDigital(pStreams[i]))
            ret = true;
      }
    }

    free(pStreams);
  }

  return ret;
}
Developer: Castlecard | Project: plex | Lines: 29


Example 13: AudioDeviceGetPropertyInfo

/*
==========
idAudioHardwareOSX::GetAvailableNominalSampleRates
==========
*/
void idAudioHardwareOSX::GetAvailableNominalSampleRates( void )
{
    UInt32            size;
    OSStatus          status;
    int               i, rangeCount;
    AudioValueRange   *rangeArray;

    status = AudioDeviceGetPropertyInfo( selectedDevice, 0, false, kAudioDevicePropertyAvailableNominalSampleRates, &size, NULL );
    if ( status != kAudioHardwareNoError )
    {
        common->Warning( "AudioDeviceGetPropertyInfo %d kAudioDevicePropertyAvailableNominalSampleRates failed. status: %s", selectedDevice, ExtractStatus( status ) );
        return;
    }

    rangeCount = size / sizeof( AudioValueRange );
    rangeArray = (AudioValueRange *)malloc( size );

    common->Printf( "%d possible rate(s)\n", rangeCount );

    status = AudioDeviceGetProperty( selectedDevice, 0, false, kAudioDevicePropertyAvailableNominalSampleRates, &size, rangeArray );
    if ( status != kAudioHardwareNoError )
    {
        common->Warning( "AudioDeviceGetProperty %d kAudioDevicePropertyAvailableNominalSampleRates failed. status: %s", selectedDevice, ExtractStatus( status ) );
        free( rangeArray );
        return;
    }

    for( i = 0; i < rangeCount; i++ )
    {
        common->Printf( "  %d: min %g max %g\n", i, rangeArray[ i ].mMinimum, rangeArray[ i ].mMaximum );
    }

    free( rangeArray );
}
Developer: revelator | Project: Revelator-Doom3 | Lines: 38


Example 14: AudioDeviceGetPropertyInfo

bool CAUOutputDevice::GetPreferredChannelLayout(CCoreAudioChannelLayout& layout)
{
  if (!m_DeviceId)
    return false;

  UInt32 propertySize = 0;
  Boolean writable = false;
  OSStatus ret = AudioDeviceGetPropertyInfo(m_DeviceId, 0, false,
    kAudioDevicePropertyPreferredChannelLayout, &propertySize, &writable);
  if (ret)
    return false;

  void* pBuf = malloc(propertySize);
  ret = AudioDeviceGetProperty(m_DeviceId, 0, false,
    kAudioDevicePropertyPreferredChannelLayout, &propertySize, pBuf);
  if (ret)
    CLog::Log(LOGERROR, "CAUOutputDevice::GetPreferredChannelLayout: "
      "Unable to retrieve preferred channel layout. Error = %s", GetError(ret).c_str());
  else
  {
    // Copy the result into the caller's instance
    layout.CopyLayout(*((AudioChannelLayout*)pBuf));
  }
  free(pBuf);

  return (ret == noErr);
}
Developer: JohnsonAugustine | Project: xbmc-rbp | Lines: 26


Example 15: display_device_names

static OSStatus display_device_names()
{
	UInt32 size;
	Boolean isWritable;
	int i, deviceNum;
	OSStatus err;
	CFStringRef UIname;

	err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDevices, &size, &isWritable);
	if (err != noErr)
		return err;

	deviceNum = size / sizeof(AudioDeviceID);
	AudioDeviceID devices[deviceNum];

	err = AudioHardwareGetProperty(kAudioHardwarePropertyDevices, &size, devices);
	if (err != noErr)
		return err;

	for (i = 0; i < deviceNum; i++) {
		char device_name[256];
		char internal_name[256];

		size = sizeof(CFStringRef);
		UIname = NULL;
		err = AudioDeviceGetProperty(devices[i], 0, false, kAudioDevicePropertyDeviceUID, &size, &UIname);
		if (err == noErr) {
			CFStringGetCString(UIname, internal_name, 256, CFStringGetSystemEncoding());
		} else {
			goto error;
		}

		size = 256;
		err = AudioDeviceGetProperty(devices[i], 0, false, kAudioDevicePropertyDeviceName, &size, device_name);
		if (err != noErr)
			return err;

		jack_info("ICI");
		jack_info("Device name = \'%s\', internal_name = \'%s\' (to be used as -d parameter)", device_name, internal_name);
	}

	return noErr;

error:
	if (UIname != NULL)
		CFRelease(UIname);
	return err;
}
Developer: Llefjord | Project: jack1 | Lines: 47


Example 16: gviHardwareInitCapture

static GVBool gviHardwareInitCapture(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	GVICapturedFrame * frame;
	int numCaptureBufferBytes;
	int numCaptureBufferFrames;
	int i;

	// get the capture format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, true, kAudioDevicePropertyStreamFormat, &size, &data->m_captureStreamDescriptor);
	if(result != noErr)
		return GVFalse;

	// create a converter from the capture format to the GV format
	result = AudioConverterNew(&data->m_captureStreamDescriptor, &GVIVoiceFormat, &data->m_captureConverter);
	if(result != noErr)
		return GVFalse;

	// allocate a capture buffer
	data->m_captureBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_captureBuffer)
	{
		AudioConverterDispose(data->m_captureConverter);
		return GVFalse;
	}

	// allocate space for holding captured frames
	numCaptureBufferBytes = gviMultiplyByBytesPerMillisecond(GVI_CAPTURE_BUFFER_MILLISECONDS);
	numCaptureBufferBytes = gviRoundUpToNearestMultiple(numCaptureBufferBytes, GVIBytesPerFrame);
	numCaptureBufferFrames = (numCaptureBufferBytes / GVIBytesPerFrame);
	for(i = 0 ; i < numCaptureBufferFrames ; i++)
	{
		frame = (GVICapturedFrame *)gsimalloc(sizeof(GVICapturedFrame) + GVIBytesPerFrame - sizeof(GVSample));
		if(!frame)
		{
			gviFreeCapturedFrames(&data->m_captureAvailableFrames);
			gsifree(data->m_captureBuffer);
			AudioConverterDispose(data->m_captureConverter);
			return GVFalse;
		}
		gviPushFirstFrame(&data->m_captureAvailableFrames, frame);
	}

	// init the last crossed time
	data->m_captureLastCrossedThresholdTime = (data->m_captureClock - GVI_HOLD_THRESHOLD_FRAMES - 1);

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, true, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_captureVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}
Developer: AntonioModer | Project: xray-16 | Lines: 59


Example 17: sizeof

void    AudioDevice::SetBufferSize(UInt32 size)
{
    UInt32 propsize = sizeof(UInt32);
    verify_noerr(AudioDeviceSetProperty(mID, NULL, 0, mIsInput, kAudioDevicePropertyBufferFrameSize, propsize, &size));

    propsize = sizeof(UInt32);
    verify_noerr(AudioDeviceGetProperty(mID, 0, mIsInput, kAudioDevicePropertyBufferFrameSize, &propsize, &mBufferSizeFrames));
}
Developer: swbiggart | Project: xwaxMac | Lines: 8


Example 18: audio_list_devices

void audio_list_devices ()
{
	unsigned long size, devid [10], ndev, n, alive;
	char devname [100], manuf [100];

	size = sizeof (devid);
	if (AudioHardwareGetProperty (kAudioHardwarePropertyDevices,
	    &size, &devid) != 0) {
		fprintf (stderr, "audio: cannot get audio devices\n");
		exit (-1);
	}
	ndev = size / sizeof (devid[0]);
	printf ("Found %ld audio device%s.\n", ndev, ndev==1 ? "" : "s");

	for (n=0; n<ndev; ++n) {
		printf ("\nDevice %ld: id %08lx\n", n, devid [n]);

		size = sizeof (devname);
		if (AudioDeviceGetProperty (devid [n], 0, false,
		    kAudioDevicePropertyDeviceName, &size, &devname) != 0) {
			fprintf (stderr, "audio: cannot get device name\n");
			continue;
		}
		printf ("Name: %s\n", devname);

		size = sizeof (manuf);
		if (AudioDeviceGetProperty (devid [n], 0, false,
		    kAudioDevicePropertyDeviceManufacturer, &size, &manuf) != 0) {
			fprintf (stderr, "audio: cannot get device manufacturer\n");
			continue;
		}
		printf ("Manufacturer: %s\n", manuf);

		size = sizeof (alive);
		if (AudioDeviceGetProperty (devid [n], 0, false,
		    kAudioDevicePropertyDeviceIsAlive, &size, &alive) != 0) {
			fprintf (stderr, "audio: cannot get device activity\n");
			continue;
		}
		printf ("Alive: %s\n", alive ? "Yes" : "No");

		audio_list_streams (devid [n], 0);
		audio_list_streams (devid [n], 1);
	}
}
Developer: denrusio | Project: vak-opensource | Lines: 45


Example 19: AudioOutputIsMuted

OSStatus AudioOutputIsMuted(AudioDeviceID device, Boolean *mute) {
  UInt32 value = 0;
  UInt32 size = (UInt32)sizeof(UInt32);
  OSStatus err = AudioDeviceGetProperty(device, 0, FALSE, kAudioDevicePropertyMute, &size, &value);
  if (noErr == err) {
    *mute = value ? TRUE : FALSE;
  }
  return err;
}
Developer: lujianmei | Project: Spark | Lines: 9


Example 20: get_device_name_from_id

static OSStatus get_device_name_from_id(AudioDeviceID id, char name[60])
{
    UInt32 size = sizeof(char) * 60;
    OSStatus stat = AudioDeviceGetProperty(id, 0, false,
                                           kAudioDevicePropertyDeviceName,
                                           &size,
                                           &name[0]);
    return stat;
}
Developer: Llefjord | Project: jack1 | Lines: 9


Example 21: get_device_name_from_id

static OSStatus get_device_name_from_id(AudioDeviceID id, char name[256])
{
    UInt32 size = sizeof(char) * 256;
    OSStatus res = AudioDeviceGetProperty(id, 0, false,
                                          kAudioDevicePropertyDeviceName,
                                          &size,
                                          &name[0]);
    return res;
}
Developer: Llefjord | Project: jack1 | Lines: 9


Example 22: sizeof

tuint32 CDeviceCoreAudio::GetMaxBufferSize()
{
	UInt32 outSize = sizeof(AudioValueRange);
	struct AudioValueRange AVR;

	AudioDeviceGetProperty(mAudioDeviceID, 0, FALSE, kAudioDevicePropertyBufferFrameSizeRange, &outSize, &AVR);

	return AVR.mMaximum;
}
Developer: eriser | Project: koblo_software-1 | Lines: 9


Example 23: sizeof

bool CCoreAudioDevice::IsRunning()
{
  UInt32 isRunning = false;
  UInt32 size = sizeof(isRunning);
  OSStatus ret = AudioDeviceGetProperty(m_DeviceId, 0, false, kAudioDevicePropertyDeviceIsRunning, &size, &isRunning);
  if (ret)
    return false;
  return (isRunning != 0);
}
Developer: flyingtime | Project: boxee | Lines: 9


Example 24: ca_init

static int
ca_init (void) {
    UInt32 sz;
    char device_name[128];

    sz = sizeof(device_id);
    if (AudioHardwareGetProperty (kAudioHardwarePropertyDefaultOutputDevice, &sz, &device_id)) {
        return -1;
    }

    sz = sizeof (device_name);
    if (AudioDeviceGetProperty (device_id, 1, 0, kAudioDevicePropertyDeviceName, &sz, device_name)) {
        return -1;
    }

    sz = sizeof (default_format);
    if (AudioDeviceGetProperty (device_id, 0, 0, kAudioDevicePropertyStreamFormat, &sz, &default_format)) {
        return -1;
    }

    UInt32 bufsize = 4096;
    sz = sizeof (bufsize);
    if (AudioDeviceSetProperty(device_id, NULL, 0, 0, kAudioDevicePropertyBufferFrameSize, sz, &bufsize)) {
        fprintf (stderr, "Failed to set buffer size\n");
    }

    if (ca_apply_format ()) {
        return -1;
    }

    if (AudioDeviceAddIOProc (device_id, ca_buffer_callback, NULL)) {
        return -1;
    }

    if (AudioDeviceAddPropertyListener (device_id, 0, 0, kAudioDevicePropertyStreamFormat, ca_fmtchanged, NULL)) {
        return -1;
    }

    ca_fmtchanged(0, 0, 0, kAudioDevicePropertyStreamFormat, NULL);

    state = OUTPUT_STATE_STOPPED;

    return 0;
}
Developer: Gardenya | Project: deadbeef | Lines: 44


Example 25: AudioOutputGetStereoChannels

OSStatus AudioOutputGetStereoChannels(AudioDeviceID device, UInt32 *left, UInt32 *right) {
  UInt32 channels[2];
  UInt32 size = (UInt32)sizeof(channels);
  OSStatus err = AudioDeviceGetProperty(device, 0, FALSE, kAudioDevicePropertyPreferredChannelsForStereo, &size, &channels);
  if (noErr == err) {
    if (left) *left = channels[0];
    if (right) *right = channels[1];
  }
  return err;
}
Developer: lujianmei | Project: Spark | Lines: 10



Note: The AudioDeviceGetProperty examples in this article were collected from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; for distribution and use, please follow the corresponding project licenses. Do not reproduce this compilation without permission.

