
Self-study tutorial: C++ waveOutOpen function code examples

51自学网  |  2021-06-03 09:52:53  |  C++
This tutorial on C++ waveOutOpen code examples is quite practical; we hope it helps you.

This article collects typical, real-world usage examples of the C++ waveOutOpen function. If you are wondering how waveOutOpen is actually used in C++, what its parameters mean, or what production calls look like, the curated examples below should help.

The article presents 30 waveOutOpen code examples in total, sorted by popularity by default. You can upvote the examples you find useful; that feedback helps the site recommend better C++ code examples.
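Before turning to the project code, it may help to see the bare-minimum call sequence in isolation. The sketch below is not taken from any of the projects that follow; it simply opens the default output device (WAVE_MAPPER) for 44.1 kHz, 16-bit stereo PCM with no callback, which is about the smallest sensible waveOutOpen call.

#include <windows.h>
#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")

/* Minimal sketch: open the default wave output device for 44.1 kHz,
 * 16-bit, stereo PCM. Returns 0 on success, -1 on failure. */
int OpenDefaultWaveOut(HWAVEOUT *phwo)
{
    WAVEFORMATEX wfx = {0};
    wfx.wFormatTag      = WAVE_FORMAT_PCM;
    wfx.nChannels       = 2;
    wfx.nSamplesPerSec  = 44100;
    wfx.wBitsPerSample  = 16;
    wfx.nBlockAlign     = wfx.nChannels * wfx.wBitsPerSample / 8;
    wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
    wfx.cbSize          = 0;   /* no extra bytes for plain PCM */

    /* WAVE_MAPPER lets Windows pick a device that supports the format;
     * CALLBACK_NULL means the caller will poll instead of being notified. */
    MMRESULT mr = waveOutOpen(phwo, WAVE_MAPPER, &wfx, 0, 0, CALLBACK_NULL);
    return (mr == MMSYSERR_NOERROR) ? 0 : -1;
}

Every example below is a variation on this pattern: fill in a WAVEFORMATEX (or WAVEFORMATEXTENSIBLE), pick a device ID or WAVE_MAPPER, choose a callback mechanism, and check the MMRESULT.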

Example 1: DIB_OpenAudio

int DIB_OpenAudio(_THIS, SDL_AudioSpec *spec)
{
    MMRESULT result;
    int i;
    WAVEFORMATEX waveformat;

    /* Initialize the wavebuf structures for closing */
    sound = NULL;
    audio_sem = NULL;
    for ( i = 0; i < NUM_BUFFERS; ++i )
        wavebuf[i].dwUser = 0xFFFF;
    mixbuf = NULL;

    /* Set basic WAVE format parameters */
    memset(&waveformat, 0, sizeof(waveformat));
    waveformat.wFormatTag = WAVE_FORMAT_PCM;

    /* Determine the audio parameters from the AudioSpec */
    switch ( spec->format & 0xFF ) {
    case 8:
        /* Unsigned 8 bit audio data */
        spec->format = AUDIO_U8;
        waveformat.wBitsPerSample = 8;
        break;
    case 16:
        /* Signed 16 bit audio data */
        spec->format = AUDIO_S16;
        waveformat.wBitsPerSample = 16;
        break;
    default:
        SDL_SetError("Unsupported audio format");
        return(-1);
    }
    waveformat.nChannels = spec->channels;
    waveformat.nSamplesPerSec = spec->freq;
    waveformat.nBlockAlign =
        waveformat.nChannels * (waveformat.wBitsPerSample/8);
    waveformat.nAvgBytesPerSec =
        waveformat.nSamplesPerSec * waveformat.nBlockAlign;

    /* Check the buffer size -- minimum of 1/4 second (word aligned) */
    if ( spec->samples < (spec->freq/4) )
        spec->samples = ((spec->freq/4)+3)&~3;

    /* Update the fragment size as size in bytes */
    SDL_CalculateAudioSpec(spec);

    /* Open the audio device */
    result = waveOutOpen(&sound, WAVE_MAPPER, &waveformat,
                         (DWORD)FillSound, (DWORD)this, CALLBACK_FUNCTION);
    if ( result != MMSYSERR_NOERROR ) {
        SetMMerror("waveOutOpen()", result);
        return(-1);
    }
#ifdef SOUND_DEBUG
    /* Check the sound device we retrieved */
    {
        WAVEOUTCAPS caps;

        result = waveOutGetDevCaps((UINT)sound, &caps, sizeof(caps));
        if ( result != MMSYSERR_NOERROR ) {
            SetMMerror("waveOutGetDevCaps()", result);
            return(-1);
        }
        printf("Audio device: %s\n", caps.szPname);
    }
#endif

    /* Create the audio buffer semaphore */
#if defined(_WIN32_WCE) && (_WIN32_WCE < 300)
    audio_sem = CreateSemaphoreCE(NULL, NUM_BUFFERS-1, NUM_BUFFERS, NULL);
#else
    audio_sem = CreateSemaphore(NULL, NUM_BUFFERS-1, NUM_BUFFERS, NULL);
#endif
    if ( audio_sem == NULL ) {
        SDL_SetError("Couldn't create semaphore");
        return(-1);
    }

    /* Create the sound buffers */
    mixbuf = (Uint8 *)malloc(NUM_BUFFERS*spec->size);
    if ( mixbuf == NULL ) {
        SDL_SetError("Out of memory");
        return(-1);
    }
    for ( i = 0; i < NUM_BUFFERS; ++i ) {
        memset(&wavebuf[i], 0, sizeof(wavebuf[i]));
        wavebuf[i].lpData = (LPSTR) &mixbuf[i*spec->size];
        wavebuf[i].dwBufferLength = spec->size;
        wavebuf[i].dwFlags = WHDR_DONE;
        result = waveOutPrepareHeader(sound, &wavebuf[i],
                                      sizeof(wavebuf[i]));
        if ( result != MMSYSERR_NOERROR ) {
            SetMMerror("waveOutPrepareHeader()", result);
            return(-1);
        }
    }

    /* Ready to go! */
//......... remainder of the function omitted in the original .........
Developer: wwzbwwzb    Project: fbdri    Lines: 101
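Example 1 stops after preparing its WAVEHDR blocks (the rest of the function is omitted above). For completeness, here is a generic sketch, not part of that project's code, of the header lifecycle that follows a successful waveOutOpen: prepare, write, wait for WHDR_DONE, then unprepare (and eventually waveOutClose).

#include <windows.h>
#include <mmsystem.h>
#include <string.h>

/* Generic sketch of the WAVEHDR lifecycle. hwo is an already-open HWAVEOUT;
 * pcm/pcmBytes are assumed to hold audio matching the opened format. */
void PlayOneBuffer(HWAVEOUT hwo, char *pcm, DWORD pcmBytes)
{
    WAVEHDR hdr;
    memset(&hdr, 0, sizeof(hdr));
    hdr.lpData         = pcm;
    hdr.dwBufferLength = pcmBytes;

    waveOutPrepareHeader(hwo, &hdr, sizeof(hdr));   /* hand the buffer to the driver */
    waveOutWrite(hwo, &hdr, sizeof(hdr));           /* queue it for playback */

    /* Crude completion wait; real code uses a callback, event, or semaphore. */
    while (!(hdr.dwFlags & WHDR_DONE))
        Sleep(10);

    waveOutUnprepareHeader(hwo, &hdr, sizeof(hdr));
}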


Example 2: pcm_init

void pcm_init()
{
        WAVEFORMATEX format;
        int i;

        /* Sound disabled: set up a dummy PCM buffer and bail out. */
        if (!sound) {
                pcm.stereo = 0;
                pcm.hz = 8000;
                pcm.len = BUF_SIZ;
                buf = (byte *) malloc(pcm.len * buffers);
                pcm.buf = buf;
                pcm.pos = 0;
                snd.rate = (1<<21) / pcm.hz;
                return;
        }

        semaph = CreateSemaphore(NULL, buffers - 1, buffers - 1, NULL);

        /* Describe the requested PCM format. */
        format.wFormatTag = WAVE_FORMAT_PCM;
        format.nChannels = stereo ? 2 : 1;
        format.nSamplesPerSec = samplerate;
        format.wBitsPerSample = bits ? 16 : 8;
        format.nBlockAlign = format.nChannels * format.wBitsPerSample / 8;
        format.nAvgBytesPerSec = format.nSamplesPerSec * format.nBlockAlign;
        format.cbSize = 0;

        /* Open the default device; the semaphore handle is passed to the
         * callback as its instance data. */
        if (waveOutOpen(&wout, WAVE_MAPPER, &format,
                        (DWORD) woutcallback, (DWORD) semaph, CALLBACK_FUNCTION)
                != MMSYSERR_NOERROR)
                return;

        cb_per_sample = format.wBitsPerSample / 8;

        pcm.stereo = stereo;
        pcm.hz = samplerate;
        pcm.len = BUF_SIZ;
        buf = (byte *)malloc(pcm.len * buffers * cb_per_sample);
        pcm.buf = buf;
        pcm.pos = 0;
        snd.rate = (1<<21) / pcm.hz;

        /* Prepare one WAVEHDR per block inside the shared buffer. */
        hdr = (WAVEHDR*)malloc(sizeof(WAVEHDR) * buffers);
        for (i = 0; i < buffers; i++) {
                hdr[i].lpData = (LPSTR) (buf + pcm.len * i * cb_per_sample);
                hdr[i].dwBufferLength = pcm.len * cb_per_sample;
                hdr[i].dwBytesRecorded = 0;
                hdr[i].dwUser = 0;
                hdr[i].dwFlags = 0;
                hdr[i].dwLoops = 0;
                hdr[i].lpNext = NULL;
                hdr[i].reserved = 0;
                waveOutPrepareHeader(wout, &hdr[i], sizeof(WAVEHDR));
        }

        curbuf = 0;
        total_bytes = 0;
        last_wait_time = 0;
        cb_per_sec = format.nAvgBytesPerSec;

        waveOutPause(wout);
        soundresume = 1;
}
Developer: milot-mirdita    Project: PocketGnuBoy    Lines: 61
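Examples 1 and 2 both pass a function pointer with CALLBACK_FUNCTION, and Example 2 hands its semaphore to the callback as instance data. The callback itself is not shown on this page, so here is a hedged, generic sketch of what such a waveOutProc looks like. The key point is to keep it minimal (signal an event or semaphore, as these examples arrange to do); calling other waveOut* functions from inside the callback can deadlock.

#include <windows.h>
#include <mmsystem.h>

/* Generic waveOutProc sketch: dwInstance is whatever was passed as the
 * fifth argument of waveOutOpen (here assumed to be an event handle). */
static void CALLBACK MyWaveOutProc(HWAVEOUT hwo, UINT uMsg,
                                   DWORD_PTR dwInstance,
                                   DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
    if (uMsg == WOM_DONE) {
        /* A buffer finished playing: wake the mixing/writer thread.
         * Do not call other waveOut* functions from inside the callback. */
        SetEvent((HANDLE)dwInstance);
    }
    /* WOM_OPEN and WOM_CLOSE are also delivered but usually ignored. */
}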


Example 3: DSOUND_ReopenDevice

HRESULT DSOUND_ReopenDevice(DirectSoundDevice *device, BOOL forcewave){	HRESULT hres = DS_OK;	TRACE("(%p, %d)/n", device, forcewave);	if (device->driver)	{		IDsDriver_Close(device->driver);		if (device->drvdesc.dwFlags & DSDDESC_DOMMSYSTEMOPEN)			waveOutClose(device->hwo);		IDsDriver_Release(device->driver);		device->driver = NULL;		device->buffer = NULL;		device->hwo = 0;	}	else if (device->drvdesc.dwFlags & DSDDESC_DOMMSYSTEMOPEN)		waveOutClose(device->hwo);	/* DRV_QUERYDSOUNDIFACE is a "Wine extension" to get the DSound interface */	if (ds_hw_accel != DS_HW_ACCEL_EMULATION && !forcewave)		waveOutMessage((HWAVEOUT)device->drvdesc.dnDevNode, DRV_QUERYDSOUNDIFACE, (DWORD_PTR)&device->driver, 0);	/* Get driver description */	if (device->driver) {		DWORD wod = device->drvdesc.dnDevNode;		hres = IDsDriver_GetDriverDesc(device->driver,&(device->drvdesc));		device->drvdesc.dnDevNode = wod;		if (FAILED(hres)) {			WARN("IDsDriver_GetDriverDesc failed: %08x/n", hres);			IDsDriver_Release(device->driver);			device->driver = NULL;		}        }        /* if no DirectSound interface available, use WINMM API instead */	if (!device->driver)		device->drvdesc.dwFlags = DSDDESC_DOMMSYSTEMOPEN | DSDDESC_DOMMSYSTEMSETFORMAT;	if (device->drvdesc.dwFlags & DSDDESC_DOMMSYSTEMOPEN)	{		DWORD flags = CALLBACK_FUNCTION;		if (device->driver)			flags |= WAVE_DIRECTSOUND;		hres = mmErr(waveOutOpen(&(device->hwo), device->drvdesc.dnDevNode, device->pwfx, (DWORD_PTR)DSOUND_callback, (DWORD_PTR)device, flags));		if (FAILED(hres)) {			WARN("waveOutOpen failed/n");			if (device->driver)			{				IDsDriver_Release(device->driver);				device->driver = NULL;			}			return hres;		}	}	if (device->driver)		hres = IDsDriver_Open(device->driver);	return hres;}
Developer: carlosbislip    Project: wine    Lines: 62


Example 4: init

// open & setup audio device// return: 1=success 0=failstatic int init(int rate,int channels,int format,int flags){	WAVEFORMATEXTENSIBLE wformat;      	DWORD totalBufferSize = (BUFFER_SIZE + sizeof(WAVEHDR)) * BUFFER_COUNT;	MMRESULT result;	unsigned char* buffer;	int i;   	switch(format){		case AF_FORMAT_AC3:		case AF_FORMAT_S24_LE:		case AF_FORMAT_S16_LE:		case AF_FORMAT_S8:			break;		default:			mp_msg(MSGT_AO, MSGL_V,"ao_win32: format %s not supported defaulting to Signed 16-bit Little-Endian/n",af_fmt2str_short(format));			format=AF_FORMAT_S16_LE;	}   	// FIXME multichannel mode is buggy	if(channels > 2)		channels = 2;   	//fill global ao_data 	ao_data.channels=channels;	ao_data.samplerate=rate;	ao_data.format=format;	ao_data.bps=channels*rate;	if(format != AF_FORMAT_U8 && format != AF_FORMAT_S8)	  ao_data.bps*=2;	if(ao_data.buffersize==-1)	{		ao_data.buffersize=af_fmt2bits(format)/8;        ao_data.buffersize*= channels;		ao_data.buffersize*= SAMPLESIZE;	}	mp_msg(MSGT_AO, MSGL_V,"ao_win32: Samplerate:%iHz Channels:%i Format:%s/n",rate, channels, af_fmt2str_short(format));    mp_msg(MSGT_AO, MSGL_V,"ao_win32: Buffersize:%d/n",ao_data.buffersize);		//fill waveformatex    ZeroMemory( &wformat, sizeof(WAVEFORMATEXTENSIBLE));    wformat.Format.cbSize          = (channels>2)?sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX):0;    wformat.Format.nChannels       = channels;                    wformat.Format.nSamplesPerSec  = rate;                if(format == AF_FORMAT_AC3)    {        wformat.Format.wFormatTag      = WAVE_FORMAT_DOLBY_AC3_SPDIF;        wformat.Format.wBitsPerSample  = 16;        wformat.Format.nBlockAlign     = 4;    }    else     {        wformat.Format.wFormatTag      = (channels>2)?WAVE_FORMAT_EXTENSIBLE:WAVE_FORMAT_PCM;        wformat.Format.wBitsPerSample  = af_fmt2bits(format);         wformat.Format.nBlockAlign     = wformat.Format.nChannels * (wformat.Format.wBitsPerSample >> 3);    }	if(channels>2)	{        wformat.dwChannelMask = channel_mask[channels-3];        wformat.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;	    wformat.Samples.wValidBitsPerSample=af_fmt2bits(format);    }      wformat.Format.nAvgBytesPerSec = wformat.Format.nSamplesPerSec * wformat.Format.nBlockAlign; 	    //open sound device    //WAVE_MAPPER always points to the default wave device on the system    result = waveOutOpen(&hWaveOut,WAVE_MAPPER,(WAVEFORMATEX*)&wformat,(DWORD_PTR)waveOutProc,0,CALLBACK_FUNCTION);	if(result == WAVERR_BADFORMAT)	{		mp_msg(MSGT_AO, MSGL_ERR,"ao_win32: format not supported switching to default/n");        ao_data.channels = wformat.Format.nChannels = 2;	    ao_data.samplerate = wformat.Format.nSamplesPerSec = 44100;	    ao_data.format = AF_FORMAT_S16_LE;		ao_data.bps=ao_data.channels * ao_data.samplerate*2;	    wformat.Format.wBitsPerSample=16;        wformat.Format.wFormatTag=WAVE_FORMAT_PCM;		wformat.Format.nBlockAlign     = wformat.Format.nChannels * (wformat.Format.wBitsPerSample >> 3);        wformat.Format.nAvgBytesPerSec = wformat.Format.nSamplesPerSec * wformat.Format.nBlockAlign;		ao_data.buffersize=(wformat.Format.wBitsPerSample>>3)*wformat.Format.nChannels*SAMPLESIZE;        result = waveOutOpen(&hWaveOut,WAVE_MAPPER,(WAVEFORMATEX*)&wformat,(DWORD_PTR)waveOutProc,0,CALLBACK_FUNCTION);	}
Developer: DanielGit    Project: Intrisit201202    Lines: 84


Example 5: WAV_ConfigureOutput

/*we assume what was asked is what we got*/static GF_Err WAV_ConfigureOutput(GF_AudioOutput *dr, u32 *SampleRate, u32 *NbChannels, u32 *nbBitsPerSample, u32 channel_cfg){	u32 i, retry;	HRESULT	hr;	WAVEFORMATEX *fmt;#ifdef USE_WAVE_EXT	WAVEFORMATEXTENSIBLE format_ex;#endif	WAVCTX();	if (!ctx) return GF_BAD_PARAM;	/*reset*/	close_waveform(dr);#ifndef USE_WAVE_EXT	if (*NbChannels>2) *NbChannels=2;#endif	memset (&ctx->fmt, 0, sizeof(ctx->fmt));	ctx->fmt.cbSize = sizeof(WAVEFORMATEX);	ctx->fmt.wFormatTag = WAVE_FORMAT_PCM;	ctx->fmt.nChannels = *NbChannels;	ctx->fmt.wBitsPerSample = *nbBitsPerSample;	ctx->fmt.nSamplesPerSec = *SampleRate;	ctx->fmt.nBlockAlign = ctx->fmt.wBitsPerSample * ctx->fmt.nChannels / 8;	ctx->fmt.nAvgBytesPerSec = *SampleRate * ctx->fmt.nBlockAlign;	fmt = &ctx->fmt;#ifdef USE_WAVE_EXT	if (channel_cfg && ctx->fmt.nChannels>2) {		memset(&format_ex, 0, sizeof(WAVEFORMATEXTENSIBLE));		format_ex.Format = ctx->fmt;		format_ex.Format.cbSize = 22;		format_ex.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;		format_ex.SubFormat = GPAC_KSDATAFORMAT_SUBTYPE_PCM;		format_ex.Samples.wValidBitsPerSample = ctx->fmt.wBitsPerSample;		format_ex.dwChannelMask = 0;		if (channel_cfg & GF_AUDIO_CH_FRONT_LEFT) format_ex.dwChannelMask |= SPEAKER_FRONT_LEFT;		if (channel_cfg & GF_AUDIO_CH_FRONT_RIGHT) format_ex.dwChannelMask |= SPEAKER_FRONT_RIGHT;		if (channel_cfg & GF_AUDIO_CH_FRONT_CENTER) format_ex.dwChannelMask |= SPEAKER_FRONT_CENTER;		if (channel_cfg & GF_AUDIO_CH_LFE) format_ex.dwChannelMask |= SPEAKER_LOW_FREQUENCY;		if (channel_cfg & GF_AUDIO_CH_BACK_LEFT) format_ex.dwChannelMask |= SPEAKER_BACK_LEFT;		if (channel_cfg & GF_AUDIO_CH_BACK_RIGHT) format_ex.dwChannelMask |= SPEAKER_BACK_RIGHT;		if (channel_cfg & GF_AUDIO_CH_BACK_CENTER) format_ex.dwChannelMask |= SPEAKER_BACK_CENTER;		if (channel_cfg & GF_AUDIO_CH_SIDE_LEFT) format_ex.dwChannelMask |= SPEAKER_SIDE_LEFT;		if (channel_cfg & GF_AUDIO_CH_SIDE_RIGHT) format_ex.dwChannelMask |= SPEAKER_SIDE_RIGHT;		fmt = (WAVEFORMATEX *) &format_ex;	}#endif	/* Open a waveform device for output using window callback. 
*/	retry = 10;	while (retry) {		hr = waveOutOpen((LPHWAVEOUT)&ctx->hwo, WAVE_MAPPER, &ctx->fmt, (DWORD) WaveProc, (DWORD) dr,		                 CALLBACK_FUNCTION | WAVE_ALLOWSYNC | WAVE_FORMAT_DIRECT		                );		if (hr == MMSYSERR_NOERROR) break;		/*couldn't open audio*/		if (hr != MMSYSERR_ALLOCATED) return GF_IO_ERR;		retry--;	}	if (hr != MMSYSERR_NOERROR) return GF_IO_ERR;	if (!ctx->force_config) {		/*one wave buffer size*/		ctx->buffer_size = 1024 * ctx->fmt.nBlockAlign;		ctx->num_buffers = 2;	} else {		ctx->num_buffers = ctx->cfg_num_buffers;		ctx->buffer_size = (ctx->fmt.nAvgBytesPerSec * ctx->cfg_duration) / (1000 * ctx->cfg_num_buffers);	}	ctx->event = CreateEvent( NULL, FALSE, FALSE, NULL);	/*make sure we're aligned*/	while (ctx->buffer_size % ctx->fmt.nBlockAlign) ctx->buffer_size++;	ctx->wav_buf = gf_malloc(ctx->buffer_size*ctx->num_buffers*sizeof(char));	memset(ctx->wav_buf, 0, ctx->buffer_size*ctx->num_buffers*sizeof(char));	/*setup wave headers*/	for (i=0 ; i < ctx->num_buffers; i++) {		memset(& ctx->wav_hdr[i], 0, sizeof(WAVEHDR));		ctx->wav_hdr[i].dwBufferLength = ctx->buffer_size;		ctx->wav_hdr[i].lpData = & ctx->wav_buf[i*ctx->buffer_size];		ctx->wav_hdr[i].dwFlags = WHDR_DONE;		waveOutPrepareHeader(ctx->hwo, &ctx->wav_hdr[i], sizeof(WAVEHDR));		waveOutWrite(ctx->hwo, &ctx->wav_hdr[i], sizeof(WAVEHDR));		Sleep(1);	}	ctx->total_length_ms = 1000 * ctx->num_buffers * ctx->buffer_size / ctx->fmt.nAvgBytesPerSec;	/*initial delay is full buffer size*/	ctx->delay = ctx->total_length_ms;	return GF_OK;}
Developer: Bevara    Project: GPAC    Lines: 99
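Example 5 (GPAC) switches to WAVEFORMATEXTENSIBLE when more than two channels and a channel configuration are requested. As a stand-alone illustration of just that part, the sketch below fills a 5.1 descriptor; it assumes the mmreg.h/ksmedia.h definitions of WAVEFORMATEXTENSIBLE, the SPEAKER_* masks, and KSDATAFORMAT_SUBTYPE_PCM are available and that the PCM subformat GUID is resolvable at link time (the GPAC code above defines its own copy of that GUID instead).

#include <windows.h>
#include <mmsystem.h>
#include <mmreg.h>
#include <ks.h>
#include <ksmedia.h>
#include <string.h>

/* Sketch: describe 6-channel (5.1) 16-bit PCM at 48 kHz with an explicit
 * speaker mask, as required once nChannels exceeds 2. */
static void Fill51Format(WAVEFORMATEXTENSIBLE *fx)
{
    memset(fx, 0, sizeof(*fx));
    fx->Format.wFormatTag      = WAVE_FORMAT_EXTENSIBLE;
    fx->Format.nChannels       = 6;
    fx->Format.nSamplesPerSec  = 48000;
    fx->Format.wBitsPerSample  = 16;
    fx->Format.nBlockAlign     = fx->Format.nChannels * fx->Format.wBitsPerSample / 8;
    fx->Format.nAvgBytesPerSec = fx->Format.nSamplesPerSec * fx->Format.nBlockAlign;
    fx->Format.cbSize          = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX); /* 22 */

    fx->Samples.wValidBitsPerSample = 16;
    fx->dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
                        SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
                        SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT;
    fx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

    /* The whole structure is then passed to waveOutOpen as a WAVEFORMATEX*. */
}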


Example 6: OpenWaveOut

/***************************************************************************** * OpenWaveOut: open the waveout sound device ****************************************************************************/static int OpenWaveOut( aout_instance_t *p_aout, int i_format,                        int i_channels, int i_nb_channels, int i_rate,                        vlc_bool_t b_probe ){    MMRESULT result;    unsigned int i;    /* Set sound format */#define waveformat p_aout->output.p_sys->waveformat    waveformat.dwChannelMask = 0;    for( i = 0; i < sizeof(pi_channels_src)/sizeof(uint32_t); i++ )    {        if( i_channels & pi_channels_src[i] )            waveformat.dwChannelMask |= pi_channels_in[i];    }    switch( i_format )    {    case VLC_FOURCC('s','p','d','i'):        i_nb_channels = 2;        /* To prevent channel re-ordering */        waveformat.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;        waveformat.Format.wBitsPerSample = 16;        waveformat.Samples.wValidBitsPerSample =            waveformat.Format.wBitsPerSample;        waveformat.Format.wFormatTag = WAVE_FORMAT_DOLBY_AC3_SPDIF;        waveformat.SubFormat = __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF;        break;    case VLC_FOURCC('f','l','3','2'):        waveformat.Format.wBitsPerSample = sizeof(float) * 8;        waveformat.Samples.wValidBitsPerSample =            waveformat.Format.wBitsPerSample;        waveformat.Format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;        waveformat.SubFormat = __KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;        break;    case VLC_FOURCC('s','1','6','l'):        waveformat.Format.wBitsPerSample = 16;        waveformat.Samples.wValidBitsPerSample =            waveformat.Format.wBitsPerSample;        waveformat.Format.wFormatTag = WAVE_FORMAT_PCM;        waveformat.SubFormat = __KSDATAFORMAT_SUBTYPE_PCM;        break;    }    waveformat.Format.nChannels = i_nb_channels;    waveformat.Format.nSamplesPerSec = i_rate;    waveformat.Format.nBlockAlign =        waveformat.Format.wBitsPerSample / 8 * i_nb_channels;    waveformat.Format.nAvgBytesPerSec =        waveformat.Format.nSamplesPerSec * waveformat.Format.nBlockAlign;    /* Only use the new WAVE_FORMAT_EXTENSIBLE format for multichannel audio */    if( i_nb_channels <= 2 )    {        waveformat.Format.cbSize = 0;    }    else    {        waveformat.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;        waveformat.Format.cbSize =            sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);    }    /* Open the device */    result = waveOutOpen( &p_aout->output.p_sys->h_waveout, WAVE_MAPPER,                          (WAVEFORMATEX *)&waveformat,                          (DWORD_PTR)WaveOutCallback, (DWORD_PTR)p_aout,                          CALLBACK_FUNCTION | (b_probe?WAVE_FORMAT_QUERY:0) );    if( result == WAVERR_BADFORMAT )    {        msg_Warn( p_aout, "waveOutOpen failed WAVERR_BADFORMAT" );        return VLC_EGENERIC;    }    if( result == MMSYSERR_ALLOCATED )    {        msg_Warn( p_aout, "waveOutOpen failed WAVERR_ALLOCATED" );        return VLC_EGENERIC;    }    if( result != MMSYSERR_NOERROR )    {        msg_Warn( p_aout, "waveOutOpen failed" );        return VLC_EGENERIC;    }    p_aout->output.p_sys->b_chan_reorder =        aout_CheckChannelReorder( pi_channels_in, pi_channels_out,                                  waveformat.dwChannelMask, i_nb_channels,                                  p_aout->output.p_sys->pi_chan_table );    if( p_aout->output.p_sys->b_chan_reorder )    {        msg_Dbg( p_aout, "channel reordering needed" );    
    }
//......... remainder of the function omitted in the original .........
Developer: forthyen    Project: SDesk    Lines: 101
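Example 6 passes WAVE_FORMAT_QUERY when it is called in probe mode, so the device is only asked whether it accepts the format and is not actually opened. Stripped down to that idiom alone, a hedged sketch looks like this:

#include <windows.h>
#include <mmsystem.h>

/* Sketch: ask whether a format is supported without opening the device.
 * With WAVE_FORMAT_QUERY no handle is returned, so NULL is passed for it. */
static BOOL FormatIsSupported(const WAVEFORMATEX *wfx)
{
    MMRESULT mr = waveOutOpen(NULL, WAVE_MAPPER, wfx, 0, 0, WAVE_FORMAT_QUERY);
    return mr == MMSYSERR_NOERROR;   /* WAVERR_BADFORMAT means "not supported" */
}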


Example 7: SafeDeleteArray

int WaveOutput::Open(int32_t rate, uint32_t chn, int32_t bits, int32_t bufferlenms, int32_t prebufferms){	//int8_t	str[256];	int32_t	error,i;	if (chn>2) return -1;						//No more than stereo	if (open) Close();						//Verify device was closed	//if (cfg.changed)	{		blk.num=4;//cfg.numBlk;		blk.min=2*1024;//cfg.minBlk*1024;		blk.max=64*1024;//cfg.maxBlk*1024;    SafeDeleteArray(&buffer.pOut);    buffer.pOut= new uint8_t[(blk.num * blk.max)];  //}	//Build format structure -------------------	wav.fmt.Format.wFormatTag				= WAVE_FORMAT_EXTENSIBLE;	wav.fmt.Format.nChannels				= chn;	wav.fmt.Format.nSamplesPerSec			= rate;	wav.fmt.Format.wBitsPerSample			= (bits+7)&~7;		//Round bits per sample up to nearest byte	wav.fmt.Format.nBlockAlign			= chn * wav.fmt.Format.wBitsPerSample>>3;	wav.fmt.Format.nAvgBytesPerSec		= wav.fmt.Format.nBlockAlign * wav.fmt.Format.nSamplesPerSec;	wav.fmt.Format.cbSize					= 22;	wav.fmt.Samples.wValidBitsPerSample	= bits;	wav.fmt.dwChannelMask					= chn==2 ? 3 : 4;	//Select left & right (stereo) or center (mono)	wav.fmt.SubFormat						= KSDATAFORMAT_SUBTYPE_PCM;  int cfbufferLen = 2000;	//Check size of ring buffer ----------------	buffer.size=(wav.fmt.Format.nSamplesPerSec *				wav.fmt.Format.nBlockAlign * 				cfbufferLen) / 1000;	if (buffer.asize < buffer.size)				//If allocated size is less than needed size	{		//buffer.pIn=realloc(buffer.pIn,buffer.size);    SafeDeleteArray(&buffer.pIn);    buffer.pIn = new uint8_t[buffer.size];		buffer.asize=buffer.size;	}	//Open audio device ------------------------  int cfg_direct = 0;//WAVE_FORMAT_DIRECT	error=waveOutOpen(&wav.handle,device,(tWAVEFORMATEX*)&wav.fmt,DWORD_PTR(WaveOutDone),DWORD_PTR(this),CALLBACK_FUNCTION|WAVE_ALLOWSYNC|cfg_direct);	switch (error)	{	case(MMSYSERR_NOERROR):		wav.idle=(uint32_t)~0>>(32-blk.num);		//All blocks are free		//Initialize wave blocks ---------------		for (i=0;i<blk.num;i++)		{			memset(&wav.hdr[i],0,sizeof(WAVEHDR));			wav.hdr[i].lpData=(LPSTR)buffer.pOut+(blk.max*i);			wav.hdr[i].dwBufferLength=blk.max;			waveOutPrepareHeader(wav.handle,&wav.hdr[i],sizeof(WAVEHDR));			wav.hdr[i].dwBufferLength=0;			wav.hdr[i].dwUser=(DWORD_PTR)(1<<i);			//Bit corresponding to block			wav.hdr[i].dwFlags|=WHDR_DONE;		}		memset(&wav.time,0,sizeof(MMTIME));		wav.time.wType=TIME_SAMPLES;		wav.samples=0;		wav.smpCnt=0;		buffer.bytes=0;							//No bytes have been written		buffer.length=0;							//Nothing is in input buffer		buffer.queued_length=0;		buffer.write=0;		buffer.read=0;		blk.cnt=0;		blk.size=0;		paused=0;								//Output isn't paused		open=1;								//Device is open		return ((buffer.size / wav.fmt.Format.nBlockAlign) * 1000) / wav.fmt.Format.nSamplesPerSec;	case(MMSYSERR_ALLOCATED):		//sprintf(str,"%s is already in use",wav.cap.szPname);		//MessageBox(outMod.hMainWindow,str,"WaveOut WDM",MB_ICONERROR|MB_OK);		break;	case(MMSYSERR_BADDEVICEID):		//sprintf(str,"Invalid output device ID [%i].  Check your configuration.",cfg.device);		//MessageBox(outMod.hMainWindow,str,"WaveOut WDM",MB_ICONERROR|MB_OK);		break;	case(MMSYSERR_NODRIVER):		//sprintf(str,"No driver is loaded for %s",wav.cap.szPname);		//MessageBox(outMod.hMainWindow,str,"WaveOut WDM",MB_ICONERROR|MB_OK);		break;//.........这里部分代码省略.........
Developer: Noplace    Project: NesEmu    Lines: 101


Example 8: WINMM_OpenDevice

//.........这里部分代码省略.........            break;        }    }    if (!valid_datatype) {        WINMM_CloseDevice(this);        SDL_SetError("Unsupported audio format");        return 0;    }    /* Set basic WAVE format parameters */    SDL_memset(&waveformat, '/0', sizeof(waveformat));    waveformat.wFormatTag = WAVE_FORMAT_PCM;    waveformat.wBitsPerSample = SDL_AUDIO_BITSIZE(this->spec.format);    if (this->spec.channels > 2)        this->spec.channels = 2;        /* !!! FIXME: is this right? */    waveformat.nChannels = this->spec.channels;    waveformat.nSamplesPerSec = this->spec.freq;    waveformat.nBlockAlign =        waveformat.nChannels * (waveformat.wBitsPerSample / 8);    waveformat.nAvgBytesPerSec =        waveformat.nSamplesPerSec * waveformat.nBlockAlign;    /* Check the buffer size -- minimum of 1/4 second (word aligned) */    if (this->spec.samples < (this->spec.freq / 4))        this->spec.samples = ((this->spec.freq / 4) + 3) & ~3;    /* Update the fragment size as size in bytes */    SDL_CalculateAudioSpec(&this->spec);    /* Open the audio device */    if (iscapture) {        result = waveInOpen(&this->hidden->hin, devId, &waveformat,                             (DWORD_PTR) CaptureSound, (DWORD_PTR) this,                             CALLBACK_FUNCTION);    } else {        result = waveOutOpen(&this->hidden->hout, devId, &waveformat,                             (DWORD_PTR) FillSound, (DWORD_PTR) this,                             CALLBACK_FUNCTION);    }    if (result != MMSYSERR_NOERROR) {        WINMM_CloseDevice(this);        SetMMerror("waveOutOpen()", result);        return 0;    }#ifdef SOUND_DEBUG    /* Check the sound device we retrieved */    {        WAVEOUTCAPS caps;        result = waveOutGetDevCaps((UINT) this->hidden->hout,                                   &caps, sizeof(caps));        if (result != MMSYSERR_NOERROR) {            WINMM_CloseDevice(this);            SetMMerror("waveOutGetDevCaps()", result);            return 0;        }        printf("Audio device: %s/n", caps.szPname);    }#endif    /* Create the audio buffer semaphore */    this->hidden->audio_sem =        CreateSemaphore(NULL, NUM_BUFFERS - 1, NUM_BUFFERS, NULL);    if (this->hidden->audio_sem == NULL) {        WINMM_CloseDevice(this);        SDL_SetError("Couldn't create semaphore");        return 0;    }    /* Create the sound buffers */    this->hidden->mixbuf =        (Uint8 *) SDL_malloc(NUM_BUFFERS * this->spec.size);    if (this->hidden->mixbuf == NULL) {        WINMM_CloseDevice(this);        SDL_OutOfMemory();        return 0;    }    for (i = 0; i < NUM_BUFFERS; ++i) {        SDL_memset(&this->hidden->wavebuf[i], 0,                   sizeof(this->hidden->wavebuf[i]));        this->hidden->wavebuf[i].dwBufferLength = this->spec.size;        this->hidden->wavebuf[i].dwFlags = WHDR_DONE;        this->hidden->wavebuf[i].lpData =            (LPSTR) & this->hidden->mixbuf[i * this->spec.size];        result = waveOutPrepareHeader(this->hidden->hout,                                      &this->hidden->wavebuf[i],                                      sizeof(this->hidden->wavebuf[i]));        if (result != MMSYSERR_NOERROR) {            WINMM_CloseDevice(this);            SetMMerror("waveOutPrepareHeader()", result);            return 0;        }    }    return 1;                   /* Ready to go! */}
Developer: BoonsNaibot    Project: kivy-ios    Lines: 101
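Examples 1 and 8 wrap failures in a SetMMerror helper. If you only need a readable message, the multimedia API can translate the MMRESULT itself; a small sketch (using the ANSI variant for simplicity):

#include <windows.h>
#include <mmsystem.h>
#include <stdio.h>

/* Sketch: turn an MMRESULT from waveOutOpen() and friends into text. */
static void ReportWaveError(const char *what, MMRESULT mr)
{
    char text[MAXERRORLENGTH];
    if (waveOutGetErrorTextA(mr, text, sizeof(text)) == MMSYSERR_NOERROR)
        fprintf(stderr, "%s failed: %s\n", what, text);
    else
        fprintf(stderr, "%s failed: error %u\n", what, (unsigned)mr);
}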


Example 9: winwave_init_out

static int winwave_init_out (HWVoiceOut *hw, struct audsettings *as){    int i;    int err;    MMRESULT mr;    WAVEFORMATEX wfx;    WaveVoiceOut *wave;    wave = (WaveVoiceOut *) hw;    InitializeCriticalSection (&wave->crit_sect);    err = waveformat_from_audio_settings (&wfx, as);    if (err) {        goto err0;    }    mr = waveOutOpen (&wave->hwo, WAVE_MAPPER, &wfx,                      (DWORD_PTR) winwave_callback_out,                      (DWORD_PTR) wave, CALLBACK_FUNCTION);    if (mr != MMSYSERR_NOERROR) {        winwave_logerr (mr, "waveOutOpen");        goto err1;    }    wave->hdrs = audio_calloc (AUDIO_FUNC, conf.dac_headers,                               sizeof (*wave->hdrs));    if (!wave->hdrs) {        goto err2;    }    audio_pcm_init_info (&hw->info, as);    hw->samples = conf.dac_samples * conf.dac_headers;    wave->avail = hw->samples;    wave->pcm_buf = audio_calloc (AUDIO_FUNC, conf.dac_samples,                                  conf.dac_headers << hw->info.shift);    if (!wave->pcm_buf) {        goto err3;    }    for (i = 0; i < conf.dac_headers; ++i) {        WAVEHDR *h = &wave->hdrs[i];        h->dwUser = 0;        h->dwBufferLength = conf.dac_samples << hw->info.shift;        h->lpData = advance (wave->pcm_buf, i * h->dwBufferLength);        h->dwFlags = 0;        mr = waveOutPrepareHeader (wave->hwo, h, sizeof (*h));        if (mr != MMSYSERR_NOERROR) {            winwave_logerr (mr, "waveOutPrepareHeader(%d)", i);            goto err4;        }    }    return 0; err4:    g_free (wave->pcm_buf); err3:    g_free (wave->hdrs); err2:    winwave_anal_close_out (wave); err1: err0:    return -1;}
Developer: 0bliv10n    Project: s2e    Lines: 68


Example 10: FMUSIC_PlaySong

signed char FMUSIC_PlaySong(FMUSIC_MODULE *mod){	int				count;	FMUSIC_CHANNEL	*cptr;	int				totalblocks; 	if (!mod)     {		return FALSE;    }	lastmodplay = mod ;	volumeUpdateSpeed = 30 ;	FMUSIC_StopSong(mod);	if (!FSOUND_File_OpenCallback || !FSOUND_File_CloseCallback || !FSOUND_File_ReadCallback || !FSOUND_File_SeekCallback || !FSOUND_File_TellCallback)    {		return FALSE;    }	// ========================================================================================================	// INITIALIZE SOFTWARE MIXER 	// ========================================================================================================	FSOUND_OOMixRate    = 1.0f / (float)FSOUND_MixRate;	FSOUND_BlockSize    = ((FSOUND_MixRate * FSOUND_LATENCY / 1000) + 3) & 0xFFFFFFFC;	// Number of *samples*	FSOUND_BufferSize   = FSOUND_BlockSize * (FSOUND_BufferSizeMs / FSOUND_LATENCY);	// make it perfectly divisible by granularity	FSOUND_BufferSize <<= 1;	// double buffer	mix_volumerampsteps      = FSOUND_MixRate * FSOUND_VOLUMERAMP_STEPS / 44100;	mix_1overvolumerampsteps = 1.0f / mix_volumerampsteps;    totalblocks              = FSOUND_BufferSize / FSOUND_BlockSize;	//=======================================================================================	// ALLOC GLOBAL CHANNEL POOL	//=======================================================================================	memset(FSOUND_Channel, 0, sizeof(FSOUND_CHANNEL) * 256);	// ========================================================================================================	// SET UP CHANNELS	// ========================================================================================================	for (count=0; count < 256; count++)	{		FSOUND_Channel[count].index = count;		FSOUND_Channel[count].speedhi = 1;	}	mod->globalvolume       = mod->defaultglobalvolume;	mod->globalWantedVolume = mod->globalvolume ; 	mod->speed              = (int)mod->defaultspeed;	mod->row                = 0;	mod->order              = 0;	mod->nextorder          = -1;	mod->nextrow            = -1;	mod->mixer_samplesleft  = 0;	mod->tick               = 0;	mod->patterndelay       = 0;	mod->time_ms            = 0;	FMUSIC_SetBPM(mod, mod->defaultbpm);	memset(FMUSIC_Channel, 0, mod->numchannels * sizeof(FMUSIC_CHANNEL));//	memset(FSOUND_Channel, 0, 256 * sizeof(FSOUND_CHANNEL));	for (count=0; count < mod->numchannels; count++)	{		cptr = &FMUSIC_Channel[count];		cptr->cptr = &FSOUND_Channel[count];	}	FMUSIC_PlayingSong = mod;	FMUSIC_TimeInfo = FSOUND_Memory_Calloc(sizeof(FMUSIC_TIMMEINFO) * totalblocks);	// ========================================================================================================	// PREPARE THE OUTPUT	// ========================================================================================================	{		WAVEFORMATEX	pcmwf;		UINT			hr;		// ========================================================================================================		// INITIALIZE WAVEOUT		// ========================================================================================================		pcmwf.wFormatTag		= WAVE_FORMAT_PCM; 		pcmwf.nChannels			= 2;		pcmwf.wBitsPerSample	= 16; 		pcmwf.nBlockAlign		= pcmwf.nChannels * pcmwf.wBitsPerSample / 8;		pcmwf.nSamplesPerSec	= FSOUND_MixRate;		pcmwf.nAvgBytesPerSec	= pcmwf.nSamplesPerSec * pcmwf.nBlockAlign; 		pcmwf.cbSize			= 0;		hr = waveOutOpen(&FSOUND_WaveOutHandle, WAVE_MAPPER, &pcmwf, 0, 0, 0);		if (hr)         {			return FALSE;        }	}//.........这里部分代码省略.........
Developer: r043v    Project: dstar    Lines: 101


Example 11: Init

    s32 Init()    {        numBuffers = Config_WaveOut.NumBuffers;        MMRESULT woores;        if (Test())            return -1;// TODO : Use dsound to determine the speaker configuration, and expand audio from there.#if 0		int speakerConfig;		//if( StereoExpansionEnabled )			speakerConfig = 2;  // better not mess with this in wavout :p (rama)		// Any windows driver should support stereo at the software level, I should think!		pxAssume( speakerConfig > 1 );		LPTHREAD_START_ROUTINE threadproc;		switch( speakerConfig )		{		case 2:			ConLog( "* SPU2 > Using normal 2 speaker stereo output./n" );			threadproc = (LPTHREAD_START_ROUTINE)&RThread<StereoOut16>;			speakerConfig = 2;		break;		case 4:			ConLog( "* SPU2 > 4 speaker expansion enabled [quadraphenia]/n" );			threadproc = (LPTHREAD_START_ROUTINE)&RThread<StereoQuadOut16>;			speakerConfig = 4;		break;		case 6:		case 7:			ConLog( "* SPU2 > 5.1 speaker expansion enabled./n" );			threadproc = (LPTHREAD_START_ROUTINE)&RThread<Stereo51Out16>;			speakerConfig = 6;		break;		default:			ConLog( "* SPU2 > 7.1 speaker expansion enabled./n" );			threadproc = (LPTHREAD_START_ROUTINE)&RThread<Stereo51Out16>;			speakerConfig = 8;		break;		}#endif        wformat.wFormatTag = WAVE_FORMAT_PCM;        wformat.nSamplesPerSec = SampleRate;        wformat.wBitsPerSample = 16;        wformat.nChannels = 2;        wformat.nBlockAlign = ((wformat.wBitsPerSample * wformat.nChannels) / 8);        wformat.nAvgBytesPerSec = (wformat.nSamplesPerSec * wformat.nBlockAlign);        wformat.cbSize = 0;        qbuffer = new StereoOut16[BufferSize * numBuffers];        woores = waveOutOpen(&hwodevice, WAVE_MAPPER, &wformat, 0, 0, 0);        if (woores != MMSYSERR_NOERROR) {            waveOutGetErrorText(woores, (wchar_t *)&ErrText, 255);            SysMessage("WaveOut Error: %s", ErrText);            return -1;        }        const int BufferSizeBytes = wformat.nBlockAlign * BufferSize;        for (u32 i = 0; i < numBuffers; i++) {            whbuffer[i].dwBufferLength = BufferSizeBytes;            whbuffer[i].dwBytesRecorded = BufferSizeBytes;            whbuffer[i].dwFlags = 0;            whbuffer[i].dwLoops = 0;            whbuffer[i].dwUser = 0;            whbuffer[i].lpData = (LPSTR)QBUFFER(i);            whbuffer[i].lpNext = 0;            whbuffer[i].reserved = 0;            waveOutPrepareHeader(hwodevice, whbuffer + i, sizeof(WAVEHDR));            whbuffer[i].dwFlags |= WHDR_DONE;  //avoid deadlock        }        // Start Thread        // [Air]: The waveout code does not use wait objects, so setting a time critical        // priority level is a bad idea.  Standard priority will do fine.  The buffer will get the        // love it needs and won't suck resources idling pointlessly.  Just don't try to        // run it in uber-low-latency mode.        waveout_running = true;        thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)RThread<StereoOut16>, this, 0, &tid);        return 0;    }
Developer: KitoHo    Project: pcsx2    Lines: 92


Example 12: SNDDMA_InitWav

//Crappy windows multimedia basestatic qbool SNDDMA_InitWav (void){	WAVEFORMATEX format;	int i;	HRESULT hr;	UINT_PTR devicenum;	int temp;	snd_sent = 0;	snd_completed = 0;	memset((void *)shm, 0, sizeof(*shm));	shm->format.channels = 2;	shm->format.width = 2;	shm->format.speed = (s_khz.value == 44) ? 44100 : (s_khz.value == 22) ? 22050 : 11025;	memset (&format, 0, sizeof(format));	format.wFormatTag = WAVE_FORMAT_PCM;	format.nChannels = shm->format.channels;	format.wBitsPerSample = shm->format.width * 8;	format.nSamplesPerSec = shm->format.speed;	format.nBlockAlign = format.nChannels * format.wBitsPerSample / 8;	format.cbSize = 0;	format.nAvgBytesPerSec = format.nSamplesPerSec * format.nBlockAlign;	devicenum = WAVE_MAPPER;	if ((temp = COM_CheckParm("-snddev")) && temp + 1 < COM_Argc())		devicenum = Q_atoi(COM_Argv(temp + 1));	hr = waveOutOpen((LPHWAVEOUT) &hWaveOut, devicenum, &format, 0, 0L, CALLBACK_NULL);	if (hr != MMSYSERR_NOERROR && devicenum != WAVE_MAPPER) {		Com_Printf ("Couldn't open preferred sound device. Falling back to primary sound device./n");		hr = waveOutOpen((LPHWAVEOUT)&hWaveOut, WAVE_MAPPER, &format, 0, 0L, CALLBACK_NULL);	}	/* Open a waveform device for output using window callback. */	if (hr != MMSYSERR_NOERROR) {		if (hr == MMSYSERR_ALLOCATED)			Com_Printf ("waveOutOpen failed, hardware already in use/n");		else			Com_Printf ("waveOutOpen failed/n");		return false;	}	/*	 * Allocate and lock memory for the waveform data. The memory 	 * for waveform data must be globally allocated with 	 * GMEM_MOVEABLE and GMEM_SHARE flags.	 */	gSndBufSize = WAV_BUFFERS*WAV_BUFFER_SIZE;	hData = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, gSndBufSize);	if (!hData) {		Com_Printf ("Sound: Out of memory./n");		FreeSound ();		return false;	}	lpData = GlobalLock(hData);	if (!lpData) {		Com_Printf ("Sound: Failed to lock./n");		FreeSound ();		return false;	}	memset (lpData, 0, gSndBufSize);	/*	 * Allocate and lock memory for the header. This memory must 	 * also be globally allocated with GMEM_MOVEABLE and 	 * GMEM_SHARE flags. 	 */	hWaveHdr = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE,	                       (DWORD) sizeof(WAVEHDR) * WAV_BUFFERS);	if (hWaveHdr == NULL) {		Com_Printf ("Sound: Failed to Alloc header./n");		FreeSound ();		return false;	}	lpWaveHdr = (LPWAVEHDR) GlobalLock(hWaveHdr);	if (lpWaveHdr == NULL) {		Com_Printf ("Sound: Failed to lock header./n");		FreeSound ();		return false;	}	memset (lpWaveHdr, 0, sizeof(WAVEHDR) * WAV_BUFFERS);	/* After allocation, set up and prepare headers. */	for (i = 0; i < WAV_BUFFERS; i++) {		lpWaveHdr[i].dwBufferLength = WAV_BUFFER_SIZE;		lpWaveHdr[i].lpData = lpData + i*WAV_BUFFER_SIZE;		if (waveOutPrepareHeader(hWaveOut, lpWaveHdr+i, sizeof(WAVEHDR)) != MMSYSERR_NOERROR) {			Com_Printf ("Sound: failed to prepare wave headers/n");			FreeSound ();			return false;		}//.........这里部分代码省略.........
Developer: se-sss    Project: ezquake-source    Lines: 101


Example 13: openPcmDevice

PcmDevice *openPcmDevice (int errorLevel, const char *device) {  PcmDevice *pcm;  MMRESULT mmres;  WAVEOUTCAPS caps;  int id = 0;  if (*device) {    if (!isInteger(&id, device) || (id < 0) || (id >= waveOutGetNumDevs())) {      logMessage(errorLevel, "invalid PCM device number: %s", device);      return NULL;    }  }  if (!(pcm = malloc(sizeof(*pcm)))) {    logSystemError("PCM device allocation");    return NULL;  }  pcm->deviceID = id;  if ((waveOutGetDevCaps(pcm->deviceID, &caps, sizeof(caps))) != MMSYSERR_NOERROR)    pcm->format = defaultFormat;  else {    logMessage(errorLevel, "PCM device %d is %s", pcm->deviceID, caps.szPname);    pcm->format.wFormatTag = WAVE_FORMAT_PCM;    if (caps.dwFormats & 	(WAVE_FORMAT_1S08	|WAVE_FORMAT_1S16	|WAVE_FORMAT_2S08	|WAVE_FORMAT_2S16	|WAVE_FORMAT_4S08	|WAVE_FORMAT_4S16))      pcm->format.nChannels = 2;    else      pcm->format.nChannels = 1;    if (caps.dwFormats &	(WAVE_FORMAT_4M08	|WAVE_FORMAT_4M16	|WAVE_FORMAT_4S08	|WAVE_FORMAT_4S16))      pcm->format.nSamplesPerSec = 44100;    else if (caps.dwFormats &	(WAVE_FORMAT_2M08	|WAVE_FORMAT_2M16	|WAVE_FORMAT_2S08	|WAVE_FORMAT_2S16))      pcm->format.nSamplesPerSec = 22050;    else if (caps.dwFormats &	(WAVE_FORMAT_1M08	|WAVE_FORMAT_1M16	|WAVE_FORMAT_1S08	|WAVE_FORMAT_1S16))      pcm->format.nSamplesPerSec = 11025;    else {      logMessage(errorLevel, "unknown PCM capability %#lx", caps.dwFormats);      goto out;    }    if (caps.dwFormats &	(WAVE_FORMAT_1M16	|WAVE_FORMAT_1S16	|WAVE_FORMAT_2M16	|WAVE_FORMAT_2S16	|WAVE_FORMAT_4M16	|WAVE_FORMAT_4S16))      pcm->format.wBitsPerSample = 16;    else if (caps.dwFormats &	(WAVE_FORMAT_1M08	|WAVE_FORMAT_1S08	|WAVE_FORMAT_2M08	|WAVE_FORMAT_2S08	|WAVE_FORMAT_4M08	|WAVE_FORMAT_4S08))      pcm->format.wBitsPerSample = 8;    else {      logMessage(LOG_ERR, "unknown PCM capability %#lx", caps.dwFormats);      goto out;    }    recomputeWaveOutFormat(&pcm->format);    pcm->format.cbSize = 0;  }  if (!(pcm->done = CreateEvent(NULL, FALSE, TRUE, NULL))) {    logWindowsSystemError("creating PCM completion event");    goto out;  }  pcm->waveHdr = initWaveHdr;  pcm->bufSize = 0;  if ((mmres = waveOutOpen(&pcm->handle, pcm->deviceID,	  &pcm->format, (DWORD) pcm->done, 0, CALLBACK_EVENT)) != MMSYSERR_NOERROR) {    LogWaveOutError(mmres, errorLevel, "opening PCM device");    goto outEvent;  }  return pcm;outEvent:  CloseHandle(pcm->done);out:  free(pcm);//.........这里部分代码省略.........
Developer: hinderer    Project: brltty    Lines: 101
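Example 13 (brltty) uses CALLBACK_EVENT: instead of a callback function, waveOutOpen receives an event handle that the driver signals when a buffer completes (and on open/close). A hedged sketch of writing with that pattern:

#include <windows.h>
#include <mmsystem.h>

/* Sketch: event-based completion. hEvent is the handle that was passed to
 * waveOutOpen with CALLBACK_EVENT, e.g.
 *   HANDLE hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
 *   waveOutOpen(&hwo, WAVE_MAPPER, &wfx, (DWORD_PTR)hEvent, 0, CALLBACK_EVENT);
 * The writer queues a prepared header and waits until it is marked done. */
static int WriteAndWait(HWAVEOUT hwo, WAVEHDR *hdr, HANDLE hEvent)
{
    if (waveOutWrite(hwo, hdr, sizeof(*hdr)) != MMSYSERR_NOERROR)
        return -1;
    while (!(hdr->dwFlags & WHDR_DONE))
        WaitForSingleObject(hEvent, INFINITE);   /* signalled on each completion */
    return 0;
}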


Example 14: wodOpen

static	DWORD	wodOpen(DWORD_PTR *lpdwUser, LPWAVEOPENDESC lpDesc, DWORD dwFlags){    UINT 		ndlo, ndhi;    UINT		i;    WAVEMAPDATA*	wom = HeapAlloc(GetProcessHeap(), 0, sizeof(WAVEMAPDATA));    DWORD               res;    TRACE("(%p %p %08x)/n", lpdwUser, lpDesc, dwFlags);    if (!wom) {        WARN("no memory/n");	return MMSYSERR_NOMEM;    }    ndhi = waveOutGetNumDevs();    if (dwFlags & WAVE_MAPPED) {	if (lpDesc->uMappedDeviceID >= ndhi) {            WARN("invalid parameter: dwFlags WAVE_MAPPED/n");            HeapFree(GetProcessHeap(), 0, wom);            return MMSYSERR_INVALPARAM;        }	ndlo = lpDesc->uMappedDeviceID;	ndhi = ndlo + 1;	dwFlags &= ~WAVE_MAPPED;    } else {	ndlo = 0;    }    wom->self = wom;    wom->dwCallback = lpDesc->dwCallback;    wom->dwFlags = dwFlags;    wom->dwClientInstance = lpDesc->dwInstance;    wom->u.out.hOuterWave = (HWAVEOUT)lpDesc->hWave;    wom->avgSpeedOuter = wom->avgSpeedInner = lpDesc->lpFormat->nAvgBytesPerSec;    wom->nSamplesPerSecOuter = wom->nSamplesPerSecInner = lpDesc->lpFormat->nSamplesPerSec;    for (i = ndlo; i < ndhi; i++) {	/* if no ACM stuff is involved, no need to handle callbacks at this	 * level, this will be done transparently	 */        if (waveOutOpen(&wom->u.out.hInnerWave, i, lpDesc->lpFormat,                        (DWORD_PTR)wodCallback, (DWORD_PTR)wom,                        (dwFlags & ~CALLBACK_TYPEMASK) | CALLBACK_FUNCTION | WAVE_FORMAT_DIRECT) == MMSYSERR_NOERROR) {	    wom->hAcmStream = 0;	    goto found;	}    }    if ((dwFlags & WAVE_FORMAT_DIRECT) == 0) {        WAVEFORMATEX	wfx;        wfx.wFormatTag = WAVE_FORMAT_PCM;        wfx.cbSize = 0; /* normally, this field is not used for PCM format, just in case */        /* try some ACM stuff */#define	TRY(sps,bps)    wfx.nSamplesPerSec = (sps); wfx.wBitsPerSample = (bps); /                        switch (res=wodOpenHelper(wom, i, lpDesc, &wfx, dwFlags | WAVE_FORMAT_DIRECT)) { /                            case MMSYSERR_NOERROR: wom->avgSpeedInner = wfx.nAvgBytesPerSec; wom->nSamplesPerSecInner = wfx.nSamplesPerSec; goto found; /                            case WAVERR_BADFORMAT: break; /                            default: goto error; /                        }        if (lpDesc->lpFormat->wFormatTag != WAVE_FORMAT_PCM) {            /* Format changed so keep sample rate and number of channels              * the same and just change the bit depth             */            for (i = ndlo; i < ndhi; i++) {                wfx.nSamplesPerSec=lpDesc->lpFormat->nSamplesPerSec;                wfx.nChannels = lpDesc->lpFormat->nChannels;                TRY(wfx.nSamplesPerSec, 16);                TRY(wfx.nSamplesPerSec, 8);            }        } else {            /* Our resampling algorithm is quite primitive so first try             * to just change the bit depth and number of channels             */            for (i = ndlo; i < ndhi; i++) {                wfx.nSamplesPerSec=lpDesc->lpFormat->nSamplesPerSec;                wfx.nChannels = lpDesc->lpFormat->nChannels;                TRY(wfx.nSamplesPerSec, 16);                TRY(wfx.nSamplesPerSec, 8);                wfx.nChannels ^= 3;                TRY(wfx.nSamplesPerSec, 16);                TRY(wfx.nSamplesPerSec, 8);            }            for (i = ndlo; i < ndhi; i++) {                /* first try with same stereo/mono option as source */                wfx.nChannels = lpDesc->lpFormat->nChannels;                TRY(96000, 16);                TRY(48000, 16);                TRY(44100, 16);                TRY(22050, 16);       
         TRY(11025, 16);                /* 2^3 => 1, 1^3 => 2, so if stereo, try mono (and the other way around) */                wfx.nChannels ^= 3;                TRY(96000, 16);                TRY(48000, 16);                TRY(44100, 16);                TRY(22050, 16);//.........这里部分代码省略.........
Developer: Strongc    Project: reactos    Lines: 101


Example 15: SoundDev_Init

FRBC2CI_API int SoundDev_Init(){	WAVEFORMATEX format; 	int i, j;	HRESULT hr;	short *buf;	snd_sent=0;	snd_completed=0;	memset(&format, 0, sizeof(format));	format.wFormatTag=WAVE_FORMAT_PCM;	format.nChannels=1;	format.wBitsPerSample=16;	format.nSamplesPerSec=44100;	format.nBlockAlign=format.nChannels*format.wBitsPerSample/8;	format.cbSize=0;	format.nAvgBytesPerSec=format.nSamplesPerSec*format.nBlockAlign; 		/* Open a waveform device for output using window callback. */ 	while((hr=waveOutOpen((LPHWAVEOUT)&hWaveOut, WAVE_MAPPER, 		&format, 0, 0L, CALLBACK_NULL))!=MMSYSERR_NOERROR)	{		printf("waveOutOpen failed/n");		return(-1);	} 	gSndBufSize=WAV_BUFFERS*WAV_BUFFER_SIZE;	hData=GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, gSndBufSize); 	if(!hData) 	{ 		printf("Sound: Out of memory./n");		SoundDev_DeInit();		return(-1); 	}	lpData=GlobalLock(hData);	if(!lpData)	{ 		printf("Sound: Failed to lock./n");		SoundDev_DeInit();		return(-1); 	} 	memset(lpData, 0, gSndBufSize);	hWaveHdr=GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, 		(DWORD) sizeof(WAVEHDR)*WAV_BUFFERS); 	if(hWaveHdr==NULL)	{ 		printf("Sound: Failed to Alloc header./n");		SoundDev_DeInit();		return(-1); 	} 	lpWaveHdr =(LPWAVEHDR) GlobalLock(hWaveHdr); 	if(lpWaveHdr==NULL)	{ 		printf("Sound: Failed to lock header./n");		SoundDev_DeInit();		return(-1); 	}	memset(lpWaveHdr, 0, sizeof(WAVEHDR)*WAV_BUFFERS);	/* After allocation, set up and prepare headers. */ 	for(i=0; i<WAV_BUFFERS; i++)	{		lpWaveHdr[i].dwBufferLength=WAV_BUFFER_SIZE; 		lpWaveHdr[i].lpData=lpData+i*WAV_BUFFER_SIZE;		if(waveOutPrepareHeader(hWaveOut, lpWaveHdr+i, sizeof(WAVEHDR)) !=				MMSYSERR_NOERROR)		{			printf("Sound: failed to prepare wave headers/n");			SoundDev_DeInit();			return(-1);		}	}	samples=gSndBufSize/(16/8);	sample16 =(16/8)-1;	return(0);}
Developer: cr88192    Project: bgbtech_engine    Lines: 85


Example 16: lock

BOOL COutput::Open(int nChannels, int nSamplingRate, int nBitsPerSample){	MMRESULT mmr;	int i, nCount = 0;		CAutoLock lock(&m_csecDevice);	if (!waveOutGetNumDevs())		goto fail;	if (nChannels == m_pcm.wf.nChannels &&		(int)m_pcm.wf.nSamplesPerSec == nSamplingRate &&		m_pcm.wBitsPerSample == nBitsPerSample)		return TRUE;	CloseAll();	m_cbBuf = BUFLEN_BASE;	if (nSamplingRate > 11025)		m_cbBuf *= 2;	if (nSamplingRate > 22050)		m_cbBuf *= 2;	if (nChannels > 1)		m_cbBuf *= 2;	if (nBitsPerSample > 8)		m_cbBuf *= 2;	m_pcm.wf.wFormatTag = WAVE_FORMAT_PCM;	m_pcm.wf.nChannels = nChannels;	m_pcm.wf.nSamplesPerSec = nSamplingRate;	m_pcm.wf.nAvgBytesPerSec = nBitsPerSample * nSamplingRate * nChannels / 8;	m_pcm.wf.nBlockAlign = nBitsPerSample * nChannels / 8;	m_pcm.wBitsPerSample = nBitsPerSample;	for (i = 0; i < 10; i++) {		if (m_fDoubleBuf)			mmr = waveOutOpen(&m_hwo, WAVE_MAPPER, (LPWAVEFORMATEX)&m_pcm, (DWORD)WaveOutCallback2, 0, CALLBACK_FUNCTION);		else			mmr = waveOutOpen(&m_hwo, WAVE_MAPPER, (LPWAVEFORMATEX)&m_pcm, (DWORD)WaveOutCallback, 0, CALLBACK_FUNCTION);		if (mmr == MMSYSERR_NOERROR)			break;		else if (mmr != MMSYSERR_ALLOCATED)			goto fail;		Sleep(100);	}	m_fPaused = FALSE;	m_dwWritten = 0;	waveOutSetVolume(m_hwo, m_dwVolume);	if (!PrepareBuffer())		goto fail;	if (m_fDoubleBuf && !PrepareSubBuffer())		goto fail;	m_nLPeek = 0;	m_nRPeek = 0;	if (m_fFade) {		m_nFadeCurrent = FADE_BASE << FADE_BITS;		m_nFadeSamples = m_pcm.wf.nSamplesPerSec * FADE_TIME / 1000;		m_nFadeRate = (int)((((double)1 - FADE_BASE) / m_nFadeSamples) * (1 << FADE_BITS));		m_nFadeRate += 1;	}	return TRUE;fail:	CloseAll();	return FALSE;}
Developer: afriza    Project: GSPlayer    Lines: 72
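Example 16 restores a saved volume with waveOutSetVolume right after opening the device. The volume DWORD packs the left-channel level into the low word and the right-channel level into the high word (each 0x0000 to 0xFFFF), so a small helper might look like this (a sketch, not code from that project):

#include <windows.h>
#include <mmsystem.h>

/* Sketch: set left/right output volume, each in the range 0..0xFFFF. */
static void SetStereoVolume(HWAVEOUT hwo, WORD left, WORD right)
{
    DWORD packed = ((DWORD)right << 16) | left;   /* low word = left channel */
    waveOutSetVolume(hwo, packed);
}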


Example 17: printf

/*-------------------------------------------------------------------------------	FUNCTION:		receiveStream----	DATE:			2009-04-06----	REVISIONS:		2009-04-06 - Jaymz, Took out the TCP connection stuff since--								 we already have that at this point. Also added--								 a parameter WPARAM sd, which is the socket--								 from which we are receiving the data.--							   - Jaymz, Miscellaneous code touch-ups (mainly--								 formatting and removing of test printf()'s)----	DESIGNER(S):	David Overton--	PROGRAMMER(S):	David Overton, Jaymz Boilard, Steffen L. Norgren----	INTERFACE:		receiveStream(LPVOID iValue)----	RETURNS:		void----	NOTES: The main function to receive a UDP stream of data and process--	that information.-----------------------------------------------------------------------------*/DWORD WINAPI receiveStream(LPVOID iValue){	WAVEFORMATEX	wfx;	char			buffer[BLOCK_SIZE]; /* intermediate buffer for reading */	int				i, n, remote_len;	DWORD			outBytes = 0;	char			* play_byte = "1";	BOOL			firstRun = TRUE;	remote_len = sizeof(udp_remote);	/* initialise the module variables */	waveBlocks			= allocateBlocks(BLOCK_SIZE, BLOCK_COUNT);	waveFreeBlockCount	= BLOCK_COUNT;	waveCurrentBlock	= 0;	InitializeCriticalSection(&waveCriticalSection);		/* playback loop - read from socket */	while (TRUE) 	{		if (ci.request != MULTI_STREAM) {			/* send play signal */			sendto(ci.udpSocket, play_byte, sizeof(play_byte), 0, (struct sockaddr *)&udp_remote, remote_len);		}		if ((n = recvfrom(ci.udpSocket, buffer, sizeof(buffer), 0, (struct sockaddr *)&udp_remote, &remote_len)) <= 0)		{			waveOutClose(hWaveOut);			ExitThread(0);		}		/* first 4 bytes in a file, so set the header information */		if(strncmp(buffer, "RIFF", 4) == 0)		{			memcpy(&wfx, buffer+20, sizeof(wfx));			if (ci.request != MULTI_STREAM || firstRun == TRUE) {				waveOutClose(hWaveOut);							if(waveOutOpen(&hWaveOut, WAVE_MAPPER, &wfx, (DWORD_PTR)waveOutProc,					(DWORD_PTR)&waveFreeBlockCount, CALLBACK_FUNCTION) != MMSYSERR_NOERROR)				{						MessageBox(NULL, "Unable to open mapper device.", "Error", MB_OK);						ExitProcess(1);				}				firstRun = FALSE;			}		}		if(n == 0)			break;		else if(n < sizeof(buffer) && n != WAVE_HEAD_SIZE)		{			memset(buffer + n, 0, sizeof(buffer) - n);			writeAudio(buffer, n);			break;		}		writeAudio(buffer, n);	}	/* wait for all blocks to complete */	while(waveFreeBlockCount < BLOCK_COUNT)		Sleep(10);	/* unprepare any blocks that are still prepared */	for(i = 0; i < waveFreeBlockCount; i++)	{		if(waveBlocks[i].dwFlags & WHDR_PREPARED)			waveOutUnprepareHeader(hWaveOut, &waveBlocks[i], sizeof(WAVEHDR));	}	DeleteCriticalSection(&waveCriticalSection);	freeBlocks(waveBlocks);	waveOutClose(hWaveOut);	streamInProgress = FALSE;	ExitThread(0);}
Developer: AshuDassanRepo    Project: bcit-courses    Lines: 99


Example 18: StartStream

UINT8 StartStream(UINT8 DeviceID){    UINT32 RetVal;#ifdef USE_LIBAO    ao_sample_format ao_fmt;#else#ifdef WIN32    UINT16 Cnt;    HANDLE WaveOutThreadHandle;    DWORD WaveOutThreadID;    //char TestStr[0x80];#elif defined(__NetBSD__)    struct audio_info AudioInfo;#else    UINT32 ArgVal;#endif#endif	// ! USE_LIBAO    if (WaveOutOpen)        return 0xD0;	// Thread is already active    // Init Audio    WaveFmt.wFormatTag = WAVE_FORMAT_PCM;    WaveFmt.nChannels = 2;    WaveFmt.nSamplesPerSec = SampleRate;    WaveFmt.wBitsPerSample = 16;    WaveFmt.nBlockAlign = WaveFmt.wBitsPerSample * WaveFmt.nChannels / 8;    WaveFmt.nAvgBytesPerSec = WaveFmt.nSamplesPerSec * WaveFmt.nBlockAlign;    WaveFmt.cbSize = 0;    if (DeviceID == 0xFF)        return 0x00;#if defined(WIN32) || defined(USE_LIBAO)    BUFFERSIZE = SampleRate / 100 * SAMPLESIZE;    if (BUFFERSIZE > BUFSIZE_MAX)        BUFFERSIZE = BUFSIZE_MAX;#else    BUFFERSIZE = 1 << BUFSIZELD;#endif    SMPL_P_BUFFER = BUFFERSIZE / SAMPLESIZE;    if (AUDIOBUFFERU > AUDIOBUFFERS)        AUDIOBUFFERU = AUDIOBUFFERS;    PauseThread = true;    ThreadPauseConfrm = false;    CloseThread = false;    StreamPause = false;#ifndef USE_LIBAO#ifdef WIN32    ThreadPauseEnable = true;    WaveOutThreadHandle = CreateThread(NULL, 0x00, &WaveOutThread, NULL, 0x00,                                       &WaveOutThreadID);    if(WaveOutThreadHandle == NULL)        return 0xC8;		// CreateThread failed    CloseHandle(WaveOutThreadHandle);    RetVal = waveOutOpen(&hWaveOut, ((UINT)DeviceID - 1), &WaveFmt, 0x00, 0x00, CALLBACK_NULL);    if(RetVal != MMSYSERR_NOERROR)#else    ThreadPauseEnable = false;#ifdef __NetBSD__    hWaveOut = open("/dev/audio", O_WRONLY);#else    hWaveOut = open("/dev/dsp", O_WRONLY);#endif    if (hWaveOut < 0)#endif#else	// ifdef USE_LIBAO    ao_initialize();    ThreadPauseEnable = false;    ao_fmt.bits = WaveFmt.wBitsPerSample;    ao_fmt.rate = WaveFmt.nSamplesPerSec;    ao_fmt.channels = WaveFmt.nChannels;    ao_fmt.byte_format = AO_FMT_NATIVE;    ao_fmt.matrix = NULL;    dev_ao = ao_open_live(ao_default_driver_id(), &ao_fmt, NULL);    if (dev_ao == NULL)#endif    {        CloseThread = true;        return 0xC0;		// waveOutOpen failed    }    WaveOutOpen = true;    //sprintf(TestStr, "Buffer 0,0:/t%p/nBuffer 0,1:/t%p/nBuffer 1,0:/t%p/nBuffer 1,1:/t%p/n",    //		&BufferOut[0][0], &BufferOut[0][1], &BufferOut[1][0], &BufferOut[1][1]);    //AfxMessageBox(TestStr);#ifndef USE_LIBAO#ifdef WIN32    for (Cnt = 0x00; Cnt < AUDIOBUFFERU; Cnt ++)    {        WaveHdrOut[Cnt].lpData = BufferOut[Cnt];	// &BufferOut[Cnt][0x00];        WaveHdrOut[Cnt].dwBufferLength = BUFFERSIZE;        WaveHdrOut[Cnt].dwBytesRecorded = 0x00;        WaveHdrOut[Cnt].dwUser = 0x00;        WaveHdrOut[Cnt].dwFlags = 0x00;        WaveHdrOut[Cnt].dwLoops = 0x00;//.........这里部分代码省略.........
Developer: codeman38    Project: vgmplay    Lines: 101


Example 19: while

unsigned int WavePlayer::PlayThreadProcImpl(){	/// 定义为寄存器变量,因为它将会被高频率的使用,用于编译器优化	register    ThreadMsg       tmsg = TMSG_ALIVE;	/// 线程循环	while (tmsg)	{		// 每次循环后,交出CPU控制权,放在此处,因为下面有continue语句		Sleep(10);		/// 首先检查线程消息		EnterCriticalSection(&m_cs);		tmsg = m_msgPlayThread;		LeaveCriticalSection(&m_cs);		// 线程要结束,退出线程循环		if (!tmsg)   break;		// 如果设备为空,表示还没有打开设备,需要打开设备		if (m_hWaveoutDev == NULL)		{			EnterCriticalSection(&m_cs);			MMRESULT mmres = waveOutOpen(&m_hWaveoutDev, WAVE_MAPPER, &m_waveData.wfmtx, (DWORD_PTR)WaveOutProc, (DWORD_PTR)this, CALLBACK_FUNCTION);			LeaveCriticalSection(&m_cs);			if (mmres != MMSYSERR_NOERROR)			{				// failed, try again.				continue;			}		}		// 检查空闲缓存块		EnterCriticalSection(&m_cs);		int free = m_wBlock.wfreeblock;		LeaveCriticalSection(&m_cs);		// 如果没有空闲的缓存了,等待...		if (free < BP_TURN)		{			continue;		}		/////////////////////////////////////////////////////////////////////////		/////////////////////////////////////////////////////////////////////////		///                       < 播放主循环 >                              ///		/////////////////////////////////////////////////////////////////////////		/////////////////////////////////////////////////////////////////////////		WAVEHDR     *current = NULL;		/// BP_TURN为每次写入播放队列的块数		for (unsigned int m = 0; m < BP_TURN; m++)		{			/// 当前空闲播放缓存块			current = &m_wBlock.pWaveHdr[m_wBlock.wcurrblock];			// 首先需要检查有没有被Unprepare掉			if (current->dwFlags & WHDR_PREPARED)			{				waveOutUnprepareHeader(m_hWaveoutDev, current, sizeof(WAVEHDR));			}			/// 计算剩余需要播放的数据			EnterCriticalSection(&m_cs);			unsigned long left = m_waveData.dwSize - m_wBlock.wpos;			unsigned int bDecodeFinished = m_waveData.bDecodeFinished;			LeaveCriticalSection(&m_cs);			unsigned long chunk = 0;			if (left >= BLOCK_SIZE)			{				chunk = BLOCK_SIZE;			}			else if (!bDecodeFinished)			{				// 如果解码还没有结束,现有的数据量有不足以填满一个缓存块,先不写入缓存				break;			}			else if (left && left < BLOCK_SIZE)			{				chunk = left;			}			else			{				//////////////////////////////////////////////////////////////////////				///                 < 播放完成>                                    ///				//////////////////////////////////////////////////////////////////////				/// 获取空闲缓存块数量				EnterCriticalSection(&m_cs);				int free = m_wBlock.wfreeblock;				LeaveCriticalSection(&m_cs);				/// 当所有的缓存块都播放完了,才意味着播放真正完成				if (free == BLOCK_COUNT)				{					/// Unprepare缓存块					for (int j = 0; j < m_wBlock.wfreeblock; j++)					{						if (m_wBlock.pWaveHdr[j].dwFlags & WHDR_PREPARED)//.........这里部分代码省略.........
Developer: tantaishan    Project: MyEcho    Lines: 101


Example 20: memset

short *Audio_MMSystem::open (AudioConfig &cfg, const char *){    WAVEFORMATEX  wfm;    if (isOpen)    {        _errorString = "MMSYSTEM ERROR: Audio device already open.";        return NULL;    }    isOpen = true;    /* Initialise blocks */    memset (blockHandles, 0, sizeof (blockHandles));    memset (blockHeaders, 0, sizeof (blockHeaders));    memset (blockHeaderHandles, 0, sizeof (blockHeaderHandles));    // Format    memset (&wfm, 0, sizeof(WAVEFORMATEX));    wfm.wFormatTag      = WAVE_FORMAT_PCM;    wfm.nChannels       = cfg.channels;    wfm.nSamplesPerSec  = cfg.frequency;    wfm.wBitsPerSample  = 16;    wfm.nBlockAlign     = wfm.wBitsPerSample / 8 * wfm.nChannels;    wfm.nAvgBytesPerSec = wfm.nSamplesPerSec * wfm.nBlockAlign;    wfm.cbSize          = 0;    // Rev 1.3 (saw) - Calculate buffer to hold 250ms of data    bufSize = wfm.nSamplesPerSec / 4 * wfm.nBlockAlign;    cfg.bufSize = bufSize / 2;    waveOutOpen (&waveHandle, WAVE_MAPPER, &wfm, 0, 0, 0);    if ( !waveHandle ) {        _errorString = "MMSYSTEM ERROR: Can't open wave out device.";        goto Audio_MMSystem_openError;    }    _settings    = cfg;    {        /* Allocate and lock memory for all mixing blocks: */        int i;        for (i = 0; i < MAXBUFBLOCKS; i++ )        {            /* Allocate global memory for mixing block: */            if ( (blockHandles[i] = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE,                                                bufSize)) == NULL )            {                _errorString = "MMSYSTEM ERROR: Can't allocate global memory.";                goto Audio_MMSystem_openError;            }            /* Lock mixing block memory: */            if ( (blocks[i] = (short *)GlobalLock(blockHandles[i])) == NULL )            {                _errorString = "MMSYSTEM ERROR: Can't lock global memory.";                goto Audio_MMSystem_openError;            }            /* Allocate global memory for mixing block header: */            if ( (blockHeaderHandles[i] = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE,                                                      sizeof(WAVEHDR))) == NULL )            {                _errorString = "MMSYSTEM ERROR: Can't allocate global memory.";                goto Audio_MMSystem_openError;            }            /* Lock mixing block header memory: */            WAVEHDR *header;            if ( (header = blockHeaders[i] =                  (WAVEHDR*)GlobalLock(blockHeaderHandles[i])) == NULL )            {                _errorString = "MMSYSTEM ERROR: Can't lock global memory.";                goto Audio_MMSystem_openError;            }            /* Reset wave header fields: */            memset (header, 0, sizeof (WAVEHDR));            header->lpData         = (char*)blocks[i];            header->dwBufferLength = bufSize;            header->dwFlags        = WHDR_DONE; /* mark the block is done */        }    }    blockNum = 0;    _sampleBuffer = blocks[blockNum];return _sampleBuffer;Audio_MMSystem_openError:    close ();    return NULL;}
开发者ID:Jordanio,项目名称:SidplaySharp,代码行数:91,
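示例20调用 waveOutOpen 后只检查了 waveHandle 是否非空,没有使用返回的 MMRESULT。下面是编者补充的示意(函数名 OpenDefaultWaveOut 为编者虚构),展示更常见的写法:保存返回码,失败时用 waveOutGetErrorTextA 取得可读的错误文本再记录。

#include <windows.h>
#include <mmsystem.h>
#include <stdio.h>
#pragma comment(lib, "winmm.lib")

// 打开默认输出设备;失败时打印可读错误文本并返回 NULL
static HWAVEOUT OpenDefaultWaveOut(WAVEFORMATEX *wfm)
{
    HWAVEOUT h = NULL;
    MMRESULT res = waveOutOpen(&h, WAVE_MAPPER, wfm, 0, 0, CALLBACK_NULL);
    if (res != MMSYSERR_NOERROR)
    {
        char msg[MAXERRORLENGTH] = "";
        waveOutGetErrorTextA(res, msg, MAXERRORLENGTH);   // 错误码转可读文本
        fprintf(stderr, "waveOutOpen failed: %s\n", msg);
        return NULL;
    }
    return h;
}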


示例21: open_sndcard

phmstream_t *open_sndcard(int format, phcodec_t *codec, const char * deviceID)
{
  phmstream_t *stream;
  MMRESULT mr = NOERROR;
  WAVEFORMATEX		wfx;
  HWAVEIN hWaveIn;
  HWAVEOUT hWaveOut;

  stream = (phmstream_t*) osip_malloc(sizeof(phmstream_t));
  memset(stream, 0, sizeof(*stream));

#if USE_CODECS
  stream->codec = codec;
  if (codec->encoder_init)
    stream->encoder_ctx = codec->encoder_init();
  if (codec->decoder_init)
    stream->decoder_ctx = codec->decoder_init();
#endif /* !USE_CODECS */

  switch(format)
    {
    case WAVE_FORMAT_MULAW:
      wfx.wFormatTag = WAVE_FORMAT_MULAW;
      wfx.cbSize = 0;
      wfx.nAvgBytesPerSec = 8000;
      wfx.nBlockAlign = 1;
      wfx.nChannels = 1;
      wfx.nSamplesPerSec = 8000;
      wfx.wBitsPerSample = 8;
      break;
    case WAVE_FORMAT_ALAW:
      wfx.wFormatTag = WAVE_FORMAT_ALAW;
      wfx.cbSize = 0;
      wfx.nAvgBytesPerSec = 8000;
      wfx.nBlockAlign = 1;
      wfx.nChannels = 1;
      wfx.nSamplesPerSec = 8000;
      wfx.wBitsPerSample = 8;
      break;
    case WAVE_FORMAT_GSM610:
      gsmformat = GlobalAlloc(GMEM_MOVEABLE,(UINT)(sizeof(GSM610WAVEFORMAT)));
      gsmformat = (LPGSM610WAVEFORMAT)GlobalLock(gsmformat);
      gsmformat->wfx.wFormatTag = WAVE_FORMAT_GSM610;
      gsmformat->wfx.nChannels = 1;
      gsmformat->wfx.nSamplesPerSec = 8000;
      gsmformat->wfx.nAvgBytesPerSec = 1625;
      gsmformat->wfx.nBlockAlign = 65;
      gsmformat->wfx.wBitsPerSample = 0;
      gsmformat->wfx.cbSize = 2;
      gsmformat->wSamplesPerBlock = 320;
      break;
    case WAVE_FORMAT_MSG723:
      wfx.wFormatTag = WAVE_FORMAT_MSG723;
      wfx.cbSize = 0;
      wfx.nAvgBytesPerSec = 800;
      wfx.nBlockAlign = 1;
      wfx.nChannels = 1;
      wfx.nSamplesPerSec = 8000;
      wfx.wBitsPerSample = 8;
      break;
    case WAVE_FORMAT_PCM:
      wfx.wFormatTag = WAVE_FORMAT_PCM;
      wfx.cbSize = 0;
      wfx.nAvgBytesPerSec = 16000;
      wfx.nBlockAlign = 2;
      wfx.nChannels = 1;
      wfx.nSamplesPerSec = 8000;
      wfx.wBitsPerSample = 16;
      break;
    default:
      break;
    }

  ph_media_init_deviceID(deviceID);

  mr = waveOutOpen(&hWaveOut, waveoutDeviceID, &wfx, (DWORD)0/* SpeakerCallback */, 0/* arg */, CALLBACK_NULL /* CALLBACK_FUNCTION */);
  stream->hWaveOut = hWaveOut;

  if (mr != NOERROR)
    {
      fprintf(stderr, "__call_free: waveOutOpen: 0x%i\n", mr);
      exit(-1);
      return NULL;
    }
  else
    {
      int i;
      for (i = 0; i < USED_OUT_BUFFERS; i++)
	{
	  WAVEHDR *whp = &stream->waveHdrOut[i];
	  whp->lpData = stream->dataBufferOut[i];
	  whp->dwBufferLength = 512;  /* frameSize */
	  whp->dwFlags = 0;
	  whp->dwUser = i;
//.........这里部分代码省略.........
开发者ID:BackupTheBerlios,项目名称:sfsipua-svn,代码行数:101,
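示例21在截断前已经为每个 WAVEHDR 填好 lpData 和 dwBufferLength。按 waveform API 的一般流程,接下来还要对每个块调用 waveOutPrepareHeader,再用 waveOutWrite 送入播放队列,播放完(WHDR_DONE 置位)后用 waveOutUnprepareHeader 回收。下面是编者补充的片段,沿用示例中的 hWaveOut 与 whp 命名(假设的上下文,非原项目代码):

/* 假设 hWaveOut 已由 waveOutOpen 打开,whp 指向一个已填充好的 WAVEHDR */
MMRESULT r = waveOutPrepareHeader(hWaveOut, whp, sizeof(WAVEHDR));
if (r == MMSYSERR_NOERROR)
    r = waveOutWrite(hWaveOut, whp, sizeof(WAVEHDR));   /* 送入播放队列 */
/* 播放完成(whp->dwFlags 含 WHDR_DONE)后再调用:
   waveOutUnprepareHeader(hWaveOut, whp, sizeof(WAVEHDR)); */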


示例22: SNDDMA_InitWav

/*
==================
SNDDM_InitWav

Crappy windows multimedia base
==================
*/
qboolean SNDDMA_InitWav (void)
{
	WAVEFORMATEX  format;
	int				i;
	HRESULT			hr;
	int rc;

	snd_sent = 0;
	snd_completed = 0;

	shm = &sn;

	shm->channels = 2;
	shm->samplebits = 16;
	shm->speed = 11025;

	rc = COM_CheckParm("-sspeed");
	if (rc)
		shm->speed = Q_atoi(com_argv[rc+1]);

	rc = COM_CheckParm("-sspleed");
	if (rc)
		spleed = Q_atoi(com_argv[rc+1]);

	memset (&format, 0, sizeof(format));
	format.wFormatTag = WAVE_FORMAT_PCM;
	format.nChannels = shm->channels;
	format.wBitsPerSample = shm->samplebits;
	format.nSamplesPerSec = shm->speed;
	format.nBlockAlign = format.nChannels
		*format.wBitsPerSample / 8;
	format.cbSize = 0;
	format.nAvgBytesPerSec = format.nSamplesPerSec
		*format.nBlockAlign;

	/* Open a waveform device for output using window callback. */
	while ((hr = waveOutOpen((LPHWAVEOUT)&hWaveOut, WAVE_MAPPER,
					&format,
					0, 0L, CALLBACK_NULL)) != MMSYSERR_NOERROR)
	{
		if (hr != MMSYSERR_ALLOCATED)
		{
			Con_SafePrintf ("waveOutOpen failed\n");
			return false;
		}

		if (MessageBox (NULL,
						"The sound hardware is in use by another app.\n\n"
					    "Select Retry to try to start sound again or Cancel to run Quake with no sound.",
						"Sound not available",
						MB_RETRYCANCEL | MB_SETFOREGROUND | MB_ICONEXCLAMATION) != IDRETRY)
		{
			Con_SafePrintf ("waveOutOpen failure;\n"
							"  hardware already in use\n");
			return false;
		}
	}

	/*
	 * Allocate and lock memory for the waveform data. The memory
	 * for waveform data must be globally allocated with
	 * GMEM_MOVEABLE and GMEM_SHARE flags.
	*/
	gSndBufSize = WAV_BUFFERS*WAV_BUFFER_SIZE;
	hData = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, gSndBufSize);
	if (!hData)
	{
		Con_SafePrintf ("Sound: Out of memory.\n");
		FreeSound ();
		return false;
	}
	lpData = GlobalLock(hData);
	if (!lpData)
	{
		Con_SafePrintf ("Sound: Failed to lock.\n");
		FreeSound ();
		return false;
	}
	memset (lpData, 0, gSndBufSize);

	/*
	 * Allocate and lock memory for the header. This memory must
	 * also be globally allocated with GMEM_MOVEABLE and
	 * GMEM_SHARE flags.
	 */
	hWaveHdr = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE,
		(DWORD) sizeof(WAVEHDR) * WAV_BUFFERS);

	if (hWaveHdr == NULL)
	{
		Con_SafePrintf ("Sound: Failed to Alloc header.\n");
		FreeSound ();
		return false;
//.........这里部分代码省略.........
开发者ID:Blzut3,项目名称:Engoo,代码行数:101,


示例23: CreateEvent

void audio_waveout::open(void)
{
    MMRESULT err;
    HANDLE playthread_handle = 0;

    /* Checkin the status of the object */
    if (status != WAVEOUT_NOTREADY)
    {
        /* TODO: throw error */
    }

    /* Creating the EVENT object that will be signaled when
       the playing thread has to wake up */
    wakeup_playthread = CreateEvent(0, FALSE, FALSE, 0);
    if (!wakeup_playthread)
    {
        status = WAVEOUT_ERR;
        /* TODO: throw error */
    }

    /* Inialize buffers for recording audio data from the wavein audio line */
    alloc_buffers_mem_(buffers, buf_secs);
    init_headers_();

    /* Sound format that will be captured by wavein */
    wave_format.wFormatTag = WAVE_FORMAT_PCM;
    wave_format.nChannels = aud_info.channels();
    wave_format.nSamplesPerSec = aud_info.sample_rate();
    wave_format.wBitsPerSample = aud_info.bits();
    wave_format.nBlockAlign = aud_info.block_align();
    wave_format.nAvgBytesPerSec = aud_info.byte_rate();

    /* Creating the recording thread */
    playthread_handle = CreateThread(NULL,
                                     0,
                                     audio_waveout::playing_procedure,
                                     (PVOID)this,
                                     0,
                                     &playthread_id);

    /* Checking thread handle */
    if (!playthread_handle)
    {
        /* Updating status */
        status = WAVEOUT_ERR;
        /* TODO: throw error */
    }

    /* We don't need the thread handle anymore, so we can close it from now.
       (We'll just need the thread ID for the `waveInOpen' API) */
    CloseHandle(playthread_handle);

    /* Reset the `audio_source' to the start position */
    audio_buf.set_position_start();

    /* Opens the WAVE_OUT device */
    err = waveOutOpen(&waveout_handle,
                      WAVE_MAPPER,
                      &wave_format,
                      playthread_id,
                      0,
                      CALLBACK_THREAD | WAVE_ALLOWSYNC);
    if (err != MMSYSERR_NOERROR)
    {
        MessageBox(0, _T("waveOutOpen Error"), 0, 0);
        /* TODO: throw error */
    }

    status = WAVEOUT_READY;
}
开发者ID:GYGit,项目名称:reactos,代码行数:69,
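示例23以 CALLBACK_THREAD | WAVE_ALLOWSYNC 打开设备,波形消息会投递到 playthread_id 对应线程的消息队列。下面是编者补充的示意(线程函数名为假设),说明播放线程通常如何用 GetMessage 处理这些消息:

#include <windows.h>
#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")

// CALLBACK_THREAD 下,波形消息以线程消息的形式到达
static DWORD WINAPI PlayingThread(LPVOID param)
{
    MSG msg;
    while (GetMessage(&msg, NULL, 0, 0) > 0)
    {
        switch (msg.message)
        {
        case MM_WOM_OPEN:   // 设备已打开
            break;
        case MM_WOM_DONE:   // 一个 WAVEHDR 播放完毕(lParam 指向该 WAVEHDR),可写入下一块
            break;
        case MM_WOM_CLOSE:  // 设备已关闭,结束线程
            return 0;
        }
    }
    return 0;
}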


示例24: wave_in_test_deviceIn

//.........这里部分代码省略.........
       dev_name(device),wave_in_error(rc));
    ok(frag.dwFlags&WHDR_PREPARED,"waveInPrepareHeader(%s): prepared flag "
       "not set\n",dev_name(device));

    if (winetest_interactive && rc==MMSYSERR_NOERROR) {
        trace("Recording for 1 second at %5dx%2dx%d %s %s\n",
              pwfx->nSamplesPerSec, pwfx->wBitsPerSample,pwfx->nChannels,
              get_format_str(pwfx->wFormatTag),
              flags & WAVE_FORMAT_DIRECT ? "WAVE_FORMAT_DIRECT" :
              flags & WAVE_MAPPED ? "WAVE_MAPPED" : "");
        rc=waveInAddBuffer(win, &frag, sizeof(frag));
        ok(rc==MMSYSERR_NOERROR,"waveInAddBuffer(%s): rc=%s\n",
           dev_name(device),wave_in_error(rc));

        /* Check that the position is 0 at start */
        check_position(device, win, 0, pwfx);

        rc=waveInStart(win);
        ok(rc==MMSYSERR_NOERROR,"waveInStart(%s): rc=%s\n",
           dev_name(device),wave_in_error(rc));

        res = WaitForSingleObject(hevent,1200);
        ok(res==WAIT_OBJECT_0,"WaitForSingleObject failed for header\n");
        ok(frag.dwFlags&WHDR_DONE,"WHDR_DONE not set in frag.dwFlags\n");
        ok(frag.dwBytesRecorded==pwfx->nAvgBytesPerSec,
           "frag.dwBytesRecorded=%d, should=%d\n",
           frag.dwBytesRecorded,pwfx->nAvgBytesPerSec);

        mmt.wType = TIME_BYTES;
        rc=waveInGetPosition(win, &mmt, sizeof(mmt));
        ok(rc==MMSYSERR_NOERROR,"waveInGetPosition(%s): rc=%s\n",
           dev_name(device),wave_in_error(rc));
        ok(mmt.wType == TIME_BYTES, "doesn't support TIME_BYTES: %u\n", mmt.wType);
        ok(mmt.u.cb == frag.dwBytesRecorded, "Got wrong position: %u\n", mmt.u.cb);

        /* stop playing on error */
        if (res!=WAIT_OBJECT_0) {
            rc=waveInStop(win);
            ok(rc==MMSYSERR_NOERROR,
               "waveInStop(%s): rc=%s\n",dev_name(device),wave_in_error(rc));
        }
    }

    rc=waveInUnprepareHeader(win, &frag, sizeof(frag));
    ok(rc==MMSYSERR_NOERROR,"waveInUnprepareHeader(%s): rc=%s\n",
       dev_name(device),wave_in_error(rc));

    rc=waveInClose(win);
    ok(rc==MMSYSERR_NOERROR,
       "waveInClose(%s): rc=%s\n",dev_name(device),wave_in_error(rc));
    res=WaitForSingleObject(hevent,1000);
    ok(res==WAIT_OBJECT_0,"WaitForSingleObject failed for close\n");

    if (winetest_interactive)
    {
        /*
         * Now play back what we recorded
         */
        HWAVEOUT wout;

        trace("Playing back recorded sound\n");
        rc=waveOutOpen(&wout,WAVE_MAPPER,pwfx,(DWORD_PTR)hevent,0,CALLBACK_EVENT);
        ok(rc==MMSYSERR_NOERROR || rc==MMSYSERR_BADDEVICEID ||
           rc==MMSYSERR_NOTENABLED || rc==MMSYSERR_NODRIVER ||
           rc==MMSYSERR_ALLOCATED ||
           ((rc==WAVERR_BADFORMAT || rc==MMSYSERR_NOTSUPPORTED) &&
            !(pcaps->dwFormats & format)),
           "waveOutOpen(%s) format=%dx%2dx%d flags=%x(%s) rc=%s\n",
           dev_name(device),pwfx->nSamplesPerSec,pwfx->wBitsPerSample,
           pwfx->nChannels,CALLBACK_EVENT|flags,
           wave_open_flags(CALLBACK_EVENT),wave_out_error(rc));
        if (rc==MMSYSERR_NOERROR)
        {
            rc=waveOutPrepareHeader(wout, &frag, sizeof(frag));
            ok(rc==MMSYSERR_NOERROR,"waveOutPrepareHeader(%s): rc=%s\n",
               dev_name(device),wave_out_error(rc));

            if (rc==MMSYSERR_NOERROR)
            {
                WaitForSingleObject(hevent,INFINITE);
                rc=waveOutWrite(wout, &frag, sizeof(frag));
                ok(rc==MMSYSERR_NOERROR,"waveOutWrite(%s): rc=%s\n",
                   dev_name(device),wave_out_error(rc));
                WaitForSingleObject(hevent,INFINITE);

                rc=waveOutUnprepareHeader(wout, &frag, sizeof(frag));
                ok(rc==MMSYSERR_NOERROR,"waveOutUnprepareHeader(%s): rc=%s\n",
                   dev_name(device),wave_out_error(rc));
            }
            rc=waveOutClose(wout);
            ok(rc==MMSYSERR_NOERROR,"waveOutClose(%s): rc=%s\n",
               dev_name(device),wave_out_error(rc));
        }
        else
            trace("Unable to play back the recorded sound\n");
    }

    HeapFree(GetProcessHeap(), 0, frag.lpData);
    CloseHandle(hevent);
}
开发者ID:AmesianX,项目名称:wine,代码行数:101,
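示例24回放录音时使用 CALLBACK_EVENT:设备打开、一个缓冲播放完毕、设备关闭都会把事件置位,所以代码在 waveOutWrite 之前先等一次事件(对应打开),写入后再等一次(对应播放完毕)。若要更稳妥,可以像下面这段编者补充的示意那样,等到事件后再确认 WHDR_DONE(沿用示例中的 frag 与 hevent 命名,属假设的上下文):

/* 事件也可能因打开、关闭等原因置位,确认 WHDR_DONE 才算真正播完 */
while (!(frag.dwFlags & WHDR_DONE))
    WaitForSingleObject(hevent, INFINITE);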


示例25: winmm_stream_init

static int
winmm_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name,
                  cubeb_stream_params stream_params, unsigned int latency,
                  cubeb_data_callback data_callback,
                  cubeb_state_callback state_callback,
                  void * user_ptr)
{
  MMRESULT r;
  WAVEFORMATEXTENSIBLE wfx;
  cubeb_stream * stm;
  int i;
  size_t bufsz;

  assert(context);
  assert(stream);

  *stream = NULL;

  memset(&wfx, 0, sizeof(wfx));
  if (stream_params.channels > 2) {
    wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    wfx.Format.cbSize = sizeof(wfx) - sizeof(wfx.Format);
  } else {
    wfx.Format.wFormatTag = WAVE_FORMAT_PCM;
    if (stream_params.format == CUBEB_SAMPLE_FLOAT32LE) {
      wfx.Format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
    }
    wfx.Format.cbSize = 0;
  }
  wfx.Format.nChannels = stream_params.channels;
  wfx.Format.nSamplesPerSec = stream_params.rate;

  /* XXX fix channel mappings */
  wfx.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;

  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    wfx.Format.wBitsPerSample = 16;
    wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    wfx.Format.wBitsPerSample = 32;
    wfx.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  wfx.Format.nBlockAlign = (wfx.Format.wBitsPerSample * wfx.Format.nChannels) / 8;
  wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign;
  wfx.Samples.wValidBitsPerSample = wfx.Format.wBitsPerSample;

  EnterCriticalSection(&context->lock);
  /* CUBEB_STREAM_MAX is a horrible hack to avoid a situation where, when
     many streams are active at once, a subset of them will not consume (via
     playback) or release (via waveOutReset) their buffers. */
  if (context->active_streams >= CUBEB_STREAM_MAX) {
    LeaveCriticalSection(&context->lock);
    return CUBEB_ERROR;
  }
  context->active_streams += 1;
  LeaveCriticalSection(&context->lock);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->params = stream_params;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;
  stm->written = 0;

  if (latency < context->minimum_latency) {
    latency = context->minimum_latency;
  }

  bufsz = (size_t) (stm->params.rate / 1000.0 * latency * bytes_per_frame(stm->params) / NBUFS);
  if (bufsz % bytes_per_frame(stm->params) != 0) {
    bufsz += bytes_per_frame(stm->params) - (bufsz % bytes_per_frame(stm->params));
  }
  assert(bufsz % bytes_per_frame(stm->params) == 0);

  stm->buffer_size = bufsz;

  InitializeCriticalSection(&stm->lock);

  stm->event = CreateEvent(NULL, FALSE, FALSE, NULL);
  if (!stm->event) {
    winmm_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  stm->soft_volume = -1.0;

  /* winmm_buffer_callback will be called during waveOutOpen, so all
     other initialization must be complete before calling it. */
  r = waveOutOpen(&stm->waveout, WAVE_MAPPER, &wfx.Format,
//.........这里部分代码省略.........
开发者ID:qiuyang001,项目名称:Spidermonkey,代码行数:101,
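示例25的缓冲大小按 采样率/1000 × 延迟(毫秒) × 每帧字节数 ÷ NBUFS 计算,再向上补齐到整帧。举一个编者假设的数值例子:rate = 44100、latency = 100 ms、16 位立体声(每帧 4 字节)、NBUFS = 4,则 bufsz = 44100/1000.0 × 100 × 4 ÷ 4 = 4410 字节;4410 % 4 = 2 不是整帧,补 2 字节得 4412 字节,即每个缓冲约 1103 帧(约 25 ms)。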


示例26: dsp_test_format

static bool dsp_test_format(WAVEFORMATEX *format)
{
	HWAVEOUT waveout;
	return waveOutOpen(&waveout, 0, format, 0, 0, WAVE_FORMAT_QUERY | CALLBACK_NULL) == MMSYSERR_NOERROR;
}
开发者ID:AnXi-TieGuanYin-Tea,项目名称:flinux,代码行数:5,
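示例26利用 WAVE_FORMAT_QUERY 标志,只询问设备 0 是否支持给定格式而不真正打开设备。下面是编者补充的调用示意(44.1 kHz/16 位/立体声这一目标格式为假设值),复用上面的 dsp_test_format:

// 探测是否支持 CD 音质的 PCM 格式;只查询,不打开设备
static bool can_play_cd_quality(void)
{
	WAVEFORMATEX fmt = {0};
	fmt.wFormatTag      = WAVE_FORMAT_PCM;
	fmt.nChannels       = 2;
	fmt.nSamplesPerSec  = 44100;
	fmt.wBitsPerSample  = 16;
	fmt.nBlockAlign     = fmt.nChannels * fmt.wBitsPerSample / 8;
	fmt.nAvgBytesPerSec = fmt.nSamplesPerSec * fmt.nBlockAlign;
	return dsp_test_format(&fmt);
}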


示例27: SNDDMA_InitWav

/*
==================
SNDDM_InitWav

Crappy windows multimedia base
==================
*/
qboolean SNDDMA_InitWav (void)
{
	WAVEFORMATEX  format;
	int				i;
	HRESULT			hr;

	Com_Printf( "Initializing wave sound\n" );

	snd_sent = 0;
	snd_completed = 0;

	dma.channels = 2;
	dma.samplebits = 16;

	if (s_khz->value == 44)
		dma.speed = 44100;
	else if (s_khz->value == 22)
		dma.speed = 22050;
	else
		dma.speed = 11025;

	memset (&format, 0, sizeof(format));
	format.wFormatTag = WAVE_FORMAT_PCM;
	format.nChannels = dma.channels;
	format.wBitsPerSample = dma.samplebits;
	format.nSamplesPerSec = dma.speed;
	format.nBlockAlign = format.nChannels
		*format.wBitsPerSample / 8;
	format.cbSize = 0;
	format.nAvgBytesPerSec = format.nSamplesPerSec
		*format.nBlockAlign;

	/* Open a waveform device for output using window callback. */
	Com_DPrintf ("...opening waveform device: ");
	while ((hr = waveOutOpen((LPHWAVEOUT)&hWaveOut, WAVE_MAPPER,
					&format,
					0, 0L, CALLBACK_NULL)) != MMSYSERR_NOERROR)
	{
		if (hr != MMSYSERR_ALLOCATED)
		{
			Com_Printf ("failed\n");
			return false;
		}

		if (MessageBox (NULL,
						"The sound hardware is in use by another app.\n\n"
					    "Select Retry to try to start sound again or Cancel to run Quake 2 with no sound.",
						"Sound not available",
						MB_RETRYCANCEL | MB_SETFOREGROUND | MB_ICONEXCLAMATION) != IDRETRY)
		{
			Com_Printf ("hw in use\n" );
			return false;
		}
	}
	Com_DPrintf( "ok\n" );

	/*
	 * Allocate and lock memory for the waveform data. The memory
	 * for waveform data must be globally allocated with
	 * GMEM_MOVEABLE and GMEM_SHARE flags.
	*/
	Com_DPrintf ("...allocating waveform buffer: ");
	gSndBufSize = WAV_BUFFERS*WAV_BUFFER_SIZE;
	hData = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, gSndBufSize);
	if (!hData)
	{
		Com_Printf( " failed\n" );
		FreeSound ();
		return false;
	}
	Com_DPrintf( "ok\n" );

	Com_DPrintf ("...locking waveform buffer: ");
	lpData = GlobalLock(hData);
	if (!lpData)
	{
		Com_Printf( " failed\n" );
		FreeSound ();
		return false;
	}
	memset (lpData, 0, gSndBufSize);
	Com_DPrintf( "ok\n" );

	/*
	 * Allocate and lock memory for the header. This memory must
	 * also be globally allocated with GMEM_MOVEABLE and
	 * GMEM_SHARE flags.
	 */
	Com_DPrintf ("...allocating waveform header: ");
	hWaveHdr = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE,
		(DWORD) sizeof(WAVEHDR) * WAV_BUFFERS);
//.........这里部分代码省略.........
开发者ID:kytulendu,项目名称:Quake-2,代码行数:101,


示例28: main

/////////////////////////////////////////////////
///
///              メイン
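示例28在此处被截断,只剩下“メイン”(主函数)的注释头。作为对照,下面给出一段编者补充的最小可运行示意(并非示例28原文,采样率、频率等参数均为编者假设):用 waveOutOpen 打开默认设备,合成 1 秒 440 Hz 正弦波,同步播放后关闭设备。

#include <windows.h>
#include <mmsystem.h>
#include <math.h>
#include <stdio.h>
#pragma comment(lib, "winmm.lib")

int main(void)
{
    const int rate = 44100, seconds = 1, hz = 440;
    static short samples[44100];                    // 1 秒单声道 16 位数据

    for (int i = 0; i < rate * seconds; i++)        // 合成 440 Hz 正弦波
        samples[i] = (short)(3000 * sin(2.0 * 3.14159265 * hz * i / rate));

    WAVEFORMATEX fmt = {0};
    fmt.wFormatTag      = WAVE_FORMAT_PCM;
    fmt.nChannels       = 1;
    fmt.nSamplesPerSec  = rate;
    fmt.wBitsPerSample  = 16;
    fmt.nBlockAlign     = fmt.nChannels * fmt.wBitsPerSample / 8;
    fmt.nAvgBytesPerSec = fmt.nSamplesPerSec * fmt.nBlockAlign;

    HWAVEOUT hwo = NULL;
    if (waveOutOpen(&hwo, WAVE_MAPPER, &fmt, 0, 0, CALLBACK_NULL) != MMSYSERR_NOERROR)
    {
        fprintf(stderr, "waveOutOpen failed\n");
        return 1;
    }

    WAVEHDR hdr = {0};
    hdr.lpData         = (LPSTR)samples;
    hdr.dwBufferLength = (DWORD)(rate * seconds * sizeof(short));

    waveOutPrepareHeader(hwo, &hdr, sizeof(hdr));
    waveOutWrite(hwo, &hdr, sizeof(hdr));           // 送入播放队列

    while (!(hdr.dwFlags & WHDR_DONE))              // 简单轮询等待播放完成
        Sleep(50);

    waveOutUnprepareHeader(hwo, &hdr, sizeof(hdr));
    waveOutClose(hwo);
    return 0;
}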