This tutorial collects practical C++ code examples for the vorbis_synthesis function; we hope it helps you.
This article gathers typical usage examples of the C++ function vorbis_synthesis. If you have been wondering how vorbis_synthesis is used in practice, the hand-picked examples below may help. A total of 28 code examples of vorbis_synthesis are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.

Example 1: process_vorbis_packets

static void process_vorbis_packets(VORBIS_FEED *feed) {
  int res, samples, n, stat;
  time_t now;
  float **pcm;
  while (1) {
    res = ogg_stream_packetout(&(feed->os), &(feed->op));
    if (res == 0) return;
    if (res < 0) {
      now = time(NULL);
      logmsg("%s: out of sync\n", ctime(&now));
      continue;
    }
    stat = vorbis_synthesis(&(feed->vb), &(feed->op));
    if (stat == 0)
      vorbis_synthesis_blockin(&(feed->vd), &(feed->vb));
    else if (stat == OV_ENOTAUDIO) {
      logmsg("non-audio packet ignored\n");
      continue;
    } else {
      logmsg("bad vorbis packet\n");
      continue;
    }
    while ((samples = vorbis_synthesis_pcmout(&(feed->vd), &pcm)) > 0) {
      n = write_ring(feed, pcm, samples);
      vorbis_synthesis_read(&(feed->vd), n);
    }
  }
  return;
}
Developer: pmyadlowsky, Project: mash, Lines: 31
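A quick orientation before the remaining examples: nearly every snippet on this page follows the same four-call pattern — vorbis_synthesis() decodes one ogg_packet into a vorbis_block, vorbis_synthesis_blockin() hands that block to the vorbis_dsp_state, vorbis_synthesis_pcmout() exposes the decoded float PCM, and vorbis_synthesis_read() reports how many samples were consumed. The minimal sketch below only illustrates that pattern; consume_pcm() is a hypothetical sink, not taken from any example above.

#include <vorbis/codec.h>

/* Hypothetical sink: hand the decoded, non-interleaved float PCM to the application. */
extern void consume_pcm(float **pcm, int frames, int channels);

/* Decode one already-extracted audio packet.  Assumes vd and vb were set up with
 * vorbis_synthesis_init()/vorbis_block_init() after the three header packets.
 * Returns 0 on success, or a libvorbis error such as OV_ENOTAUDIO / OV_EBADPACKET. */
static int decode_packet(vorbis_dsp_state *vd, vorbis_block *vb, ogg_packet *op)
{
    float **pcm;
    int frames;

    int ret = vorbis_synthesis(vb, op);     /* decode the packet into the block */
    if (ret != 0)
        return ret;

    vorbis_synthesis_blockin(vd, vb);       /* hand the block to the DSP state */

    while ((frames = vorbis_synthesis_pcmout(vd, &pcm)) > 0) {
        consume_pcm(pcm, frames, vd->vi->channels);
        vorbis_synthesis_read(vd, frames);  /* tell libvorbis how much we consumed */
    }
    return 0;
}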
Example 2: VorbisDecode

int VorbisDecode(Vorbis *vorbis, void *buffer, int buflen, AVCallback callback, long streamend, long granulepos) {
  // setup ogg packet
  vorbis->ogg.packet = buffer;
  vorbis->ogg.bytes = buflen;

  int status = 0;
  if (vorbis->headers < 3) {
    status = vorbis_synthesis_headerin(&vorbis->info, &vorbis->comment, &vorbis->ogg);
    vorbis->headers++;

    if (status == 0 && vorbis->headers == 3) {
      vorbis->outlen /= vorbis->info.channels;
      status = vorbis_synthesis_init(&vorbis->dsp, &vorbis->info);
      if (status == 0)
        status = vorbis_block_init(&vorbis->dsp, &vorbis->block);
    }
  } else {
    // decode
    status = vorbis_synthesis(&vorbis->block, &vorbis->ogg);
    if (status == 0)
      status = vorbis_synthesis_blockin(&vorbis->dsp, &vorbis->block);

    int samples = 0;
    float **pcm;
    int cutoff = granulepos - vorbis->lastgranule;
    while ((samples = vorbis_synthesis_pcmout(&vorbis->dsp, &pcm)) > 0) {
      // interleave
      int channels = vorbis->info.channels;
      int len = samples < vorbis->outlen ? samples : vorbis->outlen;
      if (streamend) {
        len = len > cutoff ? cutoff : len;
        cutoff -= len;
      }

      for (int i = 0; i < channels; i++) {
        float *buf = &vorbis->outbuf[i];
        for (int j = 0; j < len; j++) {
          *buf = pcm[i][j];
          buf += channels;
        }
      }

      status = vorbis_synthesis_read(&vorbis->dsp, len);
      callback(len * channels);
      if (streamend && cutoff <= 0)
        break;
    }
  }

  if (vorbis->ogg.b_o_s)
    vorbis->ogg.b_o_s = 0;

  if (granulepos > 0)
    vorbis->lastgranule = granulepos;

  return status;
}
Developer: mon, Project: vorbis.js, Lines: 59
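Example 2 also shows the other half of the job: the first three packets of a Vorbis stream are headers (identification, comment, codebooks) and must go through vorbis_synthesis_headerin() before vorbis_synthesis_init() and vorbis_block_init() can be called. The sketch below spells out that initialization order, assuming a hypothetical read_next_packet() helper; it illustrates the libvorbis call sequence and is not code from any project listed here.

#include <vorbis/codec.h>

/* Hypothetical packet source: fills *op with the next packet, returns 0 on EOF. */
extern int read_next_packet(ogg_packet *op);

/* Feed the three mandatory Vorbis headers, then set up the DSP and block state. */
static int setup_vorbis_decoder(vorbis_info *vi, vorbis_comment *vc,
                                vorbis_dsp_state *vd, vorbis_block *vb)
{
    ogg_packet op;

    vorbis_info_init(vi);
    vorbis_comment_init(vc);

    for (int i = 0; i < 3; i++) {
        if (!read_next_packet(&op))
            return -1;                                  /* stream ended early */
        if (vorbis_synthesis_headerin(vi, vc, &op) < 0)
            return -1;                                  /* not a Vorbis header */
    }

    if (vorbis_synthesis_init(vd, vi) != 0)             /* central decode state */
        return -1;
    return vorbis_block_init(vd, vb);                   /* per-block working space */
}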
Example 3: loadAudio

/* return: audio wants more packets */
static qboolean loadAudio(void) {
  qboolean anyDataTransferred = qtrue;
  float **pcm;
  float *right, *left;
  int samples, samplesNeeded;
  int i;
  short *ptr;
  ogg_packet op;
  vorbis_block vb;

  memset(&op, 0, sizeof(op));
  memset(&vb, 0, sizeof(vb));
  vorbis_block_init(&g_ogm.vd, &vb);

  while (anyDataTransferred && g_ogm.currentTime + MAX_AUDIO_PRELOAD > (int)(g_ogm.vd.granulepos * 1000 / g_ogm.vi.rate)) {
    anyDataTransferred = qfalse;

    if ((samples = vorbis_synthesis_pcmout(&g_ogm.vd, &pcm)) > 0) {
      // vorbis -> raw
      ptr = (short *)rawBuffer;
      samplesNeeded = (SIZEOF_RAWBUFF) / (2 * 2); // (width*channel)
      if (samples < samplesNeeded)
        samplesNeeded = samples;

      left = pcm[0];
      right = (g_ogm.vi.channels > 1) ? pcm[1] : pcm[0];

      for (i = 0; i < samplesNeeded; ++i) {
        ptr[0] = (left[i] >= -1.0f && left[i] <= 1.0f) ? left[i] * 32767.f : 32767 * ((left[i] > 0.0f) - (left[i] < 0.0f));
        ptr[1] = (right[i] >= -1.0f && right[i] <= 1.0f) ? right[i] * 32767.f : 32767 * ((right[i] > 0.0f) - (right[i] < 0.0f));
        ptr += 2; // numChans;
      }

      if (i > 0) {
        // tell libvorbis how many samples we actually consumed
        vorbis_synthesis_read(&g_ogm.vd, i);
        S_RawSamples(0, i, g_ogm.vi.rate, 2, 2, rawBuffer, 1.0f, -1);
        anyDataTransferred = qtrue;
      }
    }

    if (!anyDataTransferred) {
      // op -> vorbis
      if (ogg_stream_packetout(&g_ogm.os_audio, &op)) {
        if (vorbis_synthesis(&vb, &op) == 0)
          vorbis_synthesis_blockin(&g_ogm.vd, &vb);
        anyDataTransferred = qtrue;
      }
    }
  }

  vorbis_block_clear(&vb);

  if (g_ogm.currentTime + MIN_AUDIO_PRELOAD > (int)(g_ogm.vd.granulepos * 1000 / g_ogm.vi.rate))
    return qtrue;
  else
    return qfalse;
}
Developer: PadWorld-Entertainment, Project: wop-gamesource, Lines: 63
Example 4: assert

void Inpin::Decode(IMediaSample* pInSample)
{
  BYTE* buf_in;
  HRESULT hr = pInSample->GetPointer(&buf_in);
  assert(SUCCEEDED(hr));
  assert(buf_in);

  const long len_in = pInSample->GetActualDataLength();
  assert(len_in >= 0);

  ogg_packet& pkt = m_packet;
  pkt.packet = buf_in;
  pkt.bytes = len_in;
  ++pkt.packetno;

  int status = vorbis_synthesis(&m_block, &pkt);
  assert(status == 0);  //TODO

  status = vorbis_synthesis_blockin(&m_dsp_state, &m_block);
  assert(status == 0);  //TODO

  typedef VorbisTypes::VORBISFORMAT2 FMT;

  const AM_MEDIA_TYPE& mt = m_connection_mtv[0];
  assert(mt.cbFormat > sizeof(FMT));
  assert(mt.pbFormat);

  const FMT& fmt = (const FMT&)(*mt.pbFormat);

  assert(fmt.channels > 0);
  assert(fmt.samplesPerSec > 0);

  float** sv;
  const int pcmout_count = vorbis_synthesis_pcmout(&m_dsp_state, &sv);

  if (pcmout_count <= 0)
    return;

  assert(sv);

  for (DWORD i = 0; i < fmt.channels; ++i)
  {
    const float* const first = sv[i];
    const float* const last = first + pcmout_count;

    samples_t& ss = m_channels[i];
    ss.insert(ss.end(), first, last);
  }

  sv = 0;

  status = vorbis_synthesis_read(&m_dsp_state, pcmout_count);
  assert(status == 0);
}
Developer: brion, Project: webmdshow, Lines: 56
Example 5: while

float VideoClip_Theora::_decodeAudio()
{
  if (this->restarted)
  {
    return -1.0f;
  }
  ogg_packet opVorbis;
  float** pcm;
  int length = 0;
  float timeStamp = -1.0f;
  bool readPastTimestamp = false;
  float factor = 1.0f / this->audioFrequency;
  float videoTime = (float)this->lastDecodedFrameNumber / this->fps;
  float min = this->frameQueue->getSize() / this->fps + 1.0f;
  float audioTime = 0.0f;
  while (true)
  {
    length = vorbis_synthesis_pcmout(&this->info.VorbisDSPState, &pcm);
    if (length == 0)
    {
      if (ogg_stream_packetout(&this->info.VorbisStreamState, &opVorbis) > 0)
      {
        if (vorbis_synthesis(&this->info.VorbisBlock, &opVorbis) == 0)
        {
          if (timeStamp < 0 && opVorbis.granulepos >= 0)
          {
            timeStamp = (float)vorbis_granule_time(&this->info.VorbisDSPState, opVorbis.granulepos);
          }
          else if (timeStamp >= 0)
          {
            readPastTimestamp = true;
          }
          vorbis_synthesis_blockin(&this->info.VorbisDSPState, &this->info.VorbisBlock);
        }
        continue;
      }
      audioTime = this->readAudioSamples * factor;
      // always buffer up of audio ahead of the frames
      if (audioTime - videoTime >= min || !this->_readData())
      {
        break;
      }
    }
    if (length > 0)
    {
      this->addAudioPacket(pcm, length, this->audioGain);
      this->readAudioSamples += length;
      if (readPastTimestamp)
      {
        timeStamp += (float)length / this->info.VorbisInfo.rate;
      }
      vorbis_synthesis_read(&this->info.VorbisDSPState, length); // tell vorbis we read a number of samples
    }
  }
  return timeStamp;
}
Developer: Stratagus, Project: theoraplayer, Lines: 56
Example 6: vorbis_synthesis_blockin

uint8_t ADM_vorbis::run(uint8_t *ptr, uint32_t nbIn, uint8_t *outptr, uint32_t *nbOut)
{
  ogg_packet packet;
  float **sample_pcm;
  int nb_synth;

  *nbOut = 0;
  if (!_init)
    return 0;

  packet.b_o_s = 0;
  packet.e_o_s = 0;
  packet.bytes = nbIn;
  packet.packet = ptr;

  if (!vorbis_synthesis(&STRUCT->vblock, &packet))
  {
    vorbis_synthesis_blockin(&STRUCT->vdsp, &STRUCT->vblock);
  }
  nb_synth = vorbis_synthesis_pcmout(&STRUCT->vdsp, &sample_pcm);
  if (nb_synth < 0)
  {
    printf("error decoding vorbis %d\n", nb_synth);
    return 0;
  }
  // Now convert the float / per channel samples to interleaved 16 bits pcm audio
  float scale = 32767.f * STRUCT->ampscale;
  int16_t *out;
  int channels, val;

  channels = STRUCT->vinfo.channels;
  out = (int16_t *)outptr;
  *nbOut = channels * 2 * nb_synth;

  for (uint32_t samp = 0; samp < nb_synth; samp++)
  {
    for (uint32_t chan = 0; chan < channels; chan++)
    {
      val = (int)floor(sample_pcm[chan][samp] * scale);
      if (val > 32767) val = 32767;
      if (val < -32768) val = -32768;
      *out++ = val;
    }
  }
  // Purge them
  vorbis_synthesis_read(&STRUCT->vdsp, nb_synth);
  aprintf("This round : in %d bytes, out %d bytes\n", nbIn, *nbOut);
  return 1;
}
Developer: BackupTheBerlios, Project: avidemux-svn, Lines: 54
Example 7: IoMessage_locals_oggPacketArgAt_

IoObject *IoVorbisBlock_synthesis(IoVorbisDspState *self, IoObject *locals, IoMessage *m)
{
  /*doc VorbisBlock synthesis(packet)
  Decode the vorbis data from the packet, storing it in the block.
  */
  IoOggPacket *packet = IoMessage_locals_oggPacketArgAt_(m, locals, 0);

  int ret = vorbis_synthesis(DATA(self), ((ogg_packet*)(IoObject_dataPointer(packet))));

  return IONUMBER(ret);
}
Developer: anthem, Project: io, Lines: 11
Example 8: VorbisProcessData

int VorbisProcessData(OggData *data, char *buffer)
{
  int num_samples;
  float **pcm;
  float *chan;
  int i, j;
  int val;
  ogg_packet packet;
  int len;

  len = 0;
  num_samples = 0;

  while (!len) {
    if (ogg_stream_packetout(&data->astream, &packet) != 1) {
      if (OggGetNextPage(&data->page, &data->sync, data->File)) {
        // EOF
        return -1;
      }
      ogg_stream_pagein(&data->astream, &data->page);
    } else {
      if (vorbis_synthesis(&data->vblock, &packet) == 0) {
        vorbis_synthesis_blockin(&data->vdsp, &data->vblock);
      }
      while ((num_samples = vorbis_synthesis_pcmout(&data->vdsp, &pcm)) > 0) {
        j = 0;
        for (i = 0; i < data->vinfo.channels; ++i) {
          chan = pcm[i];
          for (j = 0; j < num_samples; ++j) {
            val = static_cast<int>(chan[j] * 32767.f);
            if (val > 32767) {
              val = 32767;
            } else if (val < -32768) {
              val = -32768;
            }
            *(Sint16 *)(buffer + len + (j * 2 * data->vinfo.channels) + i * 2) = (Sint16)val;
          }
        }
        len += j * 2 * data->vinfo.channels;
        // we consumed num_samples
        vorbis_synthesis_read(&data->vdsp, num_samples);
      }
    }
  }
  return len;
}
Developer: OneSleepyDev, Project: boswars_osd, Lines: 52
Example 9: vorbis_decode

int vorbis_decode(vorbis_ctx *v, ogg_packet *pkt, const float ***pcm)
{
  int r;

  if (0 != (r = vorbis_synthesis(&v->blk, pkt)))
    return r;

  vorbis_synthesis_blockin(&v->ds, &v->blk);

  r = vorbis_synthesis_pcmout(&v->ds, (float***)pcm);
  vorbis_synthesis_read(&v->ds, r);
  return r;
}
Developer: stsaz, Project: ff-3pt, Lines: 13
Example 10: oggvorbis_decode_frame

static int oggvorbis_decode_frame(AVCodecContext *avccontext, void *data,
                                  int *got_frame_ptr, AVPacket *avpkt)
{
    OggVorbisDecContext *context = avccontext->priv_data;
    AVFrame *frame = data;
    float **pcm;
    ogg_packet *op = &context->op;
    int samples, total_samples, total_bytes;
    int ret;
    int16_t *output;

    if (!avpkt->size) {
        //FIXME flush
        return 0;
    }

    frame->nb_samples = 8192*4;
    if ((ret = ff_get_buffer(avccontext, frame)) < 0) {
        av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    output = (int16_t *)frame->data[0];

    op->packet = avpkt->data;
    op->bytes = avpkt->size;

//    av_log(avccontext, AV_LOG_DEBUG, "%d %d %d %"PRId64" %"PRId64" %d %d\n", op->bytes, op->b_o_s, op->e_o_s, op->granulepos, op->packetno, buf_size, context->vi.rate);

/*    for(i=0; i<op->bytes; i++)
      av_log(avccontext, AV_LOG_DEBUG, "%02X ", op->packet[i]);
    av_log(avccontext, AV_LOG_DEBUG, "\n");*/

    if (vorbis_synthesis(&context->vb, op) == 0)
        vorbis_synthesis_blockin(&context->vd, &context->vb);

    total_samples = 0;
    total_bytes = 0;

    while ((samples = vorbis_synthesis_pcmout(&context->vd, &pcm)) > 0) {
        conv(samples, pcm, (char*)output + total_bytes, context->vi.channels);
        total_bytes += samples * 2 * context->vi.channels;
        total_samples += samples;
        vorbis_synthesis_read(&context->vd, samples);
    }

    frame->nb_samples = total_samples;
    *got_frame_ptr = 1;
    return avpkt->size;
}
Developer: 1051716, Project: xbmc-1, Lines: 50
Example 11: CIN_OGM_LoadAudioFrame

/**
 * @return true if audio wants more packets
 */
static bool CIN_OGM_LoadAudioFrame (cinematic_t* cin)
{
  vorbis_block vb;

  OBJZERO(vb);
  vorbis_block_init(&OGMCIN.vd, &vb);

  while (OGMCIN.currentTime > (int) (OGMCIN.vd.granulepos * 1000 / OGMCIN.vi.rate)) {
    float** pcm;
    const int samples = vorbis_synthesis_pcmout(&OGMCIN.vd, &pcm);
    if (samples > 0) {
      /* vorbis -> raw */
      const int width = 2;
      const int channel = 2;
      int samplesNeeded = sizeof(rawBuffer) / (width * channel);
      const float* left = pcm[0];
      const float* right = (OGMCIN.vi.channels > 1) ? pcm[1] : pcm[0];
      short* ptr = (short*)rawBuffer;
      int i;

      if (samples < samplesNeeded)
        samplesNeeded = samples;

      for (i = 0; i < samplesNeeded; ++i, ptr += channel) {
        ptr[0] = (left[i] >= -1.0f && left[i] <= 1.0f) ? left[i] * 32767.f : 32767 * ((left[i] > 0.0f) - (left[i] < 0.0f));
        ptr[1] = (right[i] >= -1.0f && right[i] <= 1.0f) ? right[i] * 32767.f : 32767 * ((right[i] > 0.0f) - (right[i] < 0.0f));
      }

      /* tell libvorbis how many samples we actually consumed */
      vorbis_synthesis_read(&OGMCIN.vd, i);

      if (!cin->noSound)
        M_AddToSampleBuffer(&OGMCIN.musicStream, OGMCIN.vi.rate, i, rawBuffer);
    } else {
      ogg_packet op;
      /* op -> vorbis */
      if (ogg_stream_packetout(&OGMCIN.os_audio, &op)) {
        if (vorbis_synthesis(&vb, &op) == 0)
          vorbis_synthesis_blockin(&OGMCIN.vd, &vb);
      } else
        break;
    }
  }

  vorbis_block_clear(&vb);

  return OGMCIN.currentTime > (int)(OGMCIN.vd.granulepos * 1000 / OGMCIN.vi.rate);
}
Developer: Ed-von-Schleck, Project: ufoai, Lines: 52
Example 12: vorbis_synthesis

/*****************************************************************************
 * DecodePacket: decodes a Vorbis packet.
 *****************************************************************************/
static block_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    int           i_samples;

    INTERLEAVE_TYPE **pp_pcm;

    if( p_oggpacket->bytes &&
        vorbis_synthesis( &p_sys->vb, p_oggpacket ) == 0 )
        vorbis_synthesis_blockin( &p_sys->vd, &p_sys->vb );

    /* **pp_pcm is a multichannel float vector. In stereo, for
     * example, pp_pcm[0] is left, and pp_pcm[1] is right. i_samples is
     * the size of each channel. Convert the float values
     * (-1.<=range<=1.) to whatever PCM format and write it out */

    if( ( i_samples = vorbis_synthesis_pcmout( &p_sys->vd, &pp_pcm ) ) > 0 )
    {
        block_t *p_aout_buffer;

        if( decoder_UpdateAudioFormat( p_dec ) )
            return NULL;
        p_aout_buffer = decoder_NewAudioBuffer( p_dec, i_samples );
        if( p_aout_buffer == NULL )
            return NULL;

        /* Interleave the samples */
        Interleave( (INTERLEAVE_TYPE*)p_aout_buffer->p_buffer,
                    (const INTERLEAVE_TYPE**)pp_pcm, p_sys->vi.channels, i_samples,
                    p_sys->pi_chan_table );

        /* Tell libvorbis how many samples we actually consumed */
        vorbis_synthesis_read( &p_sys->vd, i_samples );

        /* Date management */
        p_aout_buffer->i_pts = date_Get( &p_sys->end_date );
        p_aout_buffer->i_length = date_Increment( &p_sys->end_date,
                                                  i_samples ) - p_aout_buffer->i_pts;
        return p_aout_buffer;
    }
    else
    {
        return NULL;
    }
}
Developer: IAPark, Project: vlc, Lines: 49
Example 13: getHandle

/*
 * Class:     org_tritonus_lowlevel_pvorbis_Block
 * Method:    synthesis_1native
 * Signature: (Lorg/tritonus/lowlevel/ogg/Packet;)I
 */
JNIEXPORT jint JNICALL
Java_org_tritonus_lowlevel_pvorbis_Block_synthesis_1native
(JNIEnv* env, jobject obj, jobject packet)
{
  vorbis_block* handle;
  ogg_packet* packetHandle;
  int nReturn;

  if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_Block_synthesis(): begin\n"); }
  handle = getHandle(env, obj);
  packetHandle = NULL;
  if (packet != NULL)
  {
    packetHandle = getPacketNativeHandle(env, packet);
  }
  if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_Block_synthesis(): packet handle: %p\n", packetHandle); }
  nReturn = vorbis_synthesis(handle, packetHandle);
  if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_Block_synthesis(): end\n"); }
  return nReturn;
}
Developer: gpeev, Project: Konsolenradio, Lines: 25
Example 14: oggvorbis_decode_frame

static int oggvorbis_decode_frame(AVCodecContext *avccontext,
                                  void *data, int *data_size,
                                  uint8_t *buf, int buf_size)
{
    OggVorbisContext *context = avccontext->priv_data;
    float **pcm;
    ogg_packet *op = &context->op;
    int samples, total_samples, total_bytes;

    if (!buf_size) {
        //FIXME flush
        return 0;
    }

    op->packet = buf;
    op->bytes = buf_size;

//    av_log(avccontext, AV_LOG_DEBUG, "%d %d %d %"PRId64" %"PRId64" %d %d\n", op->bytes, op->b_o_s, op->e_o_s, op->granulepos, op->packetno, buf_size, context->vi.rate);

/*    for(i=0; i<op->bytes; i++)
      av_log(avccontext, AV_LOG_DEBUG, "%02X ", op->packet[i]);
    av_log(avccontext, AV_LOG_DEBUG, "\n");*/

    if (vorbis_synthesis(&context->vb, op) == 0)
        vorbis_synthesis_blockin(&context->vd, &context->vb);

    total_samples = 0;
    total_bytes = 0;

    while ((samples = vorbis_synthesis_pcmout(&context->vd, &pcm)) > 0) {
        conv(samples, pcm, (char*)data + total_bytes, context->vi.channels);
        total_bytes += samples * 2 * context->vi.channels;
        total_samples += samples;
        vorbis_synthesis_read(&context->vd, samples);
    }

    *data_size = total_bytes;
    return buf_size;
}
Developer: VoxOx, Project: VoxOx, Lines: 39
Example 15: th_decode_ctl

void VideoClip_Theora::_executeRestart()
{
  bool paused = this->timer->isPaused();
  if (!paused)
  {
    this->timer->pause();
  }
  long initialGranule = 0;
  th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &initialGranule, sizeof(initialGranule));
  th_decode_free(this->info.TheoraDecoder);
  this->info.TheoraDecoder = th_decode_alloc(&this->info.TheoraInfo, this->info.TheoraSetup);
  ogg_stream_reset(&this->info.TheoraStreamState);
  if (this->audioInterface != NULL)
  {
    // empty the DSP buffer
    ogg_packet opVorbis;
    this->readAudioSamples = 0;
    while (ogg_stream_packetout(&this->info.VorbisStreamState, &opVorbis) > 0)
    {
      if (vorbis_synthesis(&this->info.VorbisBlock, &opVorbis) == 0)
      {
        vorbis_synthesis_blockin(&this->info.VorbisDSPState, &this->info.VorbisBlock);
      }
    }
    ogg_stream_reset(&this->info.VorbisStreamState);
  }
  ogg_sync_reset(&this->info.OggSyncState);
  this->stream->seek(0);
  ogg_int64_t granulePos = 0;
  th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &granulePos, sizeof(granulePos));
  this->endOfFile = false;
  this->restarted = true;
  if (!paused)
  {
    this->timer->play();
  }
}
Developer: Stratagus, Project: theoraplayer, Lines: 37
Example 16: vorbis_synthesis_blockin

uint8_t ADM_vorbis::run(uint8_t *inptr, uint32_t nbIn, float *outptr, uint32_t *nbOut)
{
  ogg_packet packet;
  float **sample_pcm;
  int nb_synth;

  *nbOut = 0;
  if (!_init)
    return 0;

  packet.b_o_s = 0;
  packet.e_o_s = 0;
  packet.bytes = nbIn;
  packet.packet = inptr;
  packet.granulepos = -1;

  if (!vorbis_synthesis(&STRUCT->vblock, &packet))
  {
    vorbis_synthesis_blockin(&STRUCT->vdsp, &STRUCT->vblock);
  }
  nb_synth = vorbis_synthesis_pcmout(&STRUCT->vdsp, &sample_pcm);
  if (nb_synth < 0)
  {
    printf("error decoding vorbis %d\n", nb_synth);
    return 0;
  }

  for (uint32_t samp = 0; samp < nb_synth; samp++)
    for (uint8_t chan = 0; chan < STRUCT->vinfo.channels; chan++)
      *outptr++ = sample_pcm[chan][samp] * STRUCT->ampscale;

  *nbOut = STRUCT->vinfo.channels * nb_synth;
  // Purge them
  vorbis_synthesis_read(&STRUCT->vdsp, nb_synth);
  aprintf("This round : in %d bytes, out %d bytes synthetized:%d\n", nbIn, *nbOut, nb_synth);
  return 1;
}
Developer: BackupTheBerlios, Project: avidemux-svn, Lines: 36
Example 17: lame_decode_ogg_fromfile

/*
For lame_decode_fromfile:  return code
  -1     error, or eof
  0      ok, but need more data before outputing any samples
  n      number of samples output.
*/
int lame_decode_ogg_fromfile( lame_global_flags*  gfp,
                              FILE*               fd,
                              short int           pcm_l[],
                              short int           pcm_r[],
                              mp3data_struct*     mp3data )
{
  lame_internal_flags *gfc = gfp->internal_flags;
  int samples, result, i, j, eof = 0, eos = 0, bout = 0;
  double **pcm;

  while(1){

    /* **pcm is a multichannel double vector.  In stereo, for
       example, pcm[0] is left, and pcm[1] is right.  samples is
       the size of each channel.  Convert the float values
       (-1.<=range<=1.) to whatever PCM format and write it out */
    /* unpack the buffer, if it has at least 1024 samples */
    convsize = 1024;
    samples = vorbis_synthesis_pcmout(&vd, &pcm);
    if (samples >= convsize || eos || eof) {
      /* read 1024 samples, or if eos, read what ever is in buffer */
      int clipflag = 0;
      bout = (samples < convsize ? samples : convsize);

      /* convert doubles to 16 bit signed ints (host order) and interleave */
      for (i = 0; i < vi.channels; i++) {
        double *mono = pcm[i];
        for (j = 0; j < bout; j++) {
          int val = mono[j] * 32767.;
          /* might as well guard against clipping */
          if (val > 32767) {
            val = 32767;
            clipflag = 1;
          }
          if (val < -32768) {
            val = -32768;
            clipflag = 1;
          }
          if (i == 0) pcm_l[j] = val;
          if (i == 1) pcm_r[j] = val;
        }
      }

      /*
      if (clipflag)
        MSGF( gfc, "Clipping in frame %ld\n", vd.sequence );
      */

      /* tell libvorbis how many samples we actually consumed */
      vorbis_synthesis_read(&vd, bout);
      break;
    }

    result = ogg_sync_pageout(&oy, &og);
    if (result == 0) {
      /* need more data */
    } else if (result == -1) {
      /* missing or corrupt data at this page position */
      ERRORF( gfc, "Corrupt or missing data in bitstream; "
              "continuing...\n");
    } else {
      /* decode this page */
      ogg_stream_pagein(&os, &og); /* can safely ignore errors at this point */
      do {
        result = ogg_stream_packetout(&os, &op);
        if (result == 0) {
          /* need more data */
        } else if (result == -1) {
          /* missing or corrupt data at this page position */
          /* no reason to complain; already complained above */
        } else {
          /* we have a packet.  Decode it */
          vorbis_synthesis(&vb, &op);
          vorbis_synthesis_blockin(&vd, &vb);
        }
      } while (result != 0);
    }

    /* is this the last page? */
    if (ogg_page_eos(&og)) eos = 1;

    if (!eos) {
      char *buffer;
      int bytes;
      buffer = ogg_sync_buffer(&oy, 4096);
      bytes = fread(buffer, 1, 4096, fd);
      ogg_sync_wrote(&oy, bytes);
      if (bytes == 0) eof = 1;
    }
  }
// ... (remaining code omitted by the source page) ...
Developer: aahud, Project: harvey, Lines: 101
Example 18: while

void VideoStreamPlaybackTheora::update(float p_delta) {

  if (!file)
    return;

  if (!playing || paused) {
    //printf("not playing\n");
    return;
  };

#ifdef THEORA_USE_THREAD_STREAMING
  thread_sem->post();
#endif

  //print_line("play "+rtos(p_delta));
  time += p_delta;

  if (videobuf_time > get_time()) {
    return; //no new frames need to be produced
  }

  bool frame_done = false;
  bool audio_done = !vorbis_p;

  while (!frame_done || (!audio_done && !vorbis_eos)) {
    //a frame needs to be produced

    ogg_packet op;
    bool no_theora = false;

    while (vorbis_p) {
      int ret;
      float **pcm;

      bool buffer_full = false;

      /* if there's pending, decoded audio, grab it */
      ret = vorbis_synthesis_pcmout(&vd, &pcm);
      if (ret > 0) {

        const int AUXBUF_LEN = 4096;
        int to_read = ret;
        int16_t aux_buffer[AUXBUF_LEN];

        while (to_read) {

          int m = MIN(AUXBUF_LEN / vi.channels, to_read);

          int count = 0;

          for (int j = 0; j < m; j++) {
            for (int i = 0; i < vi.channels; i++) {

              int val = Math::fast_ftoi(pcm[i][j] * 32767.f);
              if (val > 32767) val = 32767;
              if (val < -32768) val = -32768;

              aux_buffer[count++] = val;
            }
          }

          if (mix_callback) {
            int mixed = mix_callback(mix_udata, aux_buffer, m);
            to_read -= mixed;
            if (mixed != m) { //could mix no more
              buffer_full = true;
              break;
            }
          } else {
            to_read -= m; //just pretend we sent the audio
          }
        }

        int tr = vorbis_synthesis_read(&vd, ret - to_read);

        if (vd.granulepos >= 0) {
          //print_line("wrote: "+itos(audio_frames_wrote)+" gpos: "+itos(vd.granulepos));
        }

        //print_line("mix audio!");

        audio_frames_wrote += ret - to_read;

        //print_line("AGP: "+itos(vd.granulepos)+" added "+itos(ret-to_read));

      } else {

        /* no pending audio; is there a pending packet to decode? */
        if (ogg_stream_packetout(&vo, &op) > 0) {
          if (vorbis_synthesis(&vb, &op) == 0) { /* test for success! */
            vorbis_synthesis_blockin(&vd, &vb);
          }
        } else { /* we need more data; break out to suck in another page */
          //printf("need moar data\n");
          break;
        };
      }

      audio_done = videobuf_time < (audio_frames_wrote / float(vi.rate));

      if (buffer_full)
// ... (remaining code omitted by the source page) ...
Developer: Bonfi96, Project: godot, Lines: 101
Example 19: MOZ_ASSERT

int
VorbisDataDecoder::DoDecode(MediaRawData* aSample)
{
  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;
  uint64_t aTstampUsecs = aSample->mTime;
  int64_t aTotalFrames = 0;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime);
  }

  ogg_packet pkt = InitVorbisPacket(aData, aLength, false, false, -1, mPacketCount++);
  bool first_packet = mPacketCount == 4;

  if (vorbis_synthesis(&mVorbisBlock, &pkt) != 0) {
    return -1;
  }

  if (vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock) != 0) {
    return -1;
  }

  VorbisPCMValue** pcm = 0;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);

  // If the first packet of audio in the media produces no data, we
  // still need to produce an AudioData for it so that the correct media
  // start time is calculated.  Otherwise we'd end up with a media start
  // time derived from the timecode of the first packet that produced
  // data.
  if (frames == 0 && first_packet) {
    mCallback->Output(new AudioData(aOffset, aTstampUsecs, 0, 0, nullptr,
                                    mVorbisDsp.vi->channels,
                                    mVorbisDsp.vi->rate));
  }

  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    auto buffer = MakeUnique<AudioDataValue[]>(frames*channels);
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    CheckedInt64 duration = FramesToUsecs(frames, mVorbisDsp.vi->rate);
    if (!duration.isValid()) {
      NS_WARNING("Int overflow converting WebM audio duration");
      return -1;
    }
    CheckedInt64 total_duration = FramesToUsecs(mFrames, mVorbisDsp.vi->rate);
    if (!total_duration.isValid()) {
      NS_WARNING("Int overflow converting WebM audio total_duration");
      return -1;
    }

    CheckedInt64 time = total_duration + aTstampUsecs;
    if (!time.isValid()) {
      NS_WARNING("Int overflow adding total_duration and aTstampUsecs");
      return -1;
    };

    aTotalFrames += frames;
    mCallback->Output(new AudioData(aOffset, time.value(), duration.value(),
                                    frames, Move(buffer),
                                    mVorbisDsp.vi->channels,
                                    mVorbisDsp.vi->rate));
    mFrames += frames;
    if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
      return -1;
    }

    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  }

  return aTotalFrames > 0 ? 1 : 0;
}
Developer: Shaif95, Project: gecko-dev, Lines: 91
Example 20: decode_audio

static int decode_audio(sh_audio_t *sh, unsigned char *buf, int minlen, int maxlen)
{
  int len = 0;
  int samples;
#ifdef CONFIG_TREMOR
  ogg_int32_t **pcm;
#else
  float scale;
  float **pcm;
#endif
  struct ov_struct_st *ov = sh->context;
  while(len < minlen) {
    while((samples=vorbis_synthesis_pcmout(&ov->vd,&pcm))<=0){
      ogg_packet op;
      double pts;
      memset(&op,0,sizeof(op)); //op.b_o_s = op.e_o_s = 0;
      op.bytes = ds_get_packet_pts(sh->ds,&op.packet, &pts);
      if(op.bytes<=0) break;
      if (pts != MP_NOPTS_VALUE) {
        sh->pts = pts;
        sh->pts_bytes = 0;
      }
      if(vorbis_synthesis(&ov->vb,&op)==0) /* test for success! */
        vorbis_synthesis_blockin(&ov->vd,&ov->vb);
    }
    if(samples<=0) break; // error/EOF
    while(samples>0){
      int i,j;
      int clipflag=0;
      int convsize=(maxlen-len)/(2*ov->vi.channels); // max size!
      int bout=((samples<convsize)?samples:convsize);

      if(bout<=0) break; // no buffer space

      /* convert floats to 16 bit signed ints (host order) and interleave */
#ifdef CONFIG_TREMOR
      if (ov->rg_scale_int == 64) {
        for(i=0;i<ov->vi.channels;i++){
          ogg_int16_t *convbuffer=(ogg_int16_t *)(&buf[len]);
          ogg_int16_t *ptr=convbuffer+i;
          ogg_int32_t *mono=pcm[i];
          for(j=0;j<bout;j++){
            int val=mono[j]>>9;
            /* might as well guard against clipping */
            if(val>32767){
              val=32767;
              clipflag=1;
            }
            if(val<-32768){
              val=-32768;
              clipflag=1;
            }
            *ptr=val;
            ptr+=ov->vi.channels;
          }
        }
      } else
#endif /* CONFIG_TREMOR */
      {
#ifndef CONFIG_TREMOR
        scale = 32767.f * ov->rg_scale;
#endif
        for(i=0;i<ov->vi.channels;i++){
          ogg_int16_t *convbuffer=(ogg_int16_t *)(&buf[len]);
          ogg_int16_t *ptr=convbuffer+i;
#ifdef CONFIG_TREMOR
          ogg_int32_t *mono=pcm[i];
          for(j=0;j<bout;j++){
            int val=(mono[j]*ov->rg_scale_int)>>(9+6);
#else
          float *mono=pcm[i];
          for(j=0;j<bout;j++){
            int val=mono[j]*scale;
            /* might as well guard against clipping */
            if(val>32767){
              val=32767;
              clipflag=1;
            }
            if(val<-32768){
              val=-32768;
              clipflag=1;
            }
#endif /* CONFIG_TREMOR */
            *ptr=val;
            ptr+=ov->vi.channels;
          }
        }
      }

      if(clipflag)
        mp_msg(MSGT_DECAUDIO,MSGL_DBG2,"Clipping in frame %ld\n",(long)(ov->vd.sequence));
      len+=2*ov->vi.channels*bout;
      sh->pts_bytes += 2*ov->vi.channels*bout;
      mp_msg(MSGT_DECAUDIO,MSGL_DBG2,"\n[decoded: %d / %d ]\n",bout,samples);
      samples-=bout;
      vorbis_synthesis_read(&ov->vd,bout); /* tell libvorbis how many samples we actually consumed */
    } //while(samples>0)
// ... (remaining code omitted by the source page) ...
Developer: 0p1pp1, Project: mplayer, Lines: 101
Example 21: read_samples

static int read_samples(struct ast_filestream *fs, float ***pcm)
{
  int samples_in;
  int result;
  char *buffer;
  int bytes;
  struct vorbis_desc *s = (struct vorbis_desc *)fs->_private;

  while (1) {
    samples_in = vorbis_synthesis_pcmout(&s->vd, pcm);
    if (samples_in > 0) {
      return samples_in;
    }

    /* The Vorbis decoder needs more data... */
    /* See if OGG has any packets in the current page for the Vorbis decoder. */
    result = ogg_stream_packetout(&s->os, &s->op);
    if (result > 0) {
      /* Yes, OGG had some more packets for the Vorbis decoder. */
      if (vorbis_synthesis(&s->vb, &s->op) == 0) {
        vorbis_synthesis_blockin(&s->vd, &s->vb);
      }
      continue;
    }

    if (result < 0)
      ast_log(LOG_WARNING, "Corrupt or missing data at this page position; continuing...\n");

    /* No more packets left in the current page... */

    if (s->eos) {
      /* No more pages left in the stream */
      return -1;
    }

    while (!s->eos) {
      /* See if OGG has any pages in its internal buffers */
      result = ogg_sync_pageout(&s->oy, &s->og);
      if (result > 0) {
        /* Yes, OGG has more pages in its internal buffers,
           add the page to the stream state */
        result = ogg_stream_pagein(&s->os, &s->og);
        if (result == 0) {
          /* Yes, got a new, valid page */
          if (ogg_page_eos(&s->og)) {
            s->eos = 1;
          }
          break;
        }
        ast_log(LOG_WARNING, "Invalid page in the bitstream; continuing...\n");
      }

      if (result < 0)
        ast_log(LOG_WARNING, "Corrupt or missing data in bitstream; continuing...\n");

      /* No, we need to read more data from the file descriptor */
      /* get a buffer from OGG to read the data into */
      buffer = ogg_sync_buffer(&s->oy, BLOCK_SIZE);
      /* read more data from the file descriptor */
      bytes = fread(buffer, 1, BLOCK_SIZE, fs->f);
      /* Tell OGG how many bytes we actually read into the buffer */
      ogg_sync_wrote(&s->oy, bytes);
      if (bytes == 0) {
        s->eos = 1;
      }
    }
  }
}
Developer: sipwise, Project: asterisk, Lines: 72
Example 22: MOZ_ASSERT

RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime ||
      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
  }

  ogg_packet pkt = InitVorbisPacket(
    aData, aLength, false, aSample->mEOS,
    aSample->mTimecode.ToMicroseconds(), mPacketCount++);

  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis:%d", err)),
      __func__);
  }

  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
      __func__);
  }

  VorbisPCMValue** pcm = 0;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  if (frames == 0) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  DecodedData results;
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    uint32_t rate = mVorbisDsp.vi->rate;
    AlignedAudioBuffer buffer(frames*channels);
    if (!buffer) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
    }
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    auto duration = FramesToTimeUnit(frames, rate);
    if (!duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio duration")),
        __func__);
    }
    auto total_duration = FramesToTimeUnit(mFrames, rate);
    if (!total_duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio total_duration")),
        __func__);
    }

    auto time = total_duration + aSample->mTime;
    if (!time.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Overflow adding total_duration and aSample->mTime")),
        __func__);
    };

    if (!mAudioConverter) {
      AudioConfig in(
        AudioConfig::ChannelLayout(channels, VorbisLayout(channels)), rate);
      AudioConfig out(channels, rate);
      if (!in.IsValid() || !out.IsValid()) {
        return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                      RESULT_DETAIL("Invalid channel layout:%u", channels)),
          __func__);
      }
      mAudioConverter = MakeUnique<AudioConverter>(in, out);
    }
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    AudioSampleBuffer data(Move(buffer));
    data = mAudioConverter->Process(Move(data));
// ... (remaining code omitted by the source page) ...
Developer: luke-chang, Project: gecko-1, Lines: 101
Example 23: main

// ... (earlier code omitted by the source page) ...
      if(bytes<4096)break;

      /* error case.  Must not be Vorbis data */
      fprintf(stderr,"Input does not appear to be an Ogg bitstream.\n");
      exit(1);
    }

    /* Get the serial number and set up the rest of decode. */
    /* serialno first; use it to set up a logical stream */
    ogg_stream_init(&os,ogg_page_serialno(&og));

    /* extract the initial header from the first page and verify that the
       Ogg bitstream is in fact Vorbis data */

    /* I handle the initial header first instead of just having the code
       read all three Vorbis headers at once because reading the initial
       header is an easy way to identify a Vorbis bitstream and it's
       useful to see that functionality seperated out. */

    vorbis_info_init(&vi);
    vorbis_comment_init(&vc);
    if(ogg_stream_pagein(&os,&og)<0){
      /* error; stream version mismatch perhaps */
      fprintf(stderr,"Error reading first page of Ogg bitstream data.\n");
      exit(1);
    }

    if(ogg_stream_packetout(&os,&op)!=1){
      /* no page? must not be vorbis */
      fprintf(stderr,"Error reading initial header packet.\n");
      exit(1);
    }

    if(vorbis_synthesis_headerin(&vi,&vc,&op)<0){
      /* error case; not a vorbis header */
      fprintf(stderr,"This Ogg bitstream does not contain Vorbis "
              "audio data.\n");
      exit(1);
    }

    /* At this point, we're sure we're Vorbis. We've set up the logical
       (Ogg) bitstream decoder. Get the comment and codebook headers and
       set up the Vorbis decoder */

    /* The next two packets in order are the comment and codebook headers.
       They're likely large and may span multiple pages. Thus we read
       and submit data until we get our two packets, watching that no
       pages are missing. If a page is missing, error out; losing a
       header page is the only place where missing data is fatal. */

    i=0;
    while(i<2){
      while(i<2){
        int result=ogg_sync_pageout(&oy,&og);
        if(result==0)break; /* Need more data */
        /* Don't complain about missing or corrupt data yet. We'll
           catch it at the packet output phase */
        if(result==1){
          ogg_stream_pagein(&os,&og); /* we can ignore any errors here
                                         as they'll also become apparent
                                         at packetout */
          while(i<2){
            result=ogg_stream_packetout(&os,&op);
            if(result==0)break;
            if(result<0){
              /* Uh oh; data at some point was corrupted or missing!
Developer: KolorKode, Project: Stg, Lines: 67
Example 24: OGV_LoadAudio

/*
  return: audio wants more packets
*/
static qboolean OGV_LoadAudio(cinematic_t *cin)
{
  qboolean     anyDataTransferred = qtrue;
  float        **pcm;
  int          frames, frameNeeded;
  int          i, j;
  short        *ptr;
  ogg_packet   op;
  vorbis_block vb;

  memset(&op, 0, sizeof(op));
  memset(&vb, 0, sizeof(vb));
  vorbis_block_init(&g_ogm->vd, &vb);

  while (anyDataTransferred && g_ogm->currentTime + MAX_AUDIO_PRELOAD > (int)(g_ogm->vd.granulepos * 1000 / g_ogm->vi.rate))
  {
    anyDataTransferred = qfalse;

    if ((frames = vorbis_synthesis_pcmout(&g_ogm->vd, &pcm)) > 0)
    {
      // vorbis -> raw
      ptr = (short *)g_ogm->audioBuffer;

      frameNeeded = (SIZEOF_RAWBUFF) / (OGG_SAMPLEWIDTH * g_ogm->vi.channels);

      if (frames < frameNeeded)
      {
        frameNeeded = frames;
      }

      for (i = 0; i < frameNeeded; i++)
      {
        for (j = 0; j < g_ogm->vi.channels; j++)
        {
          *(ptr++) = (short)((pcm[j][i] >= -1.0f && pcm[j][i] <= 1.0f) ? pcm[j][i] * 32767.f : 32767 * ((pcm[j][i] > 0.0f) - (pcm[j][i] < 0.0f)));
        }
      }

      // tell libvorbis how many samples we actually consumed (we ate them all!)
      vorbis_synthesis_read(&g_ogm->vd, frameNeeded);

      if (!(cin->flags & CIN_silent))
      {
        S_RawSamples(0, frameNeeded, g_ogm->vi.rate, OGG_SAMPLEWIDTH, g_ogm->vi.channels, g_ogm->audioBuffer, 1.0f, 1.0f);
      }

      anyDataTransferred = qtrue;
    }

    if (!anyDataTransferred)
    {
      // op -> vorbis
      if (ogg_stream_packetout(&g_ogm->os_audio, &op))
      {
        if (vorbis_synthesis(&vb, &op) == 0)
        {
          vorbis_synthesis_blockin(&g_ogm->vd, &vb);
        }
        anyDataTransferred = qtrue;
      }
    }
  }

  vorbis_block_clear(&vb);

  return (qboolean)(g_ogm->currentTime + MIN_AUDIO_PRELOAD > (int)(g_ogm->vd.granulepos * 1000 / g_ogm->vi.rate));
}
Developer: dstaesse, Project: etlegacy, Lines: 71
Example 25: vorbis_handle_data_packet

static GstFlowReturn
vorbis_handle_data_packet (GstVorbisDec * vd, ogg_packet * packet,
    GstClockTime timestamp, GstClockTime duration)
{
#ifdef USE_TREMOLO
  vorbis_sample_t *pcm;
#else
  vorbis_sample_t **pcm;
#endif
  guint sample_count;
  GstBuffer *out = NULL;
  GstFlowReturn result;
  GstMapInfo map;
  gsize size;

  if (G_UNLIKELY (!vd->initialized)) {
    result = vorbis_dec_handle_header_caps (vd);
    if (result != GST_FLOW_OK)
      goto not_initialized;
  }

  /* normal data packet */
  /* FIXME, we can skip decoding if the packet is outside of the
   * segment, this is however not very trivial as we need a previous
   * packet to decode the current one so we must be careful not to
   * throw away too much. For now we decode everything and clip right
   * before pushing data. */

#ifdef USE_TREMOLO
  if (G_UNLIKELY (vorbis_dsp_synthesis (&vd->vd, packet, 1)))
    goto could_not_read;
#else
  if (G_UNLIKELY (vorbis_synthesis (&vd->vb, packet)))
    goto could_not_read;

  if (G_UNLIKELY (vorbis_synthesis_blockin (&vd->vd, &vd->vb) < 0))
    goto not_accepted;
#endif

  /* assume all goes well here */
  result = GST_FLOW_OK;

  /* count samples ready for reading */
#ifdef USE_TREMOLO
  if ((sample_count = vorbis_dsp_pcmout (&vd->vd, NULL, 0)) == 0)
#else
  if ((sample_count = vorbis_synthesis_pcmout (&vd->vd, NULL)) == 0)
    goto done;
#endif

  size = sample_count * vd->info.bpf;
  GST_LOG_OBJECT (vd, "%d samples ready for reading, size %" G_GSIZE_FORMAT,
      sample_count, size);

  /* alloc buffer for it */
  out = gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (vd), size);

  gst_buffer_map (out, &map, GST_MAP_WRITE);
  /* get samples ready for reading now, should be sample_count */
#ifdef USE_TREMOLO
  if (G_UNLIKELY (vorbis_dsp_pcmout (&vd->vd, map.data, sample_count) != sample_count))
#else
  if (G_UNLIKELY (vorbis_synthesis_pcmout (&vd->vd, &pcm) != sample_count))
#endif
    goto wrong_samples;

#ifdef USE_TREMOLO
  if (vd->info.channels < 9)
    gst_audio_reorder_channels (map.data, map.size, GST_VORBIS_AUDIO_FORMAT,
        vd->info.channels, gst_vorbis_channel_positions[vd->info.channels - 1],
        gst_vorbis_default_channel_positions[vd->info.channels - 1]);
#else
  /* copy samples in buffer */
  vd->copy_samples ((vorbis_sample_t *) map.data, pcm, sample_count,
      vd->info.channels);
#endif

  GST_LOG_OBJECT (vd, "have output size of %" G_GSIZE_FORMAT, size);
  gst_buffer_unmap (out, &map);

done:
  /* whether or not data produced, consume one frame and advance time */
  result = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (vd), out, 1);

#ifdef USE_TREMOLO
  vorbis_dsp_read (&vd->vd, sample_count);
#else
  vorbis_synthesis_read (&vd->vd, sample_count);
#endif

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_NOT_NEGOTIATED;
  }
// ... (remaining code omitted by the source page) ...
Developer: Lachann, Project: gst-plugins-base, Lines: 101
Example 26: vorbis_read_sample

static sf_count_t
vorbis_read_sample (SF_PRIVATE *psf, void *ptr, sf_count_t lens, convert_func *transfn)
{
  VORBIS_PRIVATE *vdata = psf->codec_data;
  OGG_PRIVATE *odata = psf->container_data;
  int len, samples, i = 0;
  float **pcm;

  len = lens / psf->sf.channels;

  while ((samples = vorbis_synthesis_pcmout (&vdata->vdsp, &pcm)) > 0)
  {
    if (samples > len) samples = len;
    i += transfn (psf, samples, ptr, i, psf->sf.channels, pcm);
    len -= samples;
    /* tell libvorbis how many samples we actually consumed */
    vorbis_synthesis_read (&vdata->vdsp, samples);
    vdata->loc += samples;
    if (len == 0)
      return i; /* Is this necessary */
  };

  goto start0; /* Jump into the nasty nest */

  while (len > 0 && !odata->eos)
  {
    while (len > 0 && !odata->eos)
    {
      int result = ogg_sync_pageout (&odata->osync, &odata->opage);
      if (result == 0) break; /* need more data */
      if (result < 0)
      { /* missing or corrupt data at this page position */
        psf_log_printf (psf, "Corrupt or missing data in bitstream ; continuing...\n");
      }
      else
      { /* can safely ignore errors at this point */
        ogg_stream_pagein (&odata->ostream, &odata->opage);
start0:
        while (1)
        {
          result = ogg_stream_packetout (&odata->ostream, &odata->opacket);
          if (result == 0) break; /* need more data */
          if (result < 0)
          { /* missing or corrupt data at this page position */
            /* no reason to complain ; already complained above */
          }
          else
          { /* we have a packet.  Decode it */
            if (vorbis_synthesis (&vdata->vblock, &odata->opacket) == 0) /* test for success! */
              vorbis_synthesis_blockin (&vdata->vdsp, &vdata->vblock);
            /*
            ** pcm is a multichannel float vector. In stereo, for
            ** example, pcm [0] is left, and pcm [1] is right. samples is
            ** the size of each channel. Convert the float values
            ** (-1.<=range<=1.) to whatever PCM format and write it out.
            */
            while ((samples = vorbis_synthesis_pcmout (&vdata->vdsp, &pcm)) > 0)
            {
              if (samples > len) samples = len;
              i += transfn (psf, samples, ptr, i, psf->sf.channels, pcm);
              len -= samples;
              /* tell libvorbis how many samples we actually consumed */
              vorbis_synthesis_read (&vdata->vdsp, samples);
              vdata->loc += samples;
              if (len == 0)
                return i; /* Is this necessary */
            };
          }
        }
        if (ogg_page_eos (&odata->opage)) odata->eos = 1;
      }
    }

    if (!odata->eos)
    {
      char *buffer;
      int bytes;
      buffer = ogg_sync_buffer (&odata->osync, 4096);
      bytes = psf_fread (buffer, 1, 4096, psf);
      ogg_sync_wrote (&odata->osync, bytes);
      if (bytes == 0) odata->eos = 1;
    }
  }
  return i;
} /* vorbis_read_sample */
Developer: stohrendorf, Project: libsndfile, Lines: 77
Example 27: FXASSERT

DecoderStatus VorbisDecoder::process(Packet * packet) {
  FXASSERT(packet);

#ifdef HAVE_VORBIS_PLUGIN
  FXfloat ** pcm=NULL;
  FXfloat * buf32=NULL;
#else // HAVE_TREMOR_PLUGIN
  FXint ** pcm=NULL;
  FXshort * buf32=NULL;
#endif

  FXint p,navail=0;

  FXint ngiven,ntotalsamples,nsamples,sample,c,s;

  FXbool eos=packet->flags&FLAG_EOS;
  FXuint id=packet->stream;
  FXlong len=packet->stream_length;

  OggDecoder::process(packet);

  /// Init Decoder
  if (!has_dsp) {
    if (!init_decoder())
      return DecoderError;
    if (!has_dsp)
      return DecoderOk;
  }

  /// Find Stream Position
  if (stream_position==-1 && !find_stream_position())
    return DecoderOk;

  if (out) {
    navail = out->availableFrames();
  }

  while(get_next_packet()) {

    if (__unlikely(is_vorbis_header())) {
      GM_DEBUG_PRINT("[vorbis] unexpected vorbis header found. Resetting decoder\n");
      push_back_packet();
      reset_decoder();
      return DecoderOk;
    }

    if (vorbis_synthesis(&block,&op)==0)
      vorbis_synthesis_blockin(&dsp,&block);

    while((ngiven=vorbis_synthesis_pcmout(&dsp,&pcm))>0) {

      if (len>0) FXASSERT(stream_position+ngiven<=len);

      if (__unlikely(stream_position<stream_decode_offset)) {
        FXlong offset = FXMIN(ngiven,stream_decode_offset - stream_position);
        GM_DEBUG_PRINT("[vorbis] stream decode offset %ld. Skipping %ld of %ld \n",stream_decode_offset,offset,stream_decode_offset-stream_position);
        ngiven-=offset;
        stream_position+=offset;
        sample=offset;
        vorbis_synthesis_read(&dsp,offset);
        if (ngiven==0)
          continue;
      }
      else {
        sample=0;
      }

      for (ntotalsamples=ngiven;ntotalsamples>0;) {

        /// Get new buffer
        if (out==NULL) {
          out = engine->decoder->get_output_packet();
          if (out==NULL) return DecoderInterrupted;
          out->stream_position=stream_position;
          out->stream_length=len;
          out->af=af;
          navail = out->availableFrames();
        }

#ifdef HAVE_VORBIS_PLUGIN
        buf32 = out->flt();
#else // HAVE_TREMOR_PLUGIN
        buf32 = out->s16();
#endif

        /// Copy Samples
        nsamples = FXMIN(ntotalsamples,navail);
        for (p=0,s=sample;s<(nsamples+sample);s++){
          for (c=0;c<info.channels;c++,p++) {
#ifdef HAVE_VORBIS_PLUGIN
            buf32[p]=pcm[c][s];
#else
            buf32[p]=CLIP_TO_15(pcm[c][s]>>9);
#endif
          }
        }

        /// Update sample counts
        out->wroteFrames(nsamples);

        sample+=nsamples;
// ... (remaining code omitted by the source page) ...
Developer: sophom, Project: gogglesmm, Lines: 101
Example 28: main

int main(int argc,char *argv[]){

  int i,j;
  ogg_packet op;

  FILE *infile = stdin;

#ifdef _WIN32 /* We need to set stdin/stdout to binary mode. Damn windows. */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
#endif

  /* open the input file if any */
  if(argc==2){
    infile=fopen(argv[1],"rb");
    if(infile==NULL){
      fprintf(stderr,"Unable to open '%s' for playback.\n", argv[1]);
      exit(1);
    }
  }
  if(argc>2){
    usage();
    exit(1);
  }

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  theora_comment_init(&tc);
  theora_info_init(&ti);

  /* Ogg file open; parse the headers */
  /* Only interested in Vorbis/Theora streams */
  while(!stateflag){
    int ret=buffer_data(infile,&oy);
    if(ret==0)break;
    while(ogg_sync_pageout(&oy,&og)>0){
      ogg_stream_state test;

      /* is this a mandated initial header? If not, stop parsing */
      if(!ogg_page_bos(&og)){
        /* don't leak the page; get it into the appropriate stream */
        queue_page(&og);
        stateflag=1;
        break;
      }

      ogg_stream_init(&test,ogg_page_serialno(&og));
      ogg_stream_pagein(&test,&og);
      ogg_stream_packetout(&test,&op);

      /* identify the codec: try theora */
      if(!theora_p && theora_decode_header(&ti,&tc,&op)>=0){
        /* it is theora */
        memcpy(&to,&test,sizeof(test));
        theora_p=1;
      }else if(!vorbis_p && vorbis_synthesis_headerin(&vi,&vc,&op)>=0){
        /* it is vorbis */
        memcpy(&vo,&test,sizeof(test));
        vorbis_p=1;
      }else{
        /* whatever it is, we don't care about it */
        ogg_stream_clear(&test);
      }
    }
    /* fall through to non-bos page parsing */
  }

  /* we're expecting more header packets. */
  while((theora_p && theora_p<3) || (vorbis_p && vorbis_p<3)){
    int ret;

    /* look for further theora headers */
    while(theora_p && (theora_p<3) && (ret=ogg_stream_packetout(&to,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      if(theora_decode_header(&ti,&tc,&op)){
        printf("Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      theora_p++;
      if(theora_p==3)break;
    }

    /* look for more vorbis header packets */
    while(vorbis_p && (vorbis_p<3) && (ret=ogg_stream_packetout(&vo,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      if(vorbis_synthesis_headerin(&vi,&vc,&op)){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
// ... (remaining code omitted by the source page) ...
Developer: petterreinholdtsen, Project: cinelerra-hv, Lines: 101
Note: the vorbis_synthesis examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.