This tutorial of C++ vorbis_analysis_buffer code examples is quite practical; we hope it helps you.
This article collects typical usage examples of the C++ vorbis_analysis_buffer function. If you are wondering how to use vorbis_analysis_buffer in C++, or are looking for working examples of it, the hand-picked code samples below may help. A total of 30 code examples of vorbis_analysis_buffer are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code examples.

Example 1: vorbis_analysis_buffer

void EncoderVorbis::encodeBuffer(const CSAMPLE *samples, const int size) {
    float **buffer = vorbis_analysis_buffer(&m_vdsp, size);

    // Deinterleave samples. We use normalized floats in the engine [-1.0, 1.0]
    // and libvorbis expects samples in the range [-1.0, 1.0] so no conversion
    // is required.
    for (int i = 0; i < size/2; ++i) {
        buffer[0][i] = samples[i*2];
        buffer[1][i] = samples[i*2+1];
    }

    /** encodes audio **/
    vorbis_analysis_wrote(&m_vdsp, size/2);

    /** writes the OGG page and sends it to file or stream **/
    writePage();
}
Developer ID: dk0104, Project: mixxx, Lines of code: 15
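Nearly all of the examples on this page follow the same three-step pattern: request a non-interleaved float buffer from libvorbis with vorbis_analysis_buffer(), copy (and if necessary convert and deinterleave) the PCM samples into it, then report how many frames were actually submitted with vorbis_analysis_wrote() before draining blocks and packets. The following minimal sketch is not taken from any of the listed projects; the encode_chunk() helper, the 16-bit interleaved stereo input format, and the assumption that the dsp state, block, and Ogg stream are already initialized are all illustrative assumptions.

#include <vorbis/codec.h>

/* Hypothetical helper: encode one chunk of interleaved 16-bit stereo PCM.
   Assumes vd (vorbis_dsp_state), vb (vorbis_block) and os (ogg_stream_state)
   have already been set up elsewhere, as in the examples below. */
static void encode_chunk(vorbis_dsp_state *vd, vorbis_block *vb,
                         ogg_stream_state *os,
                         const short *pcm, int frames)
{
    /* 1. ask libvorbis for an internal buffer large enough for 'frames' */
    float **buffer = vorbis_analysis_buffer(vd, frames);

    /* 2. deinterleave and scale to [-1.0, 1.0] */
    for (int i = 0; i < frames; i++) {
        buffer[0][i] = pcm[2*i]     / 32768.f;
        buffer[1][i] = pcm[2*i + 1] / 32768.f;
    }

    /* 3. tell the library how many frames were actually submitted */
    vorbis_analysis_wrote(vd, frames);

    /* drain any completed blocks/packets into the Ogg stream */
    while (vorbis_analysis_blockout(vd, vb) == 1) {
        ogg_packet op;
        vorbis_analysis(vb, NULL);
        vorbis_bitrate_addblock(vb);
        while (vorbis_bitrate_flushpacket(vd, &op))
            ogg_stream_packetin(os, &op);
    }
}

The examples differ mainly in where the PCM comes from and how it is converted into that float buffer, not in the calling sequence itself.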
Example 2: ogg_write_d

static sf_count_t
ogg_write_d (SF_PRIVATE *psf, const double *ptr, sf_count_t lens)
{
    int i, m, j = 0 ;
    OGG_PRIVATE *odata = (OGG_PRIVATE *) psf->container_data ;
    VORBIS_PRIVATE *vdata = (VORBIS_PRIVATE *) psf->codec_data ;
    int in_frames = lens / psf->sf.channels ;
    float **buffer = vorbis_analysis_buffer (&vdata->vd, in_frames) ;

    for (i = 0 ; i < in_frames ; i++)
        for (m = 0 ; m < psf->sf.channels ; m++)
            buffer [m][i] = (float) ptr [j++] ;

    ogg_write_samples (psf, odata, vdata, in_frames) ;

    return lens ;
} /* ogg_write_d */
Developer ID: dpalkowski, Project: iSuperColliderKit, Lines of code: 15
Example 3: vorbis_write_i

static sf_count_t
vorbis_write_i (SF_PRIVATE *psf, const int *ptr, sf_count_t lens)
{
    int i, m, j = 0 ;
    OGG_PRIVATE *odata = (OGG_PRIVATE *) psf->container_data ;
    VORBIS_PRIVATE *vdata = (VORBIS_PRIVATE *) psf->codec_data ;
    int in_frames = lens / psf->sf.channels ;
    float **buffer = vorbis_analysis_buffer (&vdata->vdsp, in_frames) ;

    for (i = 0 ; i < in_frames ; i++)
        for (m = 0 ; m < psf->sf.channels ; m++)
            buffer [m][i] = (float) (ptr [j++]) / 2147483647.0f ;

    vorbis_write_samples (psf, odata, vdata, in_frames) ;

    return lens ;
} /* vorbis_write_i */
Developer ID: naxxfish, Project: libsndfile, Lines of code: 15
Example 4: xmms_ices_encoder_input

/* Encode the given data into Ogg Vorbis. */
void xmms_ices_encoder_input (encoder_state *s, xmms_samplefloat_t *buf, int bytes)
{
    float **buffer;
    int i, j;
    int channels = s->vi.channels;
    int samples = bytes / (sizeof (xmms_samplefloat_t) * channels);

    buffer = vorbis_analysis_buffer (&s->vd, samples);

    for (i = 0; i < samples; i++)
        for (j = 0; j < channels; j++)
            buffer[j][i] = buf[i*channels + j];

    vorbis_analysis_wrote (&s->vd, samples);
    s->samples_in_current_page += samples;
}
Developer ID: eggpi, Project: xmms2-guilherme, Lines of code: 17
Example 5: encode_data_float

void encode_data_float(encoder_state *s, float **pcm, int samples)
{
    float **buf;
    int i;

    buf = vorbis_analysis_buffer(&s->vd, samples);

    for(i=0; i < s->vi.channels; i++)
    {
        memcpy(buf[i], pcm[i], samples*sizeof(float));
    }

    vorbis_analysis_wrote(&s->vd, samples);

    s->samples_in_current_page += samples;
}
Developer ID: miksago, Project: icecast, Lines of code: 16
Example 6: SyncEncodeSoundBuffer

void SyncEncodeSoundBuffer(ProgData *pdata, signed char *buff){
    float **vorbis_buffer;
    int count = 0, i, j;
    int sampread = (buff != NULL) ? pdata->periodsize : 0;

    vorbis_buffer = vorbis_analysis_buffer(&pdata->enc_data->m_vo_dsp, sampread);

    if(!pdata->args.use_jack){
        for(i = 0; i < sampread; i++){
            for(j = 0; j < pdata->args.channels; j++){
                vorbis_buffer[j][i] = ((buff[count+1]<<8)|
                                       (0x00ff&(int)buff[count]))/
                                      32768.f;
                count += 2;
            }
        }
    }
    else{
        for(j = 0; j < pdata->args.channels; j++){
            for(i = 0; i < sampread; i++){
                vorbis_buffer[j][i] = ((float*)buff)[count];
                count++;
            }
        }
    }

    vorbis_analysis_wrote(&pdata->enc_data->m_vo_dsp, sampread);

    pthread_mutex_lock(&pdata->libogg_mutex);
    while(vorbis_analysis_blockout(&pdata->enc_data->m_vo_dsp,
                                   &pdata->enc_data->m_vo_block) == 1){

        vorbis_analysis(&pdata->enc_data->m_vo_block, NULL);
        vorbis_bitrate_addblock(&pdata->enc_data->m_vo_block);

        while(vorbis_bitrate_flushpacket(&pdata->enc_data->m_vo_dsp,
                                         &pdata->enc_data->m_ogg_pckt2)){
            ogg_stream_packetin(&pdata->enc_data->m_ogg_vs,
                                &pdata->enc_data->m_ogg_pckt2);
        }
    }
    pthread_mutex_unlock(&pdata->libogg_mutex);

    if(!pdata->running) pdata->enc_data->m_ogg_vs.e_o_s = 1;

    pdata->avd -= pdata->periodtime;
}
Developer ID: minlexx, Project: recordmydesktop-pulse, Lines of code: 47
Example 7: Process

bool ShoutVSTEncoderOGG::Process( float **inputs, long sampleFrames )
{
    if (!bInitialized) return false;

    float **buffer = vorbis_analysis_buffer(&vd, sampleFrames);

    /* uninterleave samples */
    for(int i = 0; i < sampleFrames; i++){
        buffer[0][i] = inputs[0][i];
        buffer[1][i] = inputs[1][i];
    }

    /* tell the library how much we actually submitted */
    vorbis_analysis_wrote(&vd, sampleFrames);

    /* vorbis does some data preanalysis, then divvies up blocks for
       more involved (potentially parallel) processing.  Get a single
       block for encoding now */
    int eos = 0;
    while(vorbis_analysis_blockout(&vd, &vb) == 1){

        /* analysis, assume we want to use bitrate management */
        vorbis_analysis(&vb, NULL);
        vorbis_bitrate_addblock(&vb);

        while(vorbis_bitrate_flushpacket(&vd, &op)){

            /* weld the packet into the bitstream */
            ogg_stream_packetin(&os, &op);

            /* write out pages (if any) */
            while(!eos){
                int result = ogg_stream_pageout(&os, &og);
                if(result == 0) break;
                if (!SendOGGPageToICE(&og)) return false;

                /* this could be set above, but for illustrative purposes, I do
                   it here (to show that vorbis does know where the stream ends) */
                if(ogg_page_eos(&og)) eos = 1;
            }
        }
    }
    return true;
}
Developer ID: svn2github, Project: shoutvst, Lines of code: 47
Example 8: vorbis_analysis_buffer

int FileVorbis::write_samples(double **buffer, int64_t len)
{
    if(!fd) return 0;

    float **vorbis_buffer = vorbis_analysis_buffer(&vd, len);
    for(int i = 0; i < asset->channels; i++)
    {
        float *output = vorbis_buffer[i];
        double *input = buffer[i];
        for(int j = 0; j < len; j++)
        {
            output[j] = input[j];
        }
    }
    vorbis_analysis_wrote(&vd, len);

    FLUSH_VORBIS

    return 0;
}
Developer ID: knutj, Project: cinelerra, Lines of code: 20
Example 9: while

void TargetFileOggVorbis::performWrite( const Buffer *buffer, size_t numFrames, size_t frameOffset )
{
    // process incoming buffer in chunks of maximum mVorbisBufferSize, this prevents memory allocation errors
    auto currFrame = frameOffset;
    auto lastFrame = frameOffset + numFrames;

    while ( currFrame != lastFrame ) {
        auto numFramesChunk = std::min( mVorbisBufferSize, lastFrame - currFrame );

        float ** bufferOgg = vorbis_analysis_buffer( &mVorbisDspState, (int)numFramesChunk );

        for ( size_t c = 0; c < getNumChannels(); ++c ) {
            std::memcpy( bufferOgg[ c ], buffer->getChannel( c ) + currFrame, numFramesChunk * sizeof( float ) );
        }

        vorbis_analysis_wrote( &mVorbisDspState, (int)numFramesChunk );
        processAndWriteVorbisBlocks();

        currFrame += numFramesChunk;
    }
}
Developer ID: Ahbee, Project: Cinder, Lines of code: 20
Example 10: output_data

static int output_data(char *readbuffer, int32 bytes)
{
    int i, j, ch = ((dpm.encoding & PE_MONO) ? 1 : 2);
    double **buffer;
    int16 *samples = (int16 *)readbuffer;
    int nsamples = bytes / (2 * ch);
    ogg_page   og; /* one Ogg bitstream page.  Vorbis packets are inside */
    ogg_packet op; /* one raw packet of data for decode */

    /* data to encode */

    /* expose the buffer to submit data */
    buffer = vorbis_analysis_buffer(&vd, nsamples);

    /* uninterleave samples */
    for(j = 0; j < ch; j++)
        for(i = 0; i < nsamples; i++)
            buffer[j][i] = samples[i*ch+j] * (1.0/32768.0);

    /* tell the library how much we actually submitted */
    vorbis_analysis_wrote(&vd, nsamples);

    /* vorbis does some data preanalysis, then divvies up blocks for
       more involved (potentially parallel) processing.  Get a single
       block for encoding now */
    while(vorbis_analysis_blockout(&vd, &vb) == 1) {

        /* analysis */
        vorbis_analysis(&vb, &op);

        /* weld the packet into the bitstream */
        ogg_stream_packetin(&os, &op);

        /* write out pages (if any) */
        while(ogg_stream_pageout(&os, &og) != 0) {
            write(dpm.fd, og.header, og.header_len);
            write(dpm.fd, og.body, og.body_len);
        }
    }

    return 0;
}
Developer ID: OS2World, Project: MM-SOUND-TiMidity-MCD, Lines of code: 41
Example 11: vorbis_encoder_write

static bool
vorbis_encoder_write(struct encoder *_encoder,
                     const void *data, size_t length,
                     G_GNUC_UNUSED GError **error)
{
    struct vorbis_encoder *encoder = (struct vorbis_encoder *)_encoder;
    unsigned num_frames;

    num_frames = length / audio_format_frame_size(&encoder->audio_format);

    /* this is for only 16-bit audio */

    pcm16_to_vorbis_buffer(vorbis_analysis_buffer(&encoder->vd, num_frames),
                           (const int16_t *)data,
                           num_frames, encoder->audio_format.channels);

    vorbis_analysis_wrote(&encoder->vd, num_frames);
    vorbis_encoder_blockout(encoder);
    return true;
}
Developer ID: andrewrk, Project: mpd, Lines of code: 21
Example 12: vorbis_analysis_buffer

/*
 * Encode length bytes of audio from the packet into Ogg stream
 */
PRBool
MediaRecorder::EncodeAudio(PRInt16 *a_frames, int len)
{
    int i, j, n;
    float **a_buffer;

    /* Uninterleave samples */
    n = len / aState->backend->GetFrameSize();
    a_buffer = vorbis_analysis_buffer(&aState->vd, n);
    for (i = 0; i < n; i++){
        for (j = 0; j < (int)params->chan; j++) {
            a_buffer[j][i] = (float)((float)a_frames[i+j] / 32768.f);
        }
    }

    /* Tell libvorbis to do its thing */
    vorbis_analysis_wrote(&aState->vd, n);
    WriteAudio();

    return PR_TRUE;
}
Developer ID: 1981khj, Project: rainbow, Lines of code: 24
Example 13: vorbis_write

static int vorbis_write(SWORD *pbuf, size_t nr)
{
    float **buffer;
    size_t i;
    size_t amount = (stereo) ? nr / 2 : nr;
    int result;
    int eos = 0;

    buffer = vorbis_analysis_buffer(&vd, (int)amount);

    for (i = 0; i < amount; i++) {
        if (stereo == 1) {
            buffer[0][i] = pbuf[i * 2] / 32768.f;
            buffer[1][i] = pbuf[(i * 2) + 1] / 32768.f;
        } else {
            buffer[0][i] = pbuf[i] / 32768.f;
        }
    }

    vorbis_analysis_wrote(&vd, (int)i);

    while (vorbis_analysis_blockout(&vd, &vb) == 1) {
        vorbis_analysis(&vb, NULL);
        vorbis_bitrate_addblock(&vb);
        while (vorbis_bitrate_flushpacket(&vd, &op)) {
            ogg_stream_packetin(&os, &op);
            while (!eos) {
                result = ogg_stream_pageout(&os, &og);
                if (!result) {
                    break;
                }
                fwrite(og.header, 1, (size_t)(og.header_len), vorbis_fd);
                fwrite(og.body, 1, (size_t)(og.body_len), vorbis_fd);
                if (ogg_page_eos(&og)) {
                    eos = 1;
                }
            }
        }
    }
    return 0;
}
Developer ID: Rakashazi, Project: emu-ex-plus-alpha, Lines of code: 40
Example 14: process_frames

static void process_frames(void *self) {
    VORBIS_STREAM *stream;
    size_t frames;
    ogg_packet header, header_comm, header_code;
    float **vorbuf;
    size_t i, j;

    stream = (VORBIS_STREAM *)self;
    frames = stream->header.bookmark;
    if (stream->header.init == 0) {
        stream->header.init = 1;
        vorbis_analysis_headerout(&(stream->vd), &(stream->vc),
                                  &header, &header_comm, &header_code);
        ogg_stream_packetin(&(stream->os), &header);
        ogg_stream_packetin(&(stream->os), &header_comm);
        ogg_stream_packetin(&(stream->os), &header_code);
        while (ogg_stream_flush(&(stream->os), &(stream->og)))
            vorbis_send(stream);
    }
    vorbuf = vorbis_analysis_buffer(&(stream->vd), frames);
    for (i = 0; i < QMX_CHANNELS; i++) {
        for (j = 0; j < frames; j++) {
            vorbuf[i][j] = (float)(distch[i][j]);
        }
    }
    vorbis_analysis_wrote(&(stream->vd), frames);
    while (vorbis_analysis_blockout(&(stream->vd), &(stream->vb)) == 1) {
        vorbis_analysis(&(stream->vb), NULL); //&(stream->op));
        vorbis_bitrate_addblock(&(stream->vb));
        while (vorbis_bitrate_flushpacket(&(stream->vd), &(stream->op))) {
            ogg_stream_packetin(&(stream->os), &(stream->op));
        }
    }
    while (ogg_stream_pageout(&(stream->os), &(stream->og)))
        vorbis_send(stream);
    stream->header.bookmark = 0;
    return;
}
Developer ID: pmyadlowsky, Project: qmx, Lines of code: 39
Example 15: ogg_vorbis_write

/*!
 * \brief Write audio data from a frame to an OGG/Vorbis filestream.
 * \param fs An OGG/Vorbis filestream.
 * \param f A frame containing audio to be written to the filestream.
 * \return -1 if there was an error, 0 on success.
 */
static int ogg_vorbis_write(struct ast_filestream *fs, struct ast_frame *f)
{
    int i;
    float **buffer;
    short *data;
    struct ogg_vorbis_desc *s = (struct ogg_vorbis_desc *) fs->_private;

    if (!s->writing) {
        ast_log(LOG_ERROR, "This stream is not set up for writing!\n");
        return -1;
    }

    if (f->frametype != AST_FRAME_VOICE) {
        ast_log(LOG_WARNING, "Asked to write non-voice frame!\n");
        return -1;
    }
    if (f->subclass.format.id != AST_FORMAT_SLINEAR) {
        ast_log(LOG_WARNING, "Asked to write non-SLINEAR frame (%s)!\n",
                ast_getformatname(&f->subclass.format));
        return -1;
    }
    if (!f->datalen)
        return -1;

    data = (short *) f->data.ptr;

    buffer = vorbis_analysis_buffer(&s->vd, f->samples);

    for (i = 0; i < f->samples; i++)
        buffer[0][i] = (double)data[i] / 32768.0;

    vorbis_analysis_wrote(&s->vd, f->samples);

    write_stream(s, fs->f);

    s->writing_pcm_pos += f->samples;

    return 0;
}
Developer ID: aderbas, Project: asterisk, Lines of code: 45
Example 16: vorbis_write_real

static void vorbis_write_real (void * data, gint length)
{
    int samples = length / sizeof (float);
    int channel, result;
    float * end = (float *) data + samples;
    float * * buffer = vorbis_analysis_buffer (& vd, samples / input.channels);
    float * from, * to;

    for (channel = 0; channel < input.channels; channel ++)
    {
        to = buffer[channel];

        for (from = (float *) data + channel; from < end; from += input.channels)
            * to ++ = * from;
    }

    vorbis_analysis_wrote (& vd, samples / input.channels);

    while(vorbis_analysis_blockout(&vd, &vb) == 1)
    {
        vorbis_analysis(&vb, &op);
        vorbis_bitrate_addblock(&vb);

        while (vorbis_bitrate_flushpacket(&vd, &op))
        {
            ogg_stream_packetin(&os, &op);

            while ((result = ogg_stream_pageout(&os, &og)))
            {
                if (result == 0)
                    break;

                write_output(og.header, og.header_len);
                write_output(og.body, og.body_len);
            }
        }
    }
}
Developer ID: ivan-dives, Project: audacious-plugins, Lines of code: 38
Example 17: ogg_vorbis_write

/*!
 * \brief Write audio data from a frame to an OGG/Vorbis filestream.
 * \param s An OGG/Vorbis filestream.
 * \param f A frame containing audio to be written to the filestream.
 * \return -1 if there was an error, 0 on success.
 */
static int ogg_vorbis_write(struct cw_filestream *s, struct cw_frame *f)
{
    int i;
    float **buffer;
    short *data;

    if (!s->writing) {
        cw_log(LOG_ERROR, "This stream is not set up for writing!\n");
        return -1;
    }

    if (f->frametype != CW_FRAME_VOICE) {
        cw_log(LOG_WARNING, "Asked to write non-voice frame!\n");
        return -1;
    }
    if (f->subclass != CW_FORMAT_SLINEAR) {
        cw_log(LOG_WARNING, "Asked to write non-SLINEAR frame (%d)!\n",
               f->subclass);
        return -1;
    }
    if (!f->datalen)
        return -1;

    data = (short *) f->data;

    buffer = vorbis_analysis_buffer(&s->vd, f->samples);

    for (i = 0; i < f->samples; i++)
        buffer[0][i] = data[i]/32768.f;

    vorbis_analysis_wrote(&s->vd, f->samples);

    write_stream(s);

    return 0;
}
Developer ID: wildzero-cw, Project: callweaver, Lines of code: 44
Example 18: Encode

/***********************************************************************
 * Encode
 ***********************************************************************
 *
 **********************************************************************/
static hb_buffer_t* Encode(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *buf;
    float **buffer;
    int i, j;

    /* Try to extract more data */
    if ((buf = Flush(w)) != NULL)
    {
        return buf;
    }

    /* Check if we need more data */
    if (hb_list_bytes(pv->list) < pv->input_samples * sizeof(float))
    {
        return NULL;
    }

    /* Process more samples */
    hb_list_getbytes(pv->list, pv->buf, pv->input_samples * sizeof(float),
                     &pv->pts, NULL);
    buffer = vorbis_analysis_buffer(&pv->vd, OGGVORBIS_FRAME_SIZE);
    for (i = 0; i < OGGVORBIS_FRAME_SIZE; i++)
    {
        for (j = 0; j < pv->out_discrete_channels; j++)
        {
            buffer[j][i] = ((float*)pv->buf)[(pv->out_discrete_channels * i +
                                              pv->remap_table[j])];
        }
    }
    vorbis_analysis_wrote(&pv->vd, OGGVORBIS_FRAME_SIZE);

    /* Try to extract again */
    return Flush(w);
}
Developer ID: evolver56k, Project: HandBrake, Lines of code: 42
Example 19: sizeof

void OggWriter::writeFrame( const char * sample, std::size_t nSamples ) {
    const int SAMPLES = nSamples / sizeof( short int ) / this->getChannels();
    const short int * audio = static_cast< const short int * >( static_cast< const void * >( sample ) );
    float * * buffer = vorbis_analysis_buffer( this->dsp_.get(), SAMPLES );
    if( this->getChannels() == 1 ) {
        for( int l = 0; l < SAMPLES; ++l ) {
            buffer[0][l] = audio[l] / 32768.f;
        }
    } else {
        for( int l = 0; l < SAMPLES; ++l ) {
            buffer[0][l] = audio[l*2] / 32768.f;
            buffer[1][l] = audio[l*2+1] / 32768.f;
        }
    }
    vorbis_analysis_wrote( this->dsp_.get(), SAMPLES );

    while( vorbis_analysis_blockout( this->dsp_.get(), this->block_.get() ) == 1 ) {
        vorbis_analysis( this->block_.get(), NULL );
        vorbis_bitrate_addblock( this->block_.get() );

        ogg_packet op;
        while( vorbis_bitrate_flushpacket( this->dsp_.get(), &op ) ) {
            ogg_stream_packetin( this->muxer_.get(), &op );
        }

        for(;;) {
            ogg_page og;
            int ret = ogg_stream_pageout( this->muxer_.get(), &og );
            if( ret == 0 ) {
                break;
            }
            std::fwrite( og.header, 1, og.header_len, this->fout_.get() );
            std::fwrite( og.body, 1, og.body_len, this->fout_.get() );
        }
    }
}
Developer ID: legnaleurc, Project: khopper-pkg-debian, Lines of code: 37
Example 20: encode_data

/* Requires little endian data (currently) */
void encode_data(encoder_state *s, signed char *buf, int bytes, int bigendian)
{
    float **buffer;
    int i, j;
    int channels = s->vi.channels;
    int samples = bytes/(2*channels);

    buffer = vorbis_analysis_buffer(&s->vd, samples);

    if(bigendian)
    {
        for(i=0; i < samples; i++)
        {
            for(j=0; j < channels; j++)
            {
                buffer[j][i]=((buf[2*(i*channels + j)]<<8) |
                              (0x00ff&(int)buf[2*(i*channels + j)+1]))/32768.f;
            }
        }
    }
    else
    {
        for(i=0; i < samples; i++)
        {
            for(j=0; j < channels; j++)
            {
                buffer[j][i]=((buf[2*(i*channels + j) + 1]<<8) |
                              (0x00ff&(int)buf[2*(i*channels + j)]))/32768.f;
            }
        }
    }

    vorbis_analysis_wrote(&s->vd, samples);

    s->samples_in_current_page += samples;
}
Developer ID: miksago, Project: icecast, Lines of code: 37
Example 21: _create_audio_buffer

static GstBuffer *
_create_audio_buffer (void)
{
    GstBuffer *buffer;
    ogg_packet packet;
    float **vorbis_buffer G_GNUC_UNUSED;

    vorbis_buffer = vorbis_analysis_buffer (&vd, 0);
    vorbis_analysis_wrote (&vd, 0);
    vorbis_analysis_blockout (&vd, &vb);
    vorbis_analysis (&vb, NULL);
    vorbis_bitrate_addblock (&vb);
    vorbis_bitrate_flushpacket (&vd, &packet);
    buffer = gst_buffer_new_and_alloc (packet.bytes);
    gst_buffer_fill (buffer, 0, packet.packet, packet.bytes);
    GST_DEBUG ("%p %d", packet.packet, packet.bytes);

    vorbis_comment_clear (&vc);
    vorbis_block_clear (&vb);
    vorbis_dsp_clear (&vd);
    vorbis_info_clear (&vi);

    return buffer;
}
Developer ID: rawoul, Project: gst-plugins-base, Lines of code: 24
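Example 21 above, like the shutdown paths in Examples 22, 23, 26, and 30 below, relies on the fact that submitting 0 samples via vorbis_analysis_wrote() tells libvorbis that the stream has ended; the library then emits the final frames and marks the last Ogg page with the end-of-stream flag. The sketch below shows that shutdown sequence in isolation; it is not taken from any of the quoted projects, and the finish_stream() helper, the write_page() callback, and the already-initialized vd/vb/os states are illustrative assumptions.

/* Hypothetical shutdown path: flush the encoder and drain the final pages.
   write_page() stands in for whatever the application does with finished pages. */
static void finish_stream(vorbis_dsp_state *vd, vorbis_block *vb,
                          ogg_stream_state *os,
                          void (*write_page)(const ogg_page *))
{
    ogg_packet op;
    ogg_page og;

    vorbis_analysis_wrote(vd, 0);   /* 0 samples => end of stream */

    /* drain the remaining blocks and packets */
    while (vorbis_analysis_blockout(vd, vb) == 1) {
        vorbis_analysis(vb, NULL);
        vorbis_bitrate_addblock(vb);
        while (vorbis_bitrate_flushpacket(vd, &op))
            ogg_stream_packetin(os, &op);
    }

    /* force out any partially filled final page as well */
    while (ogg_stream_flush(os, &og))
        write_page(&og);
}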
Example 22: while

static void *recordingThreadFct(void *data)
{
    enum {
        bufInCount = 1024,
        bufInSize = 2*bufInCount,
    };
    short int bufIn[bufInSize];
    char *bufInBytes = (char*)bufIn;
    ogg_packet op;

    RecordingParams *rp = (RecordingParams*)data;
    int l;

    while (1) {
        sb_lock(recBuffer);

        if (recBuffer->usedCount >= bufInSize) {
            //buffer contains enough data, let's encode it
            sb_unlock(recBuffer);

            sb_retrieveData(recBuffer, bufIn, bufInSize);

            //convert PCM to OGG library compatible format
            float **buffer = vorbis_analysis_buffer(&rp->vd, bufInCount);
            if (rp->vi.channels == 1) {
                for(l = 0; l < bufInCount; l++) {
                    buffer[0][l] = ((bufInBytes[l*2+1]<<8)|(0x00ff&(int)bufInBytes[l*2]))/32768.f;
                }
            } else {
                for(l = 0; l < bufInCount; l++) {
                    buffer[0][l] = ((bufInBytes[l*4+1]<<8)|(0x00ff&(int)bufInBytes[l*4]))/32768.f;
                    buffer[1][l] = ((bufInBytes[l*4+3]<<8)|(0x00ff&(int)bufInBytes[l*4+2]))/32768.f;
                }
            }

            //encode and write out
            vorbis_analysis_wrote(&rp->vd, bufInCount);

            while(vorbis_analysis_blockout(&rp->vd, &rp->vb) == 1) {
                vorbis_analysis(&rp->vb, NULL);
                vorbis_bitrate_addblock(&rp->vb);

                while(vorbis_bitrate_flushpacket(&rp->vd, &op)) {
                    ogg_stream_packetin(&rp->os, &op);
                    ogg_flushall(rp);
                }
            }
        } else {
            //not much in the buffer, wait for a while to get new data
            sb_unlock(recBuffer);
#ifndef _WIN32
            usleep(100*1000);
#else
            Sleep(100);
#endif
        }

        pthread_mutex_lock(&threadRunMutex);
        if (threadRunStop) {
            pthread_mutex_unlock(&threadRunMutex);
            break;
        }
        pthread_mutex_unlock(&threadRunMutex);
    }

    //close encoder and output file
    vorbis_analysis_wrote(&rp->vd, 0);
    ogg_flushall(rp);

    vorbis_block_clear(&rp->vb);
    vorbis_dsp_clear(&rp->vd);
    vorbis_info_clear(&rp->vi);

    fclose(rp->outputFile);

    //empty output buffer
    pthread_spin_lock(&recBufferLock);
    sb_destroyBuffer(recBuffer);
    recBuffer = 0;
    pthread_spin_unlock(&recBufferLock);

    free(rp);

    pthread_exit(NULL);
    return 0; // returns something
}
Developer ID: detlevn, Project: qgismapper, Lines of code: 85
Example 23: main

//......... (part of the code omitted here) .........

       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libvorbis one at a time;
       libvorbis handles the additional Ogg bitstream constraints */

    {
        ogg_packet header;
        ogg_packet header_comm;
        ogg_packet header_code;

        vorbis_analysis_headerout(&vd,&vc,&header,&header_comm,&header_code);
        ogg_stream_packetin(&os,&header); /* automatically placed in its own page */
        ogg_stream_packetin(&os,&header_comm);
        ogg_stream_packetin(&os,&header_code);

        /* This ensures the actual
         * audio data will start on a new page, as per spec
         */
        while(!eos){
            int result=ogg_stream_flush(&os,&og);
            if(result==0)break;
            fwrite(og.header,1,og.header_len,stdout);
            fwrite(og.body,1,og.body_len,stdout);
        }
    }

    while(!eos){
        long i;
        long bytes=fread(readbuffer,1,READ*4,stdin); /* stereo hardwired here */

        if(bytes==0){
            /* end of file.  this can be done implicitly in the mainline,
               but it's easier to see here in non-clever fashion.
               Tell the library we're at end of stream so that it can handle
               the last frame and mark end of stream in the output properly */
            vorbis_analysis_wrote(&vd,0);

        }else{
            /* data to encode */

            /* expose the buffer to submit data */
            float **buffer=vorbis_analysis_buffer(&vd,READ);

            /* uninterleave samples */
            for(i=0;i<bytes/4;i++){
                buffer[0][i]=((readbuffer[i*4+1]<<8)|
                              (0x00ff&(int)readbuffer[i*4]))/32768.f;
                buffer[1][i]=((readbuffer[i*4+3]<<8)|
                              (0x00ff&(int)readbuffer[i*4+2]))/32768.f;
            }

            /* tell the library how much we actually submitted */
            vorbis_analysis_wrote(&vd,i);
        }

        /* vorbis does some data preanalysis, then divvies up blocks for
           more involved (potentially parallel) processing.  Get a single
           block for encoding now */
        while(vorbis_analysis_blockout(&vd,&vb)==1){

            /* analysis, assume we want to use bitrate management */
            vorbis_analysis(&vb,NULL);
            vorbis_bitrate_addblock(&vb);

            while(vorbis_bitrate_flushpacket(&vd,&op)){

                /* weld the packet into the bitstream */
                ogg_stream_packetin(&os,&op);

                /* write out pages (if any) */
                while(!eos){
                    int result=ogg_stream_pageout(&os,&og);
                    if(result==0)break;
                    fwrite(og.header,1,og.header_len,stdout);
                    fwrite(og.body,1,og.body_len,stdout);

                    /* this could be set above, but for illustrative purposes, I do
                       it here (to show that vorbis does know where the stream ends) */
                    if(ogg_page_eos(&og))eos=1;
                }
            }
        }
    }

    /* clean up and exit.  vorbis_info_clear() must be called last */

    ogg_stream_clear(&os);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);

    /* ogg_page and ogg_packet structs always point to storage in
       libvorbis.  They're never freed or manipulated directly */

    fprintf(stderr,"Done.\n");
    return(0);
}
Developer ID: John-He-928, Project: krkrz, Lines of code: 101
Example 24: vorbis_analysis_wrote

int vorbis_analysis_wrote(vorbis_dsp_state *v, int vals){
  vorbis_info *vi=v->vi;
  codec_setup_info *ci=vi->codec_setup;

  if(vals<=0){
    int order=32;
    int i;
    float *lpc=alloca(order*sizeof(*lpc));

    /* if it wasn't done earlier (very short sample) */
    if(!v->preextrapolate)
      _preextrapolate_helper(v);

    /* We're encoding the end of the stream.  Just make sure we have
       [at least] a few full blocks of zeroes at the end. */
    /* actually, we don't want zeroes; that could drop a large
       amplitude off a cliff, creating spread spectrum noise that will
       suck to encode.  Extrapolate for the sake of cleanliness. */

    vorbis_analysis_buffer(v,ci->blocksizes[1]*3);
    v->eofflag=v->pcm_current;
    v->pcm_current+=ci->blocksizes[1]*3;

    for(i=0;i<vi->channels;i++){
      if(v->eofflag>order*2){
        /* extrapolate with LPC to fill in */
        long n;

        /* make a predictor filter */
        n=v->eofflag;
        if(n>ci->blocksizes[1])n=ci->blocksizes[1];
        vorbis_lpc_from_data(v->pcm[i]+v->eofflag-n,lpc,n,order);

        /* run the predictor filter */
        vorbis_lpc_predict(lpc,v->pcm[i]+v->eofflag-order,order,
                           v->pcm[i]+v->eofflag,v->pcm_current-v->eofflag);
      }else{
        /* not enough data to extrapolate (unlikely to happen due to
           guarding the overlap, but bulletproof in case that
           assumtion goes away). zeroes will do. */
        memset(v->pcm[i]+v->eofflag,0,
               (v->pcm_current-v->eofflag)*sizeof(*v->pcm[i]));
      }
    }
  }else{

    if(v->pcm_current+vals>v->pcm_storage)
      return(OV_EINVAL);

    v->pcm_current+=vals;

    /* we may want to reverse extrapolate the beginning of a stream
       too... in case we're beginning on a cliff! */
    /* clumsy, but simple.  It only runs once, so simple is good. */
    if(!v->preextrapolate && v->pcm_current-v->centerW>ci->blocksizes[1])
      _preextrapolate_helper(v);
  }
  return(0);
}
Developer ID: denjones, Project: spengine, Lines of code: 61
Example 25: vorbis_analysis_buffer

/****************************************************************************
 * Encode: the whole thing
 ****************************************************************************
 * This function spits out ogg packets.
 ****************************************************************************/
static block_t *Encode( encoder_t *p_enc, block_t *p_aout_buf )
{
    encoder_sys_t *p_sys = p_enc->p_sys;
    ogg_packet oggpacket;
    block_t *p_block, *p_chain = NULL;
    float **buffer;

    /* FIXME: flush buffers in here */
    if( unlikely( !p_aout_buf ) ) return NULL;

    mtime_t i_pts = p_aout_buf->i_pts -
                (mtime_t)1000000 * (mtime_t)p_sys->i_samples_delay /
                (mtime_t)p_enc->fmt_in.audio.i_rate;

    p_sys->i_samples_delay += p_aout_buf->i_nb_samples;

    buffer = vorbis_analysis_buffer( &p_sys->vd, p_aout_buf->i_nb_samples );

    /* convert samples to float and uninterleave */
    for( unsigned int i = 0; i < p_sys->i_channels; i++ )
    {
        for( unsigned int j = 0 ; j < p_aout_buf->i_nb_samples ; j++ )
        {
            buffer[i][j]= ((float *)p_aout_buf->p_buffer)
                                    [j * p_sys->i_channels + p_sys->pi_chan_table[i]];
        }
    }

    vorbis_analysis_wrote( &p_sys->vd, p_aout_buf->i_nb_samples );

    while( vorbis_analysis_blockout( &p_sys->vd, &p_sys->vb ) == 1 )
    {
        int i_samples;

        vorbis_analysis( &p_sys->vb, NULL );
        vorbis_bitrate_addblock( &p_sys->vb );

        while( vorbis_bitrate_flushpacket( &p_sys->vd, &oggpacket ) )
        {
            int i_block_size;
            p_block = block_Alloc( oggpacket.bytes );
            memcpy( p_block->p_buffer, oggpacket.packet, oggpacket.bytes );

            i_block_size = vorbis_packet_blocksize( &p_sys->vi, &oggpacket );

            if( i_block_size < 0 ) i_block_size = 0;
            i_samples = ( p_sys->i_last_block_size + i_block_size ) >> 2;
            p_sys->i_last_block_size = i_block_size;

            p_block->i_length = (mtime_t)1000000 *
                (mtime_t)i_samples / (mtime_t)p_enc->fmt_in.audio.i_rate;

            p_block->i_dts = p_block->i_pts = i_pts;

            p_sys->i_samples_delay -= i_samples;

            /* Update pts */
            i_pts += p_block->i_length;
            block_ChainAppend( &p_chain, p_block );
        }
    }

    return p_chain;
}
Developer ID: J861449197, Project: vlc, Lines of code: 69
Example 26: encode_vorbis_file

//......... (part of the code omitted here) .........
                outfile.write(og.body, og.body_len);
            }
        }

        while(!eos)
        {
            long bytes_per_sample = bits_per_sample/8;

            long bytes=(long)infile.read(readbuffer,llclamp((S32)(READ_BUFFER*num_channels*bytes_per_sample),0,data_left)); /* stereo hardwired here */

            if (bytes==0)
            {
                /* end of file.  this can be done implicitly in the mainline,
                   but it's easier to see here in non-clever fashion.
                   Tell the library we're at end of stream so that it can handle
                   the last frame and mark end of stream in the output properly */

                vorbis_analysis_wrote(&vd,0);
//              eos = 1;
            }
            else
            {
                long i;
                long samples;
                int temp;

                data_left -= bytes;
                /* data to encode */

                /* expose the buffer to submit data */
                float **buffer=vorbis_analysis_buffer(&vd,READ_BUFFER);

                i = 0;
                samples = bytes / (num_channels * bytes_per_sample);

                if (num_channels == 2)
                {
                    if (bytes_per_sample == 2)
                    {
                        /* uninterleave samples */
                        for(i=0; i<samples ;i++)
                        {
                            temp = ((signed char *)readbuffer)[i*4+1];  /*Flawfinder: ignore*/
                            temp += ((signed char *)readbuffer)[i*4+3]; /*Flawfinder: ignore*/
                            temp <<= 8;
                            temp += readbuffer[i*4];
                            temp += readbuffer[i*4+2];

                            buffer[0][i] = ((float)temp) / 65536.f;
                        }
                    }
                    else // presume it's 1 byte per which is unsigned (F#@%ing wav "standard")
                    {
                        /* uninterleave samples */
                        for(i=0; i<samples ;i++)
                        {
                            temp = readbuffer[i*2+0];
                            temp += readbuffer[i*2+1];
                            temp -= 256;
                            buffer[0][i] = ((float)temp) / 256.f;
                        }
                    }
                }
Developer ID: 1234-, Project: SingularityViewer, Lines of code: 67
Example 27: Encode

int Encode(void *ctx, int nNumBytesRead, uint8_t* pbtStream)
{
    ogg_context *context = (ogg_context *)ctx;
    if (!context || !context->callbacks.write)
        return -1;

    int eos = 0;

    int bytes_left = nNumBytesRead;
    while (bytes_left)
    {
        const int channels = 2;
        const int bits_per_channel = 16;

        float **buffer = vorbis_analysis_buffer(&context->vorbisDspState, OGG_BLOCK_FRAMES);

        /* uninterleave samples */
        int bytes_per_frame = channels * (bits_per_channel >> 3);
        int frames = std::min(bytes_left / bytes_per_frame, OGG_BLOCK_FRAMES);
        int16_t* buf = (int16_t*)pbtStream;
        for (int i = 0; i < frames; i++)
        {
            for (int j = 0; j < channels; j++)
                buffer[j][i] = (*buf++) / 32768.0f;
        }
        pbtStream += frames * bytes_per_frame;
        bytes_left -= frames * bytes_per_frame;

        /* tell the library how much we actually submitted */
        vorbis_analysis_wrote(&context->vorbisDspState, frames);

        /* vorbis does some data preanalysis, then divvies up blocks for
           more involved (potentially parallel) processing.  Get a single
           block for encoding now */
        while (vorbis_analysis_blockout(&context->vorbisDspState, &context->vorbisBlock) == 1)
        {
            /* analysis, assume we want to use bitrate management */
            vorbis_analysis(&context->vorbisBlock, NULL);
            vorbis_bitrate_addblock(&context->vorbisBlock);

            ogg_packet packet;
            ogg_page page;
            while (vorbis_bitrate_flushpacket(&context->vorbisDspState, &packet))
            {
                /* weld the packet into the bitstream */
                ogg_stream_packetin(&context->oggStreamState, &packet);

                /* write out pages (if any) */
                while (!eos)
                {
                    int result = ogg_stream_pageout(&context->oggStreamState, &page);
                    if (result == 0)
                        break;
                    context->callbacks.write(context->callbacks.opaque, page.header, page.header_len);
                    context->callbacks.write(context->callbacks.opaque, page.body, page.body_len);

                    /* this could be set above, but for illustrative purposes, I do
                       it here (to show that vorbis does know where the stream ends) */
                    if (ogg_page_eos(&page))
                        eos = 1;
                }
            }
        }
    }

    // return bytes consumed
    return nNumBytesRead - bytes_left;
}
Developer ID: jmarshallnz, Project: audioencoder.vorbis, Lines of code: 69
Example 28: WXUNUSED

int ExportOGG::Export(AudacityProject *project,
                      int numChannels,
                      const wxString &fName,
                      bool selectionOnly,
                      double t0,
                      double t1,
                      MixerSpec *mixerSpec,
                      const Tags *metadata,
                      int WXUNUSED(subformat))
{
    double rate = project->GetRate();
    const TrackList *tracks = project->GetTracks();
    double quality = (gPrefs->Read(wxT("/FileFormats/OggExportQuality"), 50)/(float)100.0);

    wxLogNull logNo;            // temporarily disable wxWidgets error messages
    int updateResult = eProgressSuccess;
    int eos = 0;

    FileIO outFile(fName, FileIO::Output);

    if (!outFile.IsOpened()) {
        wxMessageBox(_("Unable to open target file for writing"));
        return false;
    }

    // All the Ogg and Vorbis encoding data
    ogg_stream_state stream;
    ogg_page         page;
    ogg_packet       packet;

    vorbis_info      info;
    vorbis_comment   comment;
    vorbis_dsp_state dsp;
    vorbis_block     block;

    // Encoding setup
    vorbis_info_init(&info);
    vorbis_encode_init_vbr(&info, numChannels, int(rate + 0.5), quality);

    // Retrieve tags
    if (!FillComment(project, &comment, metadata)) {
        return false;
    }

    // Set up analysis state and auxiliary encoding storage
    vorbis_analysis_init(&dsp, &info);
    vorbis_block_init(&dsp, &block);

    // Set up packet->stream encoder.  According to encoder example,
    // a random serial number makes it more likely that you can make
    // chained streams with concatenation.
    srand(time(NULL));
    ogg_stream_init(&stream, rand());

    // First we need to write the required headers:
    //    1. The Ogg bitstream header, which contains codec setup params
    //    2. The Vorbis comment header
    //    3. The bitstream codebook.
    //
    // After we create those our responsibility is complete, libvorbis will
    // take care of any other ogg bistream constraints (again, according
    // to the example encoder source)
    ogg_packet bitstream_header;
    ogg_packet comment_header;
    ogg_packet codebook_header;

    vorbis_analysis_headerout(&dsp, &comment, &bitstream_header, &comment_header,
                              &codebook_header);

    // Place these headers into the stream
    ogg_stream_packetin(&stream, &bitstream_header);
    ogg_stream_packetin(&stream, &comment_header);
    ogg_stream_packetin(&stream, &codebook_header);

    // Flushing these headers now guarentees that audio data will
    // start on a NEW page, which apparently makes streaming easier
    while (ogg_stream_flush(&stream, &page)) {
        outFile.Write(page.header, page.header_len);
        outFile.Write(page.body, page.body_len);
    }

    const WaveTrackConstArray waveTracks =
        tracks->GetWaveTrackConstArray(selectionOnly, false);
    {
        auto mixer = CreateMixer(waveTracks,
                                 tracks->GetTimeTrack(),
                                 t0, t1,
                                 numChannels, SAMPLES_PER_RUN, false,
                                 rate, floatSample, true, mixerSpec);

        ProgressDialog progress(wxFileName(fName).GetName(),
                                selectionOnly ?
                                _("Exporting the selected audio as Ogg Vorbis") :
                                _("Exporting the entire project as Ogg Vorbis"));

        while (updateResult == eProgressSuccess && !eos) {
            float **vorbis_buffer = vorbis_analysis_buffer(&dsp, SAMPLES_PER_RUN);
            sampleCount samplesThisRun = mixer->Process(SAMPLES_PER_RUN);

            if (samplesThisRun == 0) {
//......... (remaining code omitted) .........
Developer ID: AthiVarathan, Project: audacity, Lines of code: 101
Example 29: PROFILER_LABEL

nsresult
VorbisTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  if (mEosSetInEncoder) {
    return NS_OK;
  }
  PROFILER_LABEL("VorbisTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);
  nsAutoPtr<AudioSegment> sourceSegment;
  sourceSegment = new AudioSegment();
  {
    // Move all the samples from mRawSegment to sourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait if mEncoder is not initialized, or when not enough raw data, but is
    // not the end of stream nor is being canceled.
    while (!mCanceled && mRawSegment.GetDuration() < GetPacketDuration() &&
           !mEndOfStream) {
      mon.Wait();
    }
    VORBISLOG("GetEncodedTrack passes wait, duration is %lld\n",
              mRawSegment.GetDuration());
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
    sourceSegment->AppendFrom(&mRawSegment);
  }

  if (mEndOfStream && (sourceSegment->GetDuration() == 0)
      && !mEosSetInEncoder) {
    mEncodingComplete = true;
    mEosSetInEncoder = true;
    VORBISLOG("[Vorbis] Done encoding.");
    vorbis_analysis_wrote(&mVorbisDsp, 0);
    GetEncodedFrames(aData);
    return NS_OK;
  }

  // Start encoding data.
  AudioSegment::ChunkIterator iter(*sourceSegment);

  AudioDataValue **vorbisBuffer =
    vorbis_analysis_buffer(&mVorbisDsp, (int)sourceSegment->GetDuration());

  int framesCopied = 0;
  AutoTArray<AudioDataValue, 9600> interleavedPcm;
  AutoTArray<AudioDataValue, 9600> nonInterleavedPcm;
  interleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  nonInterleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  while (!iter.IsEnded()) {
    AudioChunk chunk = *iter;
    int frameToCopy = chunk.GetDuration();
    if (!chunk.IsNull()) {
      InterleaveTrackData(chunk, frameToCopy, mChannels,
                          interleavedPcm.Elements() + framesCopied * mChannels);
    } else { // empty data
      memset(interleavedPcm.Elements() + framesCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }
    framesCopied += frameToCopy;
    iter.Next();
  }

  // De-interleave the interleavedPcm.
  DeInterleaveTrackData(interleavedPcm.Elements(), framesCopied, mChannels,
                        nonInterleavedPcm.Elements());

  // Copy the nonInterleavedPcm to vorbis buffer.
  for (uint8_t i = 0; i < mChannels; ++i) {
    memcpy(vorbisBuffer[i], nonInterleavedPcm.Elements() + framesCopied * i,
           framesCopied * sizeof(AudioDataValue));
  }

  // Now the vorbisBuffer contain the all data in non-interleaved.
  // Tell the library how much we actually submitted.
  vorbis_analysis_wrote(&mVorbisDsp, framesCopied);
  VORBISLOG("vorbis_analysis_wrote framesCopied %d\n", framesCopied);

  GetEncodedFrames(aData);

  return NS_OK;
}
Developer ID: Shaif95, Project: gecko-dev, Lines of code: 84
Example 30: oe_encode

int oe_encode ( oe_enc_opt* opt )
{
    ogg_stream_state os;
    ogg_page         og;
    ogg_packet       op;

    vorbis_dsp_state vd;
    vorbis_block     vb;
    vorbis_info      vi;

    long samplesdone = 0;
    int eos;
    long bytes_written = 0;
    long packetsdone = 0;
    int ret = 0;

    vorbis_info_init ( &vi );

    if ( opt->quality >= 0.0f )
    {
        if ( vorbis_encode_init_vbr ( &vi, opt->channels, opt->rate, opt->quality ) )
        {
            vorbis_info_clear ( &vi );
            return 1;
        }
    }
    else
    {
        if ( vorbis_encode_init ( &vi, opt->channels, opt->rate,
                                  opt->max_bitrate > 0 ? opt->max_bitrate * 1000 : -1,
                                  opt->bitrate * 1000,
                                  opt->min_bitrate > 0 ? opt->min_bitrate * 1000 : -1 ) )
        {
            vorbis_info_clear ( &vi );
            return 1;
        }
    }

    vorbis_analysis_init ( &vd, &vi );
    vorbis_block_init ( &vd, &vb );

    ogg_stream_init ( &os, opt->serialno );

    ogg_packet header_main;
    ogg_packet header_comments;
    ogg_packet header_codebooks;
    int result;

    vorbis_analysis_headerout ( &vd, opt->comments, &header_main, &header_comments, &header_codebooks );

    ogg_stream_packetin ( &os, &header_main );
    ogg_stream_packetin ( &os, &header_comments );
    ogg_stream_packetin ( &os, &header_codebooks );

    while ( ( result = ogg_stream_flush ( &os, &og ) ) )
    {
        if ( !result ) break;

        ret = oe_write_page ( &og, opt->out );
        if ( ret != og.header_len + og.body_len )
        {
            ret = 1;
            goto cleanup;
        }
        else
            bytes_written += ret;
    }

    eos = 0;

    while ( !eos )
    {
        float** buffer = vorbis_analysis_buffer ( &vd, READSIZE );
        long samples_read = opt->read_samples ( opt->readdata, buffer, READSIZE );

        if ( samples_read == 0 )
            vorbis_analysis_wrote ( &vd, 0 );
        else
        {
            samplesdone += samples_read;
            vorbis_analysis_wrote ( &vd, samples_read );
        }

        while ( vorbis_analysis_blockout ( &vd, &vb ) == 1 )
        {
            vorbis_analysis ( &vb, NULL );
            vorbis_bitrate_addblock ( &vb );

            while ( vorbis_bitrate_flushpacket ( &vd, &op ) )
            {
                ogg_stream_packetin ( &os, &op );
                packetsdone++;
//......... (remaining code omitted) .........
Developer ID: Fliper12, Project: darkbasicpro, Lines of code: 101
Note: The vorbis_analysis_buffer examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Please do not repost without permission.