This tutorial on C++ swr_free code examples is meant to be practical; we hope it helps you.
This article collects typical usage examples of the C++ swr_free function. If you have been wondering how exactly swr_free is used, how to call it, or what real-world code that uses it looks like, the hand-picked examples below may help. A total of 30 swr_free code examples are shown, sorted by popularity by default. (A minimal, standalone lifecycle sketch follows Example 1.)

Example 1: swr_alloc_set_opts

/**
 * Initialize the audio resampler based on the input and output codec settings.
 * If the input and output sample formats differ, a conversion is required
 * libswresample takes care of this, but requires initialization.
 */
int AudioDecoder::init_resampler(AVCodecContext *input_codec_context,
                                 AVCodecContext *output_codec_context)
{
    int error;

    /**
     * Create a resampler context for the conversion.
     * Set the conversion parameters.
     * Default channel layouts based on the number of channels
     * are assumed for simplicity (they are sometimes not detected
     * properly by the demuxer and/or decoder).
     */
    resample_context = swr_alloc_set_opts(NULL,
                                          av_get_default_channel_layout(output_codec_context->channels),
                                          output_codec_context->sample_fmt,
                                          output_codec_context->sample_rate,
                                          av_get_default_channel_layout(input_codec_context->channels),
                                          input_codec_context->sample_fmt,
                                          input_codec_context->sample_rate,
                                          0, NULL);
    if (!resample_context) {
        ELOG_WARN("Could not allocate resample context\n");
        return AVERROR(ENOMEM);
    }

    /**
     * Perform a sanity check so that the number of converted samples is
     * not greater than the number of samples to be converted.
     * If the sample rates differ, this case has to be handled differently
     */
    ELOG_DEBUG("audio input sample_rate = %d, out %d",
               input_codec_context->sample_rate, output_codec_context->sample_rate);

    /** Open the resampler with the specified parameters. */
    if ((error = swr_init(resample_context)) < 0) {
        ELOG_WARN("Could not open resample context");
        swr_free(&resample_context);
        return error;
    }

    /** Open the resampler with the specified parameters. */
    if ((error = swr_init(resample_context)) < 0) {
        ELOG_DEBUG("Could not open resample context");
        swr_free(&resample_context);
        return error;
    }

    ELOG_DEBUG("swr_init done");
    return 0;
}

Developer ID: fanchuanster, Project: erizo_externalinput, Lines of code: 59
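Most of the examples in this article follow the same lifecycle: allocate a context with swr_alloc()/swr_alloc_set_opts(), open it with swr_init(), convert with swr_convert(), and release it with swr_free() — including on every error path after allocation, as Example 1 does. The standalone sketch below is not taken from any of the collected projects; it is a minimal illustration of that pattern, and the sample rates, channel layouts and formats are arbitrary assumptions. It uses the pre-FFmpeg-5.1 swr_alloc_set_opts() API that the examples here rely on; newer FFmpeg versions deprecate it in favour of swr_alloc_set_opts2().

#include <stdint.h>
#include <libavutil/channel_layout.h>
#include <libavutil/error.h>
#include <libswresample/swresample.h>

/* Minimal sketch: convert interleaved S16 stereo 48 kHz to S16 stereo 44.1 kHz.
 * Returns 0 on success or a negative AVERROR code; all parameters are illustrative. */
static int convert_once(const uint8_t **in, int in_samples,
                        uint8_t **out, int max_out_samples)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
                                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,  /* output */
                                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 48000,  /* input  */
                                         0, NULL);
    if (!swr)
        return AVERROR(ENOMEM);

    int ret = swr_init(swr);
    if (ret < 0) {
        swr_free(&swr);              /* free on the error path, as in Example 1 */
        return ret;
    }

    ret = swr_convert(swr, out, max_out_samples, in, in_samples);

    swr_free(&swr);                  /* takes SwrContext** and resets the pointer to NULL */
    return ret < 0 ? ret : 0;
}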
Example 2: openAudioStream

kxMovieError openAudioStream(size_t audioStream)
{
    AVCodecContext *codecCtx = _formatCtx->streams[audioStream]->codec;
    SwrContext *swrContext = NULL;

    AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);
    if(!codec)
        return kxMovieErrorCodecNotFound;

    if (avcodec_open2(codecCtx, codec, NULL) < 0)
        return kxMovieErrorOpenCodec;

    if (!audioCodecIsSupported(codecCtx)) {
        swrContext = swr_alloc_set_opts(NULL,
                                        av_get_default_channel_layout(codecCtx->channels),
                                        AV_SAMPLE_FMT_S16,
                                        codecCtx->sample_rate,
                                        av_get_default_channel_layout(codecCtx->channels),
                                        codecCtx->sample_fmt,
                                        codecCtx->sample_rate,
                                        0, NULL);
        if (!swrContext || swr_init(swrContext)) {
            if (swrContext)
                swr_free(&swrContext);
            avcodec_close(codecCtx);
            return kxMovieErroReSampler;
        }
    }

    _audioFrame = av_frame_alloc();
    if (!_audioFrame) {
        if (swrContext)
            swr_free(&swrContext);
        avcodec_close(codecCtx);
        return kxMovieErrorAllocateFrame;
    }

    _audioStream = audioStream;
    _audioCodecCtx = codecCtx;
    _swrContext = swrContext;

    return kxMovieErrorNone;
}

Developer ID: lgdiy1982, Project: SDLAudioPlayer, Lines of code: 50
Example 3: render_close

void render_close(void *hrender)
{
    RENDER *render = (RENDER*)hrender;

    // wait visual effect thread exit
    render->render_status = RENDER_CLOSE;
#if CONFIG_ENABLE_VEFFECT
    pthread_join(render->veffect_thread, NULL);
    veffect_destroy(render->veffect_context);
#endif

    //++ audio ++//
    // destroy adev
    adev_destroy(render->adev);

    // free swr context
    swr_free(&render->swr_context);
    //-- audio --//

    //++ video ++//
    // destroy vdev
    vdev_destroy(render->vdev);

    // free sws context
    if (render->sws_context) {
        sws_freeContext(render->sws_context);
    }
    //-- video --//

    // free context
    free(render);
}

Developer ID: rockcarry, Project: ffplayer, Lines of code: 33
Example 4: avcodec_close

void FFmpeg_Decoder::close()
{
    if(mStream)
        avcodec_close((*mStream)->codec);
    mStream = NULL;

    av_free_packet(&mPacket);
    av_freep(&mFrame);
    swr_free(&mSwr);
    av_freep(&mDataBuf);

    if(mFormatCtx)
    {
        if (mFormatCtx->pb != NULL)
        {
            // mFormatCtx->pb->buffer must be freed by hand,
            // if not, valgrind will show memleak, see:
            //
            // https://trac.ffmpeg.org/ticket/1357
            //
            if (mFormatCtx->pb->buffer != NULL)
            {
                av_free(mFormatCtx->pb->buffer);
                mFormatCtx->pb->buffer = NULL;
            }

            av_free(mFormatCtx->pb);
            mFormatCtx->pb = NULL;
        }
        avformat_close_input(&mFormatCtx);
    }

    mDataStream.reset();
}

Developer ID: A1-Triard, Project: openmw, Lines of code: 33
Example 5: render_close

void render_close(void *hrender)
{
    RENDER *render = (RENDER*)hrender;

    //++ audio ++//
    // destroy adev
    adev_destroy(render->adev);

    // free swr context
    swr_free(&render->pSWRContext);
    //-- audio --//

    //++ video ++//
    // destroy vdev
    vdev_destroy(render->vdev);

    // free sws context
    if (render->pSWSContext) {
        sws_freeContext(render->pSWSContext);
    }
    //-- video --//

    // free context
    free(render);
}

Developer ID: chenhongliang2008, Project: ffplayer, Lines of code: 25
Example 6: av_frame_free

void COMXAudioCodecOMX::Dispose()
{
    av_frame_free(&m_pFrame1);
    swr_free(&m_pConvert);
    avcodec_free_context(&m_pCodecContext);
    m_bGotFrame = false;
}

Developer ID: DaHenchmen, Project: DHMC, Lines of code: 7
Example 7: closeAudioStream

void closeAudioStream()
{
    _audioStream = -1;

    if (_swrContext) {
        swr_free(&_swrContext);
        _swrContext = NULL;
    }

    if (_audioFrame) {
        av_free(_audioFrame);
        _audioFrame = NULL;
    }

    if (_audioCodecCtx) {
        avcodec_close(_audioCodecCtx);
        _audioCodecCtx = NULL;
    }

    avformat_close_input(&_formatCtx);
    //av_register_all();
}

Developer ID: lgdiy1982, Project: SDLAudioPlayer, Lines of code: 27
Example 8: avcodec_close

void FFMPEGer::close_stream(AVFormatContext *oc, OutputStream *ost)
{
    if(ost->st->codec != NULL){
        avcodec_close(ost->st->codec);
    }

    if(ost->frame != NULL){
        av_frame_free(&ost->frame);
        ost->frame = NULL;
    }
    if(ost->tmp_frame != NULL){
        av_frame_free(&ost->tmp_frame);
        ost->tmp_frame = NULL;
    }

    if(ost->sws_ctx != NULL){
        sws_freeContext(ost->sws_ctx);
        ost->sws_ctx = NULL;
    }

    if(ost->swr_ctx != NULL){
        swr_free(&ost->swr_ctx);
        ost->swr_ctx = NULL;
    }
}

Developer ID: forbe, Project: recorder, Lines of code: 25
Example 9: avresample_close

void AudioLoader::closeAudioFile() {
    if (!_demuxCtx) {
        return;
    }

#if HAVE_AVRESAMPLE
    if (_convertCtxAv) {
        avresample_close(_convertCtxAv);
        avresample_free(&_convertCtxAv);
    }
#elif HAVE_SWRESAMPLE
    if (_convertCtx) {
        swr_free(&_convertCtx);
    }
#endif

    // Close the codec
    avcodec_close(_audioCtx);
    // Close the audio file
    avformat_close_input(&_demuxCtx);
    // free AVPacket
    av_free_packet(&_packet);

    _demuxCtx = 0;
    _audioCtx = 0;
}

Developer ID: rhythagoras, Project: essentia, Lines of code: 28
Example 10: opus_decode_close

static av_cold int opus_decode_close(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int i;

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        ff_silk_free(&s->silk);
        ff_celt_free(&s->celt);

        av_freep(&s->out_dummy);
        s->out_dummy_allocated_size = 0;

        av_audio_fifo_free(s->celt_delay);
        swr_free(&s->swr);
    }

    av_freep(&c->streams);
    c->nb_streams = 0;

    av_freep(&c->channel_maps);

    return 0;
}

Developer ID: KonradIT, Project: FFmpeg, Lines of code: 25
Example 11: swr_free

VideoDecoder::~VideoDecoder() {
    //Take care of cleanup
    if(swrContext != NULL) {
        swr_free(&swrContext);
    }
    if(swsContext != NULL) {
        sws_freeContext(swsContext);
    }
    if(videoBuffer != NULL) {
        delete[] videoBuffer;
    }

    if(audioCodecContext != NULL) {
        avcodec_close(audioCodecContext);
    }
    if(videoCodecContext != NULL) {
        avcodec_close(videoCodecContext);
    }

    av_frame_free(&audioFrame);
    for(int i = 0; i < VIDEOPLAYER_VIDEO_NUM_BUFFERED_FRAMES; i++) {
        av_frame_free(rgbFrames + i);
    }
    av_frame_free(&frame);

    avformat_close_input(&formatContext);
    if(avioContext != NULL) {
        av_free(avioContext);
    }
}

Developer ID: Caresilabs, Project: gdx-video, Lines of code: 35
Example 12: free

MediaThread::Pcm::~Pcm()
{
    free(buf_);
    if (swr_) {
        swr_free(&swr_);
    }
}

Developer ID: sunkwei, Project: zmovie, Lines of code: 7
Example 13: av_free

void COMXAudioCodecOMX::Dispose()
{
    if (m_pFrame1) {
        av_free(m_pFrame1);
        m_pFrame1 = NULL;
    }

    if (m_pConvert) {
        swr_free(&m_pConvert);
    }

    if (m_pCodecContext) {
        if (m_bOpenedCodec) {
            avcodec_close(m_pCodecContext);
        }
        m_bOpenedCodec = false;
        av_free(m_pCodecContext);
        m_pCodecContext = NULL;
    }

    m_iBufferSize1 = 0;
    m_iBufferSize2 = 0;
    m_iBuffered = 0;
}

Developer ID: kimitobo, Project: ofxOMXPlayer, Lines of code: 29
Example 14: swr_free

Swr::~Swr()
{
    if (this->context != nullptr) {
        swr_free(&this->context);
    }
    assert(this->context == nullptr);
}

Developer ID: CaptainHayashi, Project: playslave-plusplus, Lines of code: 7
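The assert in Example 14 works because of an API detail worth spelling out: swr_free() takes the address of the context pointer (SwrContext **), frees the context if it is non-NULL, and then writes NULL back through that pointer; calling it again on the now-NULL pointer is a no-op. This is why the NULL checks guarding many swr_free() calls in this article are optional, though harmless. A short illustration, assuming current libswresample behaviour:

SwrContext *ctx = swr_alloc();   /* ... configure and use the resampler ... */
swr_free(&ctx);                  /* frees the context and sets ctx back to NULL */
swr_free(&ctx);                  /* safe: freeing an already-NULL context does nothing */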
Example 15: close_stream

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_close(ost->st->codec);
    av_frame_free(&ost->frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

Developer ID: david74chou, Project: fMP4, Lines of code: 7
Example 16: control

// Initialization and runtime control
static int control(struct af_instance_s* af, int cmd, void* arg)
{
  af_resample_t* s = (af_resample_t*)af->setup;
  af_data_t *data  = (af_data_t*)arg;
  int out_rate, test_output_res; // helpers for checking input format

  switch(cmd){
  case AF_CONTROL_REINIT:
    if((af->data->rate == data->rate) || (af->data->rate == 0))
        return AF_DETACH;

    af->data->nch    = data->nch;
    if (af->data->nch > AF_NCH) af->data->nch = AF_NCH;
    af->data->format = AF_FORMAT_S16_NE;
    af->data->bps    = 2;
    af->mul = (double)af->data->rate / data->rate;
    af->delay = af->data->nch * s->filter_length / FFMIN(af->mul, 1); // *bps*.5

    if (s->ctx_out_rate != af->data->rate || s->ctx_in_rate != data->rate ||
        s->ctx_filter_size != s->filter_length || s->ctx_phase_shift != s->phase_shift ||
        s->ctx_linear != s->linear || s->ctx_cutoff != s->cutoff) {
        swr_free(&s->swrctx);
        if((s->swrctx=swr_alloc()) == NULL) return AF_ERROR;
        av_opt_set_int(s->swrctx, "out_sample_rate", af->data->rate, 0);
        av_opt_set_int(s->swrctx, "in_sample_rate", data->rate, 0);
        av_opt_set_int(s->swrctx, "filter_size", s->filter_length, 0);
        av_opt_set_int(s->swrctx, "phase_shift", s->phase_shift, 0);
        av_opt_set_int(s->swrctx, "linear_interp", s->linear, 0);
        av_opt_set_double(s->swrctx, "cutoff", s->cutoff, 0);
        av_opt_set_sample_fmt(s->swrctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_sample_fmt(s->swrctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(s->swrctx, "in_channel_count", af->data->nch, 0);
        av_opt_set_int(s->swrctx, "out_channel_count", af->data->nch, 0);
        if(swr_init(s->swrctx) < 0) return AF_ERROR;
        s->ctx_out_rate    = af->data->rate;
        s->ctx_in_rate     = data->rate;
        s->ctx_filter_size = s->filter_length;
        s->ctx_phase_shift = s->phase_shift;
        s->ctx_linear      = s->linear;
        s->ctx_cutoff      = s->cutoff;
    }

    // hack to make af_test_output ignore the samplerate change
    out_rate = af->data->rate;
    af->data->rate = data->rate;
    test_output_res = af_test_output(af, (af_data_t*)arg);
    af->data->rate = out_rate;
    return test_output_res;
  case AF_CONTROL_COMMAND_LINE:{
    s->cutoff= 0.0;
    sscanf((char*)arg,"%d:%d:%d:%d:%lf",
           &af->data->rate, &s->filter_length, &s->linear, &s->phase_shift, &s->cutoff);
    if(s->cutoff <= 0.0) s->cutoff= FFMAX(1.0 - 6.5/(s->filter_length+8), 0.80);
    return AF_OK;
  }
  case AF_CONTROL_RESAMPLE_RATE | AF_CONTROL_SET:
    af->data->rate = *(int*)arg;
    return AF_OK;
  }
  return AF_UNKNOWN;
}

Developer ID: basinilya, Project: mplayer, Lines of code: 60
Example 17: Reset

CAEEncoderFFmpeg::~CAEEncoderFFmpeg()
{
    Reset();
    av_freep(&m_CodecCtx);
    av_freep(&m_ResampBuffer);
    if (m_SwrCtx)
        swr_free(&m_SwrCtx);
}

Developer ID: cg110, Project: xbmc, Lines of code: 8
Example 18: av_freep

AudioDecoder::~AudioDecoder() {
    if (m_outData) {
        av_freep(&m_outData[0]);
    }
    av_freep(&m_outData);

    swr_free(&m_resampleCtx);
}

Developer ID: fjelliott, Project: fs2open.github.com, Lines of code: 8
Example 19: ffFreeAVCtx

static void ffFreeAVCtx(AVCtx *ctx) {
    if (ctx->frame)
        av_frame_free(&ctx->frame);
    if (ctx->swr_ctx)
        swr_free(&ctx->swr_ctx);
    if (ctx->sws_ctx)
        sws_freeContext(ctx->sws_ctx);
}

Developer ID: JohnCrash, Project: ffplayer, Lines of code: 9
Example 20: priv

void AudioController::uninit(af_instance *af) {
    auto ac = priv(af);
    auto d = ac->d;
    Q_ASSERT(ac != nullptr);
    d->af = nullptr;
    if (d->swr)
        swr_free(&d->swr);
    talloc_free(d->resampled);
    d->resampled = nullptr;
}

Developer ID: jsj2008, Project: cmplayer, Lines of code: 9
Example 21: audio_convert

static void audio_convert(dtaudio_decoder_t *decoder, AVFrame * dst, AVFrame * src)
{
    int nb_sample;
    int dst_buf_size;
    int out_channels;
    //for audio post processor
    //struct SwsContext *m_sws_ctx = NULL;
    struct SwrContext *m_swr_ctx = NULL;
    //ResampleContext *m_resample_ctx=NULL;
    enum AVSampleFormat src_fmt = avctxp->sample_fmt;
    enum AVSampleFormat dst_fmt = AV_SAMPLE_FMT_S16;

    dst->linesize[0] = src->linesize[0];
    *dst = *src;
    dst->data[0] = NULL;
    out_channels = decoder->para.dst_channels;
    nb_sample = frame->nb_samples;
    dst_buf_size = nb_sample * av_get_bytes_per_sample(dst_fmt) * out_channels;
    dst->data[0] = (uint8_t *) av_malloc(dst_buf_size);
    avcodec_fill_audio_frame(dst, out_channels, dst_fmt, dst->data[0], dst_buf_size, 0);
    dt_debug(TAG, "SRCFMT:%d dst_fmt:%d \n", src_fmt, dst_fmt);
    /* resample to AV_SAMPLE_FMT_S16 */
    if (src_fmt != dst_fmt || out_channels != decoder->para.channels) {
        if (!m_swr_ctx) {
            uint64_t in_channel_layout = av_get_default_channel_layout(avctxp->channels);
            uint64_t out_channel_layout = av_get_default_channel_layout(out_channels);
            m_swr_ctx = swr_alloc_set_opts(NULL, out_channel_layout, dst_fmt, avctxp->sample_rate,
                                           in_channel_layout, src_fmt, avctxp->sample_rate, 0, NULL);
            swr_init(m_swr_ctx);
        }
        uint8_t **out = (uint8_t **) & dst->data;
        const uint8_t **in = (const uint8_t **) src->extended_data;
        if (m_swr_ctx) {
            int ret, out_count;
            out_count = nb_sample;
            ret = swr_convert(m_swr_ctx, out, out_count, in, nb_sample);
            if (ret < 0) {
                //set audio mute
                memset(dst->data[0], 0, dst_buf_size);
                printf("audio convert failed, set mute data\n");
            }
        }
    } else {
        // no need to convert ,just copy
        memcpy(dst->data[0], src->data[0], src->linesize[0]);
    }
    //free context
    if (m_swr_ctx != NULL) {
        swr_free(&m_swr_ctx);
    }
    //if(m_resample_ctx!=NULL)
    //    audio_resample_close(m_resample_ctx);
}

Developer ID: peterfuture, Project: dtplayer_c, Lines of code: 56
Example 22: av_frame_free

SimpleAT3::~SimpleAT3() {
    if (frame_)
        av_frame_free(&frame_);
    if (codecCtx_)
        avcodec_close(codecCtx_);
    codecCtx_ = 0;
    codec_ = 0;
    if (swrCtx_)
        swr_free(&swrCtx_);
}

Developer ID: A671DR218, Project: ppsspp, Lines of code: 10
Example 23: uninit_opts

void uninit_opts(void)
{
#if CONFIG_SWSCALE
    sws_freeContext(sws_opts);
    sws_opts = NULL;
#endif

    swr_free(&swr_opts);
    av_dict_free(&format_opts);
    av_dict_free(&codec_opts);
}

Developer ID: AlexanderDenkMA, Project: TypeChef-mplayerAnalysis, Lines of code: 10
Example 24: FFMpegLoader

FFMpegLoader::~FFMpegLoader() {
    if (codecContext)
        avcodec_free_context(&codecContext);
    if (swrContext)
        swr_free(&swrContext);
    if (dstSamplesData) {
        if (dstSamplesData[0]) {
            av_freep(&dstSamplesData[0]);
        }
        av_freep(&dstSamplesData);
    }
    av_frame_free(&frame);
}

Developer ID: Peque, Project: tdesktop, Lines of code: 11
Example 25: hb_audio_resample_free

void hb_audio_resample_free(hb_audio_resample_t *resample)
{
    if (resample != NULL)
    {
        if (resample->swresample != NULL)
        {
            swr_free(&resample->swresample);
        }
        free(resample);
    }
}

Developer ID: RandomEngy, Project: HandBrake, Lines of code: 11
Example 26: av_write_trailer

CVideoLivRecord::~CVideoLivRecord(void)
{
    av_write_trailer(m_pAVFormatContext);
    if (m_bHasVideo){
        avcodec_close(m_pVideoStream->codec);
        av_frame_free(&m_pVideoFrame);
        av_frame_free(&m_pVideoBkFrame);
        sws_freeContext(m_pVideoSwsctx);
        swr_free(&m_pVideoSwrctx);
    }
    if (m_bHasAudio){
        avcodec_close(m_pAudioStream->codec);
        av_frame_free(&m_pVideoFrame);
        av_frame_free(&m_pVideoBkFrame);
        swr_free(&m_pAudioSwrctx);
    }
    if (!(m_pAVFormatContext->oformat->flags & AVFMT_NOFILE))
        avio_close(m_pAVFormatContext->pb);
    avformat_free_context(m_pAVFormatContext);
}

Developer ID: u-stone, Project: CodeBase, Lines of code: 20
Example 27: release

void release(JNIEnv *env, jclass clz)
{
    if(i==0)
        return;
    LOGI("%s","release");
    pause=1;
    usleep(10000);
    swr_free(&au_convert_ctx);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    i=0;
}

Developer ID: 4455jkjh, Project: music_player, Lines of code: 12
Example 28: swr_free

AudioResamplerFfmpeg::~AudioResamplerFfmpeg() {
    if (_context) {
#ifdef HAVE_SWRESAMPLE_H
        swr_free(&_context);
#elif HAVE_AVRESAMPLE_H
        avresample_close(_context);
        avresample_free(&_context);
#else
        audio_resample_close(_context);
#endif
    }
}

Developer ID: ascendancy721, Project: gnash, Lines of code: 12
Example 29: av_frame_free

void Parser::EndProcess()
{
    try
    {
        if (frame_in)
        {
            av_frame_free(&frame_in);
            frame_in = NULL;
        }
        if (frame_out)
        {
            av_frame_free(&frame_out);
            frame_out = NULL;
        }

        for (int i = 0; i < (int)vetAux.size(); i++)
            if (vetAux[i].size() > 0)
                vetAux[i].clear();
        if (vetAux.size() > 0)
            vetAux.clear();

        for (int idxFrame = 0; idxFrame < (int)this->bufFrames.size(); idxFrame++)
        {
            for (int i = 0; i < (int)bufFrames[idxFrame].size(); i++)
                if (bufFrames[idxFrame][i].size() > 0)
                    bufFrames[idxFrame][i].clear();
            if (bufFrames[idxFrame].size() > 0)
                bufFrames[idxFrame].clear();
        }
        if (bufFrames.size() > 0)
            bufFrames.clear();

        if (dic)
            av_freep(dic);

        if (fmt_ctx_out->pb)
            avio_close(fmt_ctx_out->pb);

        if (swr_ctx)
            swr_free(&swr_ctx);

        if (fmt_ctx_out)
            avformat_free_context(fmt_ctx_out);

        if (cdc_ctx_out)
            avcodec_close(cdc_ctx_out);
    }
    catch (SignalException& err)
    {
        objLog->mr_printf(MR_LOG_ERROR, idRadio, "Destructor Parser: %s\n", err.what());
    }
    catch(...)
    {
        objLog->mr_printf(MR_LOG_ERROR, idRadio, "Destructor Parser: General erros\n");
    }
}

Developer ID: josekleber, Project: captura, Lines of code: 53
Example 30: uninit

// Deallocate memory
static void uninit(struct af_instance_s* af)
{
    if(af->data)
        free(af->data->audio);
    free(af->data);
    if(af->setup){
        af_resample_t *s = af->setup;
        swr_free(&s->swrctx);
        av_free(s->in[0]);
        av_free(s->tmp[0]);
        free(s);
    }
}

Developer ID: basinilya, Project: mplayer, Lines of code: 14
Note: the swr_free examples in this article were collected from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.