
Self-study tutorial: C++ FF_ARRAY_ELEMS function code examples

51自学网 2021-06-01 20:41:09

This article collects typical usage examples of the C++ FF_ARRAY_ELEMS function. If you have been wondering what exactly FF_ARRAY_ELEMS does, how it is used, or what real code that calls it looks like, the hand-picked function examples below should help.

Thirty code examples of the FF_ARRAY_ELEMS function are presented below, taken from real open-source projects and sorted by popularity by default.
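Before diving into the examples, it helps to know what FF_ARRAY_ELEMS actually is: a one-line preprocessor macro from FFmpeg's libavutil that evaluates to the number of elements in a statically sized array. The snippet below is a minimal, self-contained sketch; the macro body mirrors the FFmpeg definition (the exact header it lives in has moved between internal.h and macros.h across versions), while the demo table and main function are illustrative additions of ours, not FFmpeg code.

#include <stdio.h>

/* Element count of a statically sized array; mirrors FFmpeg's definition.
 * Caveat: this only works on true arrays. Applied to a pointer it computes
 * sizeof(pointer) / sizeof(element), which is almost never what you want. */
#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical lookup table, standing in for tables like ff_h263_format. */
static const int table[] = { 10, 20, 30, 40 };

int main(void)
{
    size_t i;

    /* Iterate without hard-coding the length: adding an entry to table[]
     * automatically extends the loop. */
    for (i = 0; i < FF_ARRAY_ELEMS(table); i++)
        printf("table[%zu] = %d\n", i, table[i]);

    return 0;
}

This is the pattern that recurs throughout the examples: a fixed table (of pixel formats, frame sizes, channel substitutions, and so on) is passed to a loop or search helper together with FF_ARRAY_ELEMS(table), so the table can grow or shrink without touching any call site.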

Example 1: ff_h263_encode_picture_header

void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
{
    int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
    int best_clock_code=1;
    int best_divisor=60;
    int best_error= INT_MAX;

    if(s->h263_plus){
        for(i=0; i<2; i++){
            int div, error;
            div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
            div= av_clip(div, 1, 127);
            error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
            if(error < best_error){
                best_error= error;
                best_divisor= div;
                best_clock_code= i;
            }
        }
    }
    s->custom_pcf= best_clock_code!=1 || best_divisor!=60;
    coded_frame_rate= 1800000;
    coded_frame_rate_base= (1000+best_clock_code)*best_divisor;

    avpriv_align_put_bits(&s->pb);

    /* Update the pointer to last GOB */
    s->ptr_lastgob = put_bits_ptr(&s->pb);
    put_bits(&s->pb, 22, 0x20); /* PSC */
    temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / //FIXME use timestamp
                         (coded_frame_rate_base * (int64_t)s->avctx->time_base.den);
    put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */

    put_bits(&s->pb, 1, 1);     /* marker */
    put_bits(&s->pb, 1, 0);     /* h263 id */
    put_bits(&s->pb, 1, 0);     /* split screen off */
    put_bits(&s->pb, 1, 0);     /* camera  off */
    put_bits(&s->pb, 1, 0);     /* freeze picture release off */

    format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);

    if (!s->h263_plus) {
        /* H.263v1 */
        put_bits(&s->pb, 3, format);
        put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
        /* By now UMV IS DISABLED ON H.263v1, since the restrictions
        of H.263v1 UMV implies to check the predicted MV after
        calculation of the current MB to see if we're on the limits */
        put_bits(&s->pb, 1, 0);         /* Unrestricted Motion Vector: off */
        put_bits(&s->pb, 1, 0);         /* SAC: off */
        put_bits(&s->pb, 1, s->obmc);   /* Advanced Prediction */
        put_bits(&s->pb, 1, 0);         /* only I/P frames, no PB frame */
        put_bits(&s->pb, 5, s->qscale);
        put_bits(&s->pb, 1, 0);         /* Continuous Presence Multipoint mode: off */
    } else {
        int ufep=1;
        /* H.263v2 */
        /* H.263 Plus PTYPE */
        put_bits(&s->pb, 3, 7);
        put_bits(&s->pb,3,ufep); /* Update Full Extended PTYPE */
        if (format == 8)
            put_bits(&s->pb,3,6); /* Custom Source Format */
        else
            put_bits(&s->pb, 3, format);

        put_bits(&s->pb,1, s->custom_pcf);
        put_bits(&s->pb,1, s->umvplus); /* Unrestricted Motion Vector */
        put_bits(&s->pb,1,0); /* SAC: off */
        put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */
        put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */
        put_bits(&s->pb,1,s->loop_filter); /* Deblocking Filter */
        put_bits(&s->pb,1,s->h263_slice_structured); /* Slice Structured */
        put_bits(&s->pb,1,0); /* Reference Picture Selection: off */
        put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */
        put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */
        put_bits(&s->pb,1,s->modified_quant); /* Modified Quantization: */
        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
        put_bits(&s->pb,3,0); /* Reserved */

        put_bits(&s->pb, 3, s->pict_type == AV_PICTURE_TYPE_P);
        put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
        put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
        put_bits(&s->pb,1,s->no_rounding); /* Rounding Type */
        put_bits(&s->pb,2,0); /* Reserved */
        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */

        /* This should be here if PLUSPTYPE */
        put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */

        if (format == 8) {
            /* Custom Picture Format (CPFMT) */
            s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

            put_bits(&s->pb,4,s->aspect_ratio_info);
            put_bits(&s->pb,9,(s->width >> 2) - 1);
            put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
            put_bits(&s->pb,9,(s->height >> 2));
            if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){
                put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
// ... part of the code omitted here ...
Developer ID: 18565773346 | Project: android-h264-decoder | Lines of code: 101


Example 2: ff_h264_build_ref_list

// ... part of the code omitted here ...
            case 1: {
                const unsigned int abs_diff_pic_num = val + 1;
                int frame_num;

                if (abs_diff_pic_num > sl->max_pic_num) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "abs_diff_pic_num overflow\n");
                    return AVERROR_INVALIDDATA;
                }

                if (modification_of_pic_nums_idc == 0)
                    pred -= abs_diff_pic_num;
                else
                    pred += abs_diff_pic_num;
                pred &= sl->max_pic_num - 1;

                frame_num = pic_num_extract(h, pred, &pic_structure);

                for (i = h->short_ref_count - 1; i >= 0; i--) {
                    ref = h->short_ref[i];
                    assert(ref->reference);
                    assert(!ref->long_ref);
                    if (ref->frame_num == frame_num &&
                        (ref->reference & pic_structure))
                        break;
                }
                if (i >= 0)
                    ref->pic_id = pred;
                break;
            }
            case 2: {
                int long_idx;
                pic_id = val; // long_term_pic_idx

                long_idx = pic_num_extract(h, pic_id, &pic_structure);

                if (long_idx > 31U) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "long_term_pic_idx overflow\n");
                    return AVERROR_INVALIDDATA;
                }
                ref = h->long_ref[long_idx];
                assert(!(ref && !ref->reference));
                if (ref && (ref->reference & pic_structure)) {
                    ref->pic_id = pic_id;
                    assert(ref->long_ref);
                    i = 0;
                } else {
                    i = -1;
                }
                break;
            }
            default:
                av_assert0(0);
            }

            if (i < 0) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "reference picture missing during reorder\n");
                memset(&sl->ref_list[list][index], 0, sizeof(sl->ref_list[0][0])); // FIXME
            } else {
                for (i = index; i + 1 < sl->ref_count[list]; i++) {
                    if (sl->ref_list[list][i].parent &&
                        ref->long_ref == sl->ref_list[list][i].parent->long_ref &&
                        ref->pic_id   == sl->ref_list[list][i].pic_id)
                        break;
                }
                for (; i > index; i--) {
                    sl->ref_list[list][i] = sl->ref_list[list][i - 1];
                }
                ref_from_h264pic(&sl->ref_list[list][index], ref);
                if (FIELD_PICTURE(h)) {
                    pic_as_field(&sl->ref_list[list][index], pic_structure);
                }
            }
        }
    }
    for (list = 0; list < sl->list_count; list++) {
        for (index = 0; index < sl->ref_count[list]; index++) {
            if (   !sl->ref_list[list][index].parent
                || (!FIELD_PICTURE(h) && (sl->ref_list[list][index].reference&3) != 3)) {
                int i;
                av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture, default is %d\n", h->default_ref[list].poc);
                for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
                    h->last_pocs[i] = INT_MIN;
                if (h->default_ref[list].parent
                    && !(!FIELD_PICTURE(h) && (h->default_ref[list].reference&3) != 3))
                    sl->ref_list[list][index] = h->default_ref[list];
                else
                    return -1;
            }

            av_assert0(av_buffer_get_ref_count(sl->ref_list[list][index].parent->f->buf[0]) > 0);
        }
    }

    if (FRAME_MBAFF(h))
        h264_fill_mbaff_ref_list(sl);

    return 0;
}
Developer ID: Bilibili | Project: FFmpeg | Lines of code: 101


Example 3: av_frame_ref

int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                       src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
Developer ID: DaveDaCoda | Project: mythtv | Lines of code: 85


Example 4: asf_write_header1

// ... part of the code omitted here ...
                avio_wl16(pb, n + 1);
                avio_wl16(pb, 26); // name_len
                avio_wl16(pb,  3); // value_type
                avio_wl32(pb,  4); // value_len
                avio_put_str16le(pb, "AspectRatioX");
                avio_wl32(pb, sar.num);
                avio_wl16(pb, 0);
                // the stream number is set like this below
                avio_wl16(pb, n + 1);
                avio_wl16(pb, 26); // name_len
                avio_wl16(pb,  3); // value_type
                avio_wl32(pb,  4); // value_len
                avio_put_str16le(pb, "AspectRatioY");
                avio_wl32(pb, sar.den);
            }
        }
        end_header(pb, hpos2);
    } else {
        avio_wl32(pb, 0);
    }
    end_header(pb, hpos);

    /* title and other infos */
    if (has_title) {
        int len;
        uint8_t *buf;
        AVIOContext *dyn_buf;

        if (avio_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        hpos = put_header(pb, &ff_asf_comment_header);

        for (n = 0; n < FF_ARRAY_ELEMS(tags); n++) {
            len = tags[n] ? avio_put_str16le(dyn_buf, tags[n]->value) : 0;
            avio_wl16(pb, len);
        }
        len = avio_close_dyn_buf(dyn_buf, &buf);
        avio_write(pb, buf, len);
        av_freep(&buf);
        end_header(pb, hpos);
    }
    if (metadata_count) {
        AVDictionaryEntry *tag = NULL;

        hpos = put_header(pb, &ff_asf_extended_content_header);
        avio_wl16(pb, metadata_count);
        while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            put_str16(pb, tag->key);
            avio_wl16(pb, 0);
            put_str16(pb, tag->value);
        }
        end_header(pb, hpos);
    }
    /* chapters using ASF markers */
    if (!asf->is_streamed && s->nb_chapters) {
        int ret;
        if (ret = asf_write_markers(s))
            return ret;
    }
    /* stream headers */
    for (n = 0; n < s->nb_streams; n++) {
        int64_t es_pos;
        //        ASFStream *stream = &asf->streams[n];

        enc                 = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
Developer ID: 26mansi | Project: FFmpeg | Lines of code: 67


Example 5: add_codec

/* add a codec and set the default parameters */
static void add_codec(FFServerStream *stream, AVCodecContext *av,
                      FFServerConfig *config)
{
    AVStream *st;
    AVDictionary **opts, *recommended = NULL;
    char *enc_config;

    if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
        return;

    opts = av->codec_type == AVMEDIA_TYPE_AUDIO ?
           &config->audio_opts : &config->video_opts;
    av_dict_copy(&recommended, *opts, 0);
    av_opt_set_dict2(av->priv_data, opts, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_dict2(av, opts, AV_OPT_SEARCH_CHILDREN);

    if (av_dict_count(*opts))
        av_log(NULL, AV_LOG_WARNING,
               "Something is wrong, %d options are not set!\n",
               av_dict_count(*opts));

    if (!config->stream_use_defaults) {
        switch(av->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (av->bit_rate == 0)
                report_config_error(config->filename, config->line_num,
                                    AV_LOG_ERROR, &config->errors,
                                    "audio bit rate is not set\n");
            if (av->sample_rate == 0)
                report_config_error(config->filename, config->line_num,
                                    AV_LOG_ERROR, &config->errors,
                                    "audio sample rate is not set\n");
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (av->width == 0 || av->height == 0)
                report_config_error(config->filename, config->line_num,
                                    AV_LOG_ERROR, &config->errors,
                                    "video size is not set\n");
            break;
        default:
            av_assert0(0);
        }

        goto done;
    }

    /* stream_use_defaults = true */
    /* compute default parameters */
    switch(av->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (!av_dict_get(recommended, "b", NULL, 0)) {
            av->bit_rate = 64000;
            av_dict_set_int(&recommended, "b", av->bit_rate, 0);
            WARNING("Setting default value for audio bit rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->bit_rate);
        }
        if (!av_dict_get(recommended, "ar", NULL, 0)) {
            av->sample_rate = 22050;
            av_dict_set_int(&recommended, "ar", av->sample_rate, 0);
            WARNING("Setting default value for audio sample rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->sample_rate);
        }
        if (!av_dict_get(recommended, "ac", NULL, 0)) {
            av->channels = 1;
            av_dict_set_int(&recommended, "ac", av->channels, 0);
            WARNING("Setting default value for audio channel count = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->channels);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (!av_dict_get(recommended, "b", NULL, 0)) {
            av->bit_rate = 64000;
            av_dict_set_int(&recommended, "b", av->bit_rate, 0);
            WARNING("Setting default value for video bit rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->bit_rate);
        }
        if (!av_dict_get(recommended, "time_base", NULL, 0)) {
            av->time_base.den = 5;
            av->time_base.num = 1;
            av_dict_set(&recommended, "time_base", "1/5", 0);
            WARNING("Setting default value for video frame rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->time_base.den);
        }
        if (!av_dict_get(recommended, "video_size", NULL, 0)) {
            av->width = 160;
            av->height = 128;
            av_dict_set(&recommended, "video_size", "160x128", 0);
            WARNING("Setting default value for video size = %dx%d. "
                    "Use NoDefaults to disable it.\n",
                    av->width, av->height);
        }
        /* Bitrate tolerance is less for streaming */
        if (!av_dict_get(recommended, "bt", NULL, 0)) {
            av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
// ... part of the code omitted here ...
Developer ID: rcombs | Project: FFmpeg | Lines of code: 101


Example 6: swap_channel_layouts_on_filter

static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int i, j, k;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
            uint64_t  in_chlayout = link->out_channel_layouts->channel_layouts[0];
            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
            int  in_channels      = av_get_channel_layout_nb_channels(in_chlayout);
            int out_channels      = av_get_channel_layout_nb_channels(out_chlayout);
            int count_diff        = out_channels - in_channels;
            int matched_channels, extra_channels;
            int score = 100000;

            if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
                /* Compute score in case the input or output layout encodes
                   a channel count; in this case the score is not altered by
                   the computation afterwards, as in_chlayout and
                   out_chlayout have both been set to 0 */
                if (FF_LAYOUT2COUNT(in_chlayout))
                    in_channels = FF_LAYOUT2COUNT(in_chlayout);
                if (FF_LAYOUT2COUNT(out_chlayout))
                    out_channels = FF_LAYOUT2COUNT(out_chlayout);
                score -= 10000 + FFABS(out_channels - in_channels) +
                         (in_channels > out_channels ? 10000 : 0);
                in_chlayout = out_chlayout = 0;
                /* Let the remaining computation run, even if the score
                   value is not altered */
            }

            /* channel substitution */
            for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
                uint64_t cmp0 = ch_subst[k][0];
                uint64_t cmp1 = ch_subst[k][1];
                if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&
                    (out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {
                    in_chlayout  &= ~cmp0;
                    out_chlayout &= ~cmp1;
                    /* add score for channel match, minus a deduction for
                       having to do the substitution */
                    score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;
                }
            }

            /* no penalty for LFE channel mismatch */
            if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&
                (out_chlayout & AV_CH_LOW_FREQUENCY))
                score += 10;
            in_chlayout  &= ~AV_CH_LOW_FREQUENCY;
            out_chlayout &= ~AV_CH_LOW_FREQUENCY;

            matched_channels = av_get_channel_layout_nb_channels(in_chlayout &
                                                                 out_chlayout);
            extra_channels   = av_get_channel_layout_nb_channels(out_chlayout &
                                                                 (~in_chlayout));
            score += 10 * matched_channels - 5 * extra_channels;

            if (score > best_score ||
                (count_diff < best_count_diff && score == best_score)) {
                best_score = score;
                best_idx   = j;
                best_count_diff = count_diff;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
               outlink->in_channel_layouts->channel_layouts[best_idx]);
    }
}
Developer ID: Ivnz | Project: iFrameExtracotrWithFFMPEG | Lines of code: 88


Example 7: cuda_transfer_data_from

static int cuda_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
                                   const AVFrame *src)
{
    CUDAFramesContext           *priv = ctx->internal->priv;
    AVCUDADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
    CudaFunctions                 *cu = device_hwctx->internal->cuda_dl;

    CUcontext dummy;
    CUresult err;
    int i;

    err = cu->cuCtxPushCurrent(device_hwctx->cuda_ctx);
    if (err != CUDA_SUCCESS)
        return AVERROR_UNKNOWN;

    for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
        CUDA_MEMCPY2D cpy = {
            .srcMemoryType = CU_MEMORYTYPE_DEVICE,
            .dstMemoryType = CU_MEMORYTYPE_HOST,
            .srcDevice     = (CUdeviceptr)src->data[i],
            .dstHost       = dst->data[i],
            .srcPitch      = src->linesize[i],
            .dstPitch      = dst->linesize[i],
            .WidthInBytes  = FFMIN(src->linesize[i], dst->linesize[i]),
            .Height        = src->height >> (i ? priv->shift_height : 0),
        };

        err = cu->cuMemcpy2D(&cpy);
        if (err != CUDA_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Error transferring the data from the CUDA frame\n");
            return AVERROR_UNKNOWN;
        }
    }

    cu->cuCtxPopCurrent(&dummy);

    return 0;
}

static int cuda_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
                                 const AVFrame *src)
{
    CUDAFramesContext           *priv = ctx->internal->priv;
    AVCUDADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
    CudaFunctions                 *cu = device_hwctx->internal->cuda_dl;

    CUcontext dummy;
    CUresult err;
    int i;

    err = cu->cuCtxPushCurrent(device_hwctx->cuda_ctx);
    if (err != CUDA_SUCCESS)
        return AVERROR_UNKNOWN;

    for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
        CUDA_MEMCPY2D cpy = {
            .srcMemoryType = CU_MEMORYTYPE_HOST,
            .dstMemoryType = CU_MEMORYTYPE_DEVICE,
            .srcHost       = src->data[i],
            .dstDevice     = (CUdeviceptr)dst->data[i],
            .srcPitch      = src->linesize[i],
            .dstPitch      = dst->linesize[i],
            .WidthInBytes  = FFMIN(src->linesize[i], dst->linesize[i]),
            .Height        = src->height >> (i ? priv->shift_height : 0),
        };

        err = cu->cuMemcpy2D(&cpy);
        if (err != CUDA_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Error transferring the data from the CUDA frame\n");
            return AVERROR_UNKNOWN;
        }
    }

    cu->cuCtxPopCurrent(&dummy);

    return 0;
}

static void cuda_device_uninit(AVHWDeviceContext *ctx)
{
    AVCUDADeviceContext *hwctx = ctx->hwctx;

    if (hwctx->internal) {
        if (hwctx->internal->is_allocated && hwctx->cuda_ctx) {
            hwctx->internal->cuda_dl->cuCtxDestroy(hwctx->cuda_ctx);
            hwctx->cuda_ctx = NULL;
        }
        cuda_free_functions(&hwctx->internal->cuda_dl);
    }

    av_freep(&hwctx->internal);
}
Developer ID: Hero2000 | Project: CainCamera | Lines of code: 92


Example 8: opus_decode_packet

static int opus_decode_packet(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    OpusContext *c      = avctx->priv_data;
    AVFrame *frame      = data;
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    int coded_samples   = 0;
    int decoded_samples = INT_MAX;
    int delayed_samples = 0;
    int i, ret;

    /* calculate the number of delayed samples */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        s->out[0] =
        s->out[1] = NULL;
        delayed_samples = FFMAX(delayed_samples,
                                s->delayed_samples + av_audio_fifo_size(c->sync_buffers[i]));
    }

    /* decode the header of the first sub-packet to find out the sample count */
    if (buf) {
        OpusPacket *pkt = &c->streams[0].packet;

        ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
            return ret;
        }

        coded_samples += pkt->frame_count * pkt->frame_duration;
        c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
    }

    frame->nb_samples = coded_samples + delayed_samples;

    /* no input or buffered data => nothing to do */
    if (!frame->nb_samples) {
        *got_frame_ptr = 0;
        return 0;
    }

    /* setup the data buffers */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;
    frame->nb_samples = 0;

    memset(c->out, 0, c->nb_streams * 2 * sizeof(*c->out));
    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];
        if (!map->copy)
            c->out[2 * map->stream_idx + map->channel_idx] = (float*)frame->extended_data[i];
    }

    /* read the data from the sync buffers */
    for (i = 0; i < c->nb_streams; i++) {
        float          **out = c->out + 2 * i;
        int sync_size = av_audio_fifo_size(c->sync_buffers[i]);

        float sync_dummy[32];
        int out_dummy = (!out[0]) | ((!out[1]) << 1);

        if (!out[0])
            out[0] = sync_dummy;
        if (!out[1])
            out[1] = sync_dummy;

        if (out_dummy && sync_size > FF_ARRAY_ELEMS(sync_dummy))
            return AVERROR_BUG;

        ret = av_audio_fifo_read(c->sync_buffers[i], (void**)out, sync_size);
        if (ret < 0)
            return ret;

        if (out_dummy & 1)
            out[0] = NULL;
        else
            out[0] += ret;
        if (out_dummy & 2)
            out[1] = NULL;
        else
            out[1] += ret;

        c->out_size[i] = frame->linesize[0] - ret * sizeof(float);
    }

    /* decode each sub-packet */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        if (i && buf) {
            ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
                return ret;
            }
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
                av_log(avctx, AV_LOG_ERROR,
                       "Mismatching coded sample count in substream %d.\n", i);
                return AVERROR_INVALIDDATA;
            }
// ... part of the code omitted here ...
Developer ID: KangLin | Project: FFmpeg | Lines of code: 101


Example 9: fill_slice_long

static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
                            unsigned position, unsigned size)
{
    const H264Context *h = avctx->priv_data;
    struct dxva_context *ctx = avctx->hwaccel_context;
    unsigned list;

    memset(slice, 0, sizeof(*slice));
    slice->BSNALunitDataLocation = position;
    slice->SliceBytesInBuffer    = size;
    slice->wBadSliceChopping     = 0;

    slice->first_mb_in_slice     = (h->mb_y >> FIELD_OR_MBAFF_PICTURE) * h->mb_width + h->mb_x;
    slice->NumMbsForSlice        = 0; /* XXX it is set once we have all slices */
    slice->BitOffsetToSliceData  = get_bits_count(&h->gb);
    slice->slice_type            = ff_h264_get_slice_type(h);
    if (h->slice_type_fixed)
        slice->slice_type += 5;
    slice->luma_log2_weight_denom       = h->luma_log2_weight_denom;
    slice->chroma_log2_weight_denom     = h->chroma_log2_weight_denom;
    if (h->list_count > 0)
        slice->num_ref_idx_l0_active_minus1 = h->ref_count[0] - 1;
    if (h->list_count > 1)
        slice->num_ref_idx_l1_active_minus1 = h->ref_count[1] - 1;
    slice->slice_alpha_c0_offset_div2   = h->slice_alpha_c0_offset / 2 - 26;
    slice->slice_beta_offset_div2       = h->slice_beta_offset     / 2 - 26;
    slice->Reserved8Bits                = 0;

    for (list = 0; list < 2; list++) {
        unsigned i;
        for (i = 0; i < FF_ARRAY_ELEMS(slice->RefPicList[list]); i++) {
            if (list < h->list_count && i < h->ref_count[list]) {
                const Picture *r = &h->ref_list[list][i];
                unsigned plane;
                fill_picture_entry(&slice->RefPicList[list][i],
                                   ff_dxva2_get_surface_index(ctx, r),
                                   r->f.reference == PICT_BOTTOM_FIELD);
                for (plane = 0; plane < 3; plane++) {
                    int w, o;
                    if (plane == 0 && h->luma_weight_flag[list]) {
                        w = h->luma_weight[i][list][0];
                        o = h->luma_weight[i][list][1];
                    } else if (plane >= 1 && h->chroma_weight_flag[list]) {
                        w = h->chroma_weight[i][list][plane-1][0];
                        o = h->chroma_weight[i][list][plane-1][1];
                    } else {
                        w = 1 << (plane == 0 ? h->luma_log2_weight_denom :
                                               h->chroma_log2_weight_denom);
                        o = 0;
                    }
                    slice->Weights[list][i][plane][0] = w;
                    slice->Weights[list][i][plane][1] = o;
                }
            } else {
                unsigned plane;
                slice->RefPicList[list][i].bPicEntry = 0xff;
                for (plane = 0; plane < 3; plane++) {
                    slice->Weights[list][i][plane][0] = 0;
                    slice->Weights[list][i][plane][1] = 0;
                }
            }
        }
    }
    slice->slice_qs_delta    = 0; /* XXX not implemented by Libav */
    slice->slice_qp_delta    = h->qscale - h->pps.init_qp;
    slice->redundant_pic_cnt = h->redundant_pic_count;
    if (h->slice_type == AV_PICTURE_TYPE_B)
        slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred;
    slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0;
    if (h->deblocking_filter < 2)
        slice->disable_deblocking_filter_idc = 1 - h->deblocking_filter;
    else
        slice->disable_deblocking_filter_idc = h->deblocking_filter;
    slice->slice_id = h->current_slice - 1;
}
Developer ID: dwbuiten | Project: libav | Lines of code: 75


Example 10: vaapi_encode_h264_init_sequence_params

static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
{
    VAAPIEncodeContext                 *ctx = avctx->priv_data;
    VAEncSequenceParameterBufferH264  *vseq = ctx->codec_sequence_params;
    VAEncPictureParameterBufferH264   *vpic = ctx->codec_picture_params;
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params;
    int i;

    {
        vseq->seq_parameter_set_id = 0;

        vseq->level_idc = avctx->level;

        vseq->max_num_ref_frames = 2;

        vseq->picture_width_in_mbs  = priv->mb_width;
        vseq->picture_height_in_mbs = priv->mb_height;

        vseq->seq_fields.bits.chroma_format_idc = 1;
        vseq->seq_fields.bits.frame_mbs_only_flag = 1;
        vseq->seq_fields.bits.direct_8x8_inference_flag = 1;
        vseq->seq_fields.bits.log2_max_frame_num_minus4 = 4;
        vseq->seq_fields.bits.pic_order_cnt_type = 0;

        if (ctx->input_width  != ctx->aligned_width ||
            ctx->input_height != ctx->aligned_height) {
            vseq->frame_cropping_flag = 1;

            vseq->frame_crop_left_offset   = 0;
            vseq->frame_crop_right_offset  =
                (ctx->aligned_width - ctx->input_width) / 2;
            vseq->frame_crop_top_offset    = 0;
            vseq->frame_crop_bottom_offset =
                (ctx->aligned_height - ctx->input_height) / 2;
        } else {
            vseq->frame_cropping_flag = 0;
        }

        vseq->bits_per_second = avctx->bit_rate;
        if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
            vseq->num_units_in_tick = avctx->framerate.num;
            vseq->time_scale        = 2 * avctx->framerate.den;
        } else {
            vseq->num_units_in_tick = avctx->time_base.num;
            vseq->time_scale        = 2 * avctx->time_base.den;
        }

        vseq->intra_period     = ctx->p_per_i * (ctx->b_per_p + 1);
        vseq->intra_idr_period = vseq->intra_period;
        vseq->ip_period        = ctx->b_per_p + 1;
    }

    {
        vpic->CurrPic.picture_id = VA_INVALID_ID;
        vpic->CurrPic.flags      = VA_PICTURE_H264_INVALID;

        for (i = 0; i < FF_ARRAY_ELEMS(vpic->ReferenceFrames); i++) {
            vpic->ReferenceFrames[i].picture_id = VA_INVALID_ID;
            vpic->ReferenceFrames[i].flags      = VA_PICTURE_H264_INVALID;
        }

        vpic->coded_buf = VA_INVALID_ID;

        vpic->pic_parameter_set_id = 0;
        vpic->seq_parameter_set_id = 0;

        vpic->num_ref_idx_l0_active_minus1 = 0;
        vpic->num_ref_idx_l1_active_minus1 = 0;

        vpic->pic_fields.bits.entropy_coding_mode_flag =
            ((avctx->profile & 0xff) != 66);
        vpic->pic_fields.bits.weighted_pred_flag = 0;
        vpic->pic_fields.bits.weighted_bipred_idc = 0;
        vpic->pic_fields.bits.transform_8x8_mode_flag =
            ((avctx->profile & 0xff) >= 100);

        vpic->pic_init_qp = priv->fixed_qp_idr;
    }

    {
        mseq->profile_idc = avctx->profile & 0xff;

        if (avctx->profile & FF_PROFILE_H264_CONSTRAINED)
            mseq->constraint_set1_flag = 1;
        if (avctx->profile & FF_PROFILE_H264_INTRA)
            mseq->constraint_set3_flag = 1;
    }

    return 0;
}
Developer ID: tkoeppe | Project: libav | Lines of code: 91


Example 11: vaapi_encode_h264_init_slice_params

static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
                                               VAAPIEncodePicture *pic,
                                               VAAPIEncodeSlice *slice)
{
    VAAPIEncodeContext                 *ctx = avctx->priv_data;
    VAEncSequenceParameterBufferH264  *vseq = ctx->codec_sequence_params;
    VAEncPictureParameterBufferH264   *vpic = pic->codec_picture_params;
    VAEncSliceParameterBufferH264   *vslice = slice->codec_slice_params;
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264Slice            *pslice;
    VAAPIEncodeH264MiscSliceParams  *mslice;
    int i;

    slice->priv_data = av_mallocz(sizeof(*pslice));
    if (!slice->priv_data)
        return AVERROR(ENOMEM);
    pslice = slice->priv_data;
    mslice = &pslice->misc_slice_params;

    if (pic->type == PICTURE_TYPE_IDR)
        mslice->nal_unit_type = NAL_IDR_SLICE;
    else
        mslice->nal_unit_type = NAL_SLICE;
    switch (pic->type) {
    case PICTURE_TYPE_IDR:
        vslice->slice_type  = SLICE_TYPE_I;
        mslice->nal_ref_idc = 3;
        break;
    case PICTURE_TYPE_I:
        vslice->slice_type  = SLICE_TYPE_I;
        mslice->nal_ref_idc = 2;
        break;
    case PICTURE_TYPE_P:
        vslice->slice_type  = SLICE_TYPE_P;
        mslice->nal_ref_idc = 1;
        break;
    case PICTURE_TYPE_B:
        vslice->slice_type  = SLICE_TYPE_B;
        mslice->nal_ref_idc = 0;
        break;
    default:
        av_assert0(0 && "invalid picture type");
    }

    // Only one slice per frame.
    vslice->macroblock_address = 0;
    vslice->num_macroblocks = priv->mb_width * priv->mb_height;

    vslice->macroblock_info = VA_INVALID_ID;

    vslice->pic_parameter_set_id = vpic->pic_parameter_set_id;
    vslice->idr_pic_id = priv->idr_pic_count++;

    vslice->pic_order_cnt_lsb = pic->display_order &
        ((1 << (4 + vseq->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4)) - 1);

    for (i = 0; i < FF_ARRAY_ELEMS(vslice->RefPicList0); i++) {
        vslice->RefPicList0[i].picture_id = VA_INVALID_ID;
        vslice->RefPicList0[i].flags      = VA_PICTURE_H264_INVALID;
        vslice->RefPicList1[i].picture_id = VA_INVALID_ID;
        vslice->RefPicList1[i].flags      = VA_PICTURE_H264_INVALID;
    }

    av_assert0(pic->nb_refs <= 2);
    if (pic->nb_refs >= 1) {
        // Backward reference for P- or B-frame.
        av_assert0(pic->type == PICTURE_TYPE_P ||
                   pic->type == PICTURE_TYPE_B);

        vslice->num_ref_idx_l0_active_minus1 = 0;
        vslice->RefPicList0[0] = vpic->ReferenceFrames[0];
    }
    if (pic->nb_refs >= 2) {
        // Forward reference for B-frame.
        av_assert0(pic->type == PICTURE_TYPE_B);

        vslice->num_ref_idx_l1_active_minus1 = 0;
        vslice->RefPicList1[0] = vpic->ReferenceFrames[1];
    }

    if (pic->type == PICTURE_TYPE_B)
        vslice->slice_qp_delta = priv->fixed_qp_b - vpic->pic_init_qp;
    else if (pic->type == PICTURE_TYPE_P)
        vslice->slice_qp_delta = priv->fixed_qp_p - vpic->pic_init_qp;
    else
        vslice->slice_qp_delta = priv->fixed_qp_idr - vpic->pic_init_qp;

    vslice->direct_spatial_mv_pred_flag = 1;

    return 0;
}
Developer ID: tkoeppe | Project: libav | Lines of code: 92


Example 12: dxva2_init_pool

static int dxva2_init_pool(AVHWFramesContext *ctx)
{
    AVDXVA2FramesContext *frames_hwctx = ctx->hwctx;
    AVDXVA2DeviceContext *device_hwctx = ctx->device_ctx->hwctx;
    DXVA2FramesContext              *s = ctx->internal->priv;
    int decode = (frames_hwctx->surface_type == DXVA2_VideoDecoderRenderTarget);

    int i;
    HRESULT hr;

    if (ctx->initial_pool_size <= 0)
        return 0;

    hr = IDirect3DDeviceManager9_OpenDeviceHandle(device_hwctx->devmgr, &s->device_handle);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to open device handle\n");
        return AVERROR_UNKNOWN;
    }

    hr = IDirect3DDeviceManager9_GetVideoService(device_hwctx->devmgr,
                                                 s->device_handle,
                                                 decode ? &video_decoder_service : &video_processor_service,
                                                 (void **)&s->service);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create the video service\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
        if (ctx->sw_format == supported_formats[i].pix_fmt) {
            s->format = supported_formats[i].d3d_format;
            break;
        }
    }
    if (i == FF_ARRAY_ELEMS(supported_formats)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n",
               av_get_pix_fmt_name(ctx->sw_format));
        return AVERROR(EINVAL);
    }

    s->surfaces_internal = av_mallocz_array(ctx->initial_pool_size,
                                            sizeof(*s->surfaces_internal));
    if (!s->surfaces_internal)
        return AVERROR(ENOMEM);

    hr = IDirectXVideoAccelerationService_CreateSurface(s->service,
                                                        ctx->width, ctx->height,
                                                        ctx->initial_pool_size - 1,
                                                        s->format, D3DPOOL_DEFAULT, 0,
                                                        frames_hwctx->surface_type,
                                                        s->surfaces_internal, NULL);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Could not create the surfaces\n");
        return AVERROR_UNKNOWN;
    }

    ctx->internal->pool_internal = av_buffer_pool_init2(sizeof(*s->surfaces_internal),
                                                        ctx, dxva2_pool_alloc, NULL);
    if (!ctx->internal->pool_internal)
        return AVERROR(ENOMEM);

    frames_hwctx->surfaces    = s->surfaces_internal;
    frames_hwctx->nb_surfaces = ctx->initial_pool_size;

    return 0;
}
Developer ID: AddictXQ | Project: FFmpeg | Lines of code: 66


Example 13: ff_h264_fill_default_ref_list

int ff_h264_fill_default_ref_list(H264Context *h)
{
    int i, len;

    if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
        H264Picture *sorted[32];
        int cur_poc, list;
        int lens[2];

        if (FIELD_PICTURE(h))
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD];
        else
            cur_poc = h->cur_pic_ptr->poc;

        for (list = 0; list < 2; list++) {
            len  = add_sorted(sorted,       h->short_ref, h->short_ref_count, cur_poc, 1 ^ list);
            len += add_sorted(sorted + len, h->short_ref, h->short_ref_count, cur_poc, 0 ^ list);
            av_assert0(len <= 32);

            len  = build_def_list(h->default_ref_list[list], FF_ARRAY_ELEMS(h->default_ref_list[0]),
                                  sorted, len, 0, h->picture_structure);
            len += build_def_list(h->default_ref_list[list] + len,
                                  FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
                                  h->long_ref, 16, 1, h->picture_structure);
            av_assert0(len <= 32);

            if (len < h->ref_count[list])
                memset(&h->default_ref_list[list][len], 0, sizeof(H264Picture) * (h->ref_count[list] - len));
            lens[list] = len;
        }

        if (lens[0] == lens[1] && lens[1] > 1) {
            for (i = 0; i < lens[0] &&
                        h->default_ref_list[0][i].f.buf[0]->buffer ==
                        h->default_ref_list[1][i].f.buf[0]->buffer; i++);
            if (i == lens[0]) {
                H264Picture tmp;
                COPY_PICTURE(&tmp, &h->default_ref_list[1][0]);
                COPY_PICTURE(&h->default_ref_list[1][0], &h->default_ref_list[1][1]);
                COPY_PICTURE(&h->default_ref_list[1][1], &tmp);
            }
        }
    } else {
        len  = build_def_list(h->default_ref_list[0], FF_ARRAY_ELEMS(h->default_ref_list[0]),
                              h->short_ref, h->short_ref_count, 0, h->picture_structure);
        len += build_def_list(h->default_ref_list[0] + len,
                              FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
                              h-> long_ref, 16, 1, h->picture_structure);
        av_assert0(len <= 32);

        if (len < h->ref_count[0])
            memset(&h->default_ref_list[0][len], 0, sizeof(H264Picture) * (h->ref_count[0] - len));
    }
#ifdef TRACE
    for (i = 0; i < h->ref_count[0]; i++) {
        tprintf(h->avctx, "List0: %s fn:%d 0x%p\n",
                (h->default_ref_list[0][i].long_ref ? "LT" : "ST"),
                h->default_ref_list[0][i].pic_id,
                h->default_ref_list[0][i].f.data[0]);
    }
    if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
        for (i = 0; i < h->ref_count[1]; i++) {
            tprintf(h->avctx, "List1: %s fn:%d 0x%p\n",
                    (h->default_ref_list[1][i].long_ref ? "LT" : "ST"),
                    h->default_ref_list[1][i].pic_id,
                    h->default_ref_list[1][i].f.data[0]);
        }
    }
#endif
    return 0;
}
Developer ID: bblu | Project: FFmpeg | Lines of code: 71


Example 14: vdpau_hevc_start_frame

// ... part of the code omitted here ...
    }

    /* See section 7.4.7.2 of the specification. */
    info->NumPocTotalCurr = ff_hevc_frame_nb_refs(h);
    if (sh->short_term_ref_pic_set_sps_flag == 0 && sh->short_term_rps) {
        /* Corresponds to specification field, NumDeltaPocs[RefRpsIdx].
           Only applicable when short_term_ref_pic_set_sps_flag == 0.
           Implementations will ignore this value in other cases. See 7.4.8. */
        info->NumDeltaPocsOfRefRpsIdx = sh->short_term_rps->rps_idx_num_delta_pocs;
    }
    /* Section 7.6.3.1 of the H.265/HEVC Specification defines the syntax of
       the slice_segment_header. This header contains information that
       some VDPAU implementations may choose to skip. The VDPAU API
       requires client applications to track the number of bits used in the
       slice header for structures associated with short term and long term
       reference pictures. First, VDPAU requires the number of bits used by
       the short_term_ref_pic_set array in the slice_segment_header. */
    info->NumShortTermPictureSliceHeaderBits = sh->short_term_ref_pic_set_size;
    /* Second, VDPAU requires the number of bits used for long term reference
       pictures in the slice_segment_header. This is equal to the number
       of bits used for the contents of the block beginning with
       "if(long_term_ref_pics_present_flag)". */
    info->NumLongTermPictureSliceHeaderBits = sh->long_term_ref_pic_set_size;

    /* The value of PicOrderCntVal of the picture in the access unit
       containing the SEI message. The picture being decoded. */
    info->CurrPicOrderCntVal = h->poc;

    /* Slice Decoding Process - Reference Picture Sets */
    for (size_t i = 0; i < 16; i++) {
        info->RefPics[i] = VDP_INVALID_HANDLE;
        info->PicOrderCntVal[i] = 0;
        info->IsLongTerm[i] = 0;
    }
    for (size_t i = 0, j = 0; i < FF_ARRAY_ELEMS(h->DPB); i++) {
        const HEVCFrame *frame = &h->DPB[i];
        if (frame != h->ref && (frame->flags & (HEVC_FRAME_FLAG_LONG_REF |
                                                HEVC_FRAME_FLAG_SHORT_REF))) {
            if (j > 15) {
                av_log(avctx, AV_LOG_WARNING,
                     "VDPAU only supports up to 16 references in the DPB. "
                     "This frame may not be decoded correctly.\n");
                break;
            }
            /* Array of video reference surfaces.
               Set any unused positions to VDP_INVALID_HANDLE. */
            info->RefPics[j] = ff_vdpau_get_surface_id(frame->frame);
            /* Array of picture order counts. These correspond to positions
               in the RefPics array. */
            info->PicOrderCntVal[j] = frame->poc;
            /* Array used to specify whether a particular RefPic is
               a long term reference. A value of "1" indicates a long-term
               reference. */
            // XXX: Setting this caused glitches in the nvidia implementation
            // Always setting it to zero, produces correct results
            //info->IsLongTerm[j] = frame->flags & HEVC_FRAME_FLAG_LONG_REF;
            info->IsLongTerm[j] = 0;
            j++;
        }
    }
    /* Copy of specification field, see Section 8.3.2 of the
       H.265/HEVC Specification. */
    info->NumPocStCurrBefore = h->rps[ST_CURR_BEF].nb_refs;
    if (info->NumPocStCurrBefore > 8) {
        av_log(avctx, AV_LOG_WARNING,
             "VDPAU only supports up to 8 references in StCurrBefore. "
             "This frame may not be decoded correctly.\n");
Developer ID: KangLin | Project: FFmpeg | Lines of code: 67


Example 15: grab_read_header

static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int video_fd;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = FF_ARRAY_ELEMS(video_formats);

    if (ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
        return -1;
    }
    s->time_base = ap->time_base;

    s->video_win.width = ap->width;
    s->video_win.height = ap->height;

    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    /* no values set, autodetect them */
    if (s->video_win.width <= 0 || s->video_win.height <= 0) {
        if (ioctl(video_fd, VIDIOCGWIN, &s->video_win, sizeof(s->video_win)) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCGWIN: %s\n", strerror(errno));
            goto fail;
        }
    }

    if(avcodec_check_dimensions(s1, s->video_win.width, s->video_win.height) < 0)
        return -1;

    desired_palette = -1;
    desired_depth = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
    printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
           pict.colour,
           pict.hue,
           pict.brightness,
           pict.contrast,
           pict.whiteness);
#endif
    /* try to choose a suitable video format */
    pict.palette = desired_palette;
    pict.depth= desired_depth;
    if (desired_palette == -1 || ioctl(video_fd, VIDIOCSPICT, &pict) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
// ... part of the code omitted here ...
Developer ID: BOTCrusher | Project: sagetv | Lines of code: 101


Example 16: fill_picture_parameters

static void fill_picture_parameters(struct dxva_context *ctx, const H264Context *h,
                                    DXVA_PicParams_H264 *pp)
{
    const Picture *current_picture = h->cur_pic_ptr;
    int i, j;

    memset(pp, 0, sizeof(*pp));
    /* Configure current picture */
    fill_picture_entry(&pp->CurrPic,
                       ff_dxva2_get_surface_index(ctx, current_picture),
                       h->picture_structure == PICT_BOTTOM_FIELD);
    /* Configure the set of references */
    pp->UsedForReferenceFlags  = 0;
    pp->NonExistingFrameFlags  = 0;
    for (i = 0, j = 0; i < FF_ARRAY_ELEMS(pp->RefFrameList); i++) {
        const Picture *r;
        if (j < h->short_ref_count) {
            r = h->short_ref[j++];
        } else {
            r = NULL;
            while (!r && j < h->short_ref_count + 16)
                r = h->long_ref[j++ - h->short_ref_count];
        }
        if (r) {
            fill_picture_entry(&pp->RefFrameList[i],
                               ff_dxva2_get_surface_index(ctx, r),
                               r->long_ref != 0);

            if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
                pp->FieldOrderCntList[i][0] = r->field_poc[0];
            if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
                pp->FieldOrderCntList[i][1] = r->field_poc[1];

            pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num;
            if (r->f.reference & PICT_TOP_FIELD)
                pp->UsedForReferenceFlags |= 1 << (2*i + 0);
            if (r->f.reference & PICT_BOTTOM_FIELD)
                pp->UsedForReferenceFlags |= 1 << (2*i + 1);
        } else {
            pp->RefFrameList[i].bPicEntry = 0xff;
            pp->FieldOrderCntList[i][0]   = 0;
            pp->FieldOrderCntList[i][1]   = 0;
            pp->FrameNumList[i]           = 0;
        }
    }

    pp->wFrameWidthInMbsMinus1        = h->mb_width  - 1;
    pp->wFrameHeightInMbsMinus1       = h->mb_height - 1;
    pp->num_ref_frames                = h->sps.ref_frame_count;

    pp->wBitFields                    = ((h->picture_structure != PICT_FRAME) <<  0) |
                                        ((h->sps.mb_aff &&
                                        (h->picture_structure == PICT_FRAME)) <<  1) |
                                        (h->sps.residual_color_transform_flag <<  2) |
                                        /* sp_for_switch_flag (not implemented by Libav) */
                                        (0                                    <<  3) |
                                        (h->sps.chroma_format_idc             <<  4) |
                                        ((h->nal_ref_idc != 0)                <<  6) |
                                        (h->pps.constrained_intra_pred        <<  7) |
                                        (h->pps.weighted_pred                 <<  8) |
                                        (h->pps.weighted_bipred_idc           <<  9) |
                                        /* MbsConsecutiveFlag */
                                        (1                                    << 11) |
                                        (h->sps.frame_mbs_only_flag           << 12) |
                                        (h->pps.transform_8x8_mode            << 13) |
                                        ((h->sps.level_idc >= 31)             << 14) |
                                        /* IntraPicFlag (Modified if we detect a non
                                         * intra slice in dxva2_h264_decode_slice) */
                                        (1                                    << 15);

    pp->bit_depth_luma_minus8         = h->sps.bit_depth_luma - 8;
    pp->bit_depth_chroma_minus8       = h->sps.bit_depth_chroma - 8;
    if (ctx->workaround & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG)
        pp->Reserved16Bits            = 0;
    else
        pp->Reserved16Bits            = 3; /* FIXME is there a way to detect the right mode ? */
    pp->StatusReportFeedbackNumber    = 1 + ctx->report_id++;
    pp->CurrFieldOrderCnt[0] = 0;
    if ((h->picture_structure & PICT_TOP_FIELD) &&
        current_picture->field_poc[0] != INT_MAX)
        pp->CurrFieldOrderCnt[0] = current_picture->field_poc[0];
    pp->CurrFieldOrderCnt[1] = 0;
    if ((h->picture_structure & PICT_BOTTOM_FIELD) &&
        current_picture->field_poc[1] != INT_MAX)
        pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1];
    pp->pic_init_qs_minus26           = h->pps.init_qs - 26;
    pp->chroma_qp_index_offset        = h->pps.chroma_qp_index_offset[0];
    pp->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
    pp->ContinuationFlag              = 1;
    pp->pic_init_qp_minus26           = h->pps.init_qp - 26;
    pp->num_ref_idx_l0_active_minus1  = h->pps.ref_count[0] - 1;
    pp->num_ref_idx_l1_active_minus1  = h->pps.ref_count[1] - 1;
    pp->Reserved8BitsA                = 0;
    pp->frame_num                     = h->frame_num;
    pp->log2_max_frame_num_minus4     = h->sps.log2_max_frame_num - 4;
    pp->pic_order_cnt_type            = h->sps.poc_type;
    if (h->sps.poc_type == 0)
        pp->log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
    else if (h->sps.poc_type == 1)
        pp->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
// ... part of the code omitted here ...
Developer ID: dwbuiten | Project: libav | Lines of code: 101


示例17: search_for_quantizers_faac

static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
                                       SingleChannelElement *sce,
                                       const float lambda)
{
    int start = 0, i, w, w2, g;
    float uplim[128], maxq[128];
    int minq, maxsf;
    float distfact = ((sce->ics.num_windows > 1) ? 85.80 : 147.84) / lambda;
    int last = 0, lastband = 0, curband = 0;
    float avg_energy = 0.0;
    if (sce->ics.num_windows == 1) {
        start = 0;
        for (i = 0; i < 1024; i++) {
            if (i - start >= sce->ics.swb_sizes[curband]) {
                start += sce->ics.swb_sizes[curband];
                curband++;
            }
            if (sce->coeffs[i]) {
                avg_energy += sce->coeffs[i] * sce->coeffs[i];
                last = i;
                lastband = curband;
            }
        }
    } else {
        for (w = 0; w < 8; w++) {
            const float *coeffs = sce->coeffs + w*128;
            start = 0;
            for (i = 0; i < 128; i++) {
                if (i - start >= sce->ics.swb_sizes[curband]) {
                    start += sce->ics.swb_sizes[curband];
                    curband++;
                }
                if (coeffs[i]) {
                    avg_energy += coeffs[i] * coeffs[i];
                    last = FFMAX(last, i);
                    lastband = FFMAX(lastband, curband);
                }
            }
        }
    }
    last++;
    avg_energy /= last;
    if (avg_energy == 0.0f) {
        for (i = 0; i < FF_ARRAY_ELEMS(sce->sf_idx); i++)
            sce->sf_idx[i] = SCALE_ONE_POS;
        return;
    }
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = w*128;
        for (g = 0; g < sce->ics.num_swb; g++) {
            float *coefs   = sce->coeffs + start;
            const int size = sce->ics.swb_sizes[g];
            int start2 = start, end2 = start + size, peakpos = start;
            float maxval = -1, thr = 0.0f, t;
            maxq[w*16+g] = 0.0f;
            if (g > lastband) {
                maxq[w*16+g] = 0.0f;
                start += size;
                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++)
                    memset(coefs + w2*128, 0, sizeof(coefs[0])*size);
                continue;
            }
            for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                for (i = 0; i < size; i++) {
                    float t = coefs[w2*128+i]*coefs[w2*128+i];
                    maxq[w*16+g] = FFMAX(maxq[w*16+g], fabsf(coefs[w2*128 + i]));
                    thr += t;
                    if (sce->ics.num_windows == 1 && maxval < t) {
                        maxval  = t;
                        peakpos = start+i;
                    }
                }
            }
            if (sce->ics.num_windows == 1) {
                start2 = FFMAX(peakpos - 2, start2);
                end2   = FFMIN(peakpos + 3, end2);
            } else {
                start2 -= start;
                end2   -= start;
            }
            start += size;
            thr = pow(thr / (avg_energy * (end2 - start2)), 0.3 + 0.1*(lastband - g) / lastband);
            t   = 1.0 - (1.0 * start2 / last);
            uplim[w*16+g] = distfact / (1.4 * thr + t*t*t + 0.075);
        }
    }
    memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
    abs_pow34_v(s->scoefs, sce->coeffs, 1024);
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = w*128;
        for (g = 0; g < sce->ics.num_swb; g++) {
            const float *coefs  = sce->coeffs + start;
            const float *scaled = s->scoefs   + start;
            const int size      = sce->ics.swb_sizes[g];
            int scf, prev_scf, step;
            int min_scf = -1, max_scf = 256;
            float curdiff;
            if (maxq[w*16+g] < 21.544) {
                sce->zeroes[w*16+g] = 1;
                start += size;
//......... part of the code omitted here .........
Author: darealshinji, Project: ffmbc, Lines of code: 101
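For reference before the remaining examples: FF_ARRAY_ELEMS is FFmpeg's compile-time element-count macro from libavutil (historically in internal.h, in newer trees in macros.h). A minimal self-contained sketch of the definition and its basic use follows; the demo table is illustrative, not from FFmpeg:

#include <stdio.h>

/* The macro as defined in FFmpeg's libavutil. */
#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
    static const int table[] = { 3, 1, 4, 1, 5, 9 };
    size_t i;

    /* sizeof(table) measures the whole array and sizeof(table[0]) one
     * element, so the quotient is the element count, fixed at compile time. */
    for (i = 0; i < FF_ARRAY_ELEMS(table); i++)
        printf("%d ", table[i]);
    printf("\ncount = %zu\n", FF_ARRAY_ELEMS(table));
    return 0;
}

Because the count is derived from the array's declaration, loops written this way never go stale when entries are added to or removed from the table.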


Example 18: mp3_write_xing

/*
 * Write an empty XING header and initialize respective data.
 */
static void mp3_write_xing(AVFormatContext *s)
{
    MP3Context       *mp3 = s->priv_data;
    AVCodecParameters *par = s->streams[mp3->audio_stream_idx]->codecpar;
    AVDictionaryEntry *enc = av_dict_get(s->streams[mp3->audio_stream_idx]->metadata, "encoder", NULL, 0);
    AVIOContext *dyn_ctx;
    int32_t        header;
    MPADecodeHeader  mpah;
    int srate_idx, i, channels;
    int bitrate_idx;
    int best_bitrate_idx;
    int best_bitrate_error = INT_MAX;
    int ret;
    int ver = 0;
    int lsf, bytes_needed;

    if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) || !mp3->write_xing)
        return;

    for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) {
        const uint16_t base_freq = avpriv_mpa_freq_tab[i];

        if      (par->sample_rate == base_freq)     ver = 0x3; // MPEG 1
        else if (par->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2
        else if (par->sample_rate == base_freq / 4) ver = 0x0; // MPEG 2.5
        else continue;

        srate_idx = i;
        break;
    }
    if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) {
        av_log(s, AV_LOG_WARNING, "Unsupported sample rate, not writing Xing "
               "header.\n");
        return;
    }

    switch (par->channels) {
    case 1:  channels = MPA_MONO;                                          break;
    case 2:  channels = MPA_STEREO;                                        break;
    default: av_log(s, AV_LOG_WARNING, "Unsupported number of channels, "
                    "not writing Xing header.\n");
             return;
    }

    /* dummy MPEG audio header */
    header  =  0xff                                  << 24; // sync
    header |= (0x7 << 5 | ver << 3 | 0x1 << 1 | 0x1) << 16; // sync/audio-version/layer 3/no crc*/
    header |= (srate_idx << 2) << 8;
    header |= channels << 6;

    lsf = !((header & (1 << 20) && header & (1 << 19)));

    mp3->xing_offset = xing_offtbl[ver != 3][channels == 1] + 4;
    bytes_needed     = mp3->xing_offset + XING_SIZE;

    for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) {
        int bit_rate = 1000 * avpriv_mpa_bitrate_tab[lsf][3 - 1][bitrate_idx];
        int error    = FFABS(bit_rate - par->bit_rate);

        if (error < best_bitrate_error) {
            best_bitrate_error = error;
            best_bitrate_idx   = bitrate_idx;
        }
    }

    for (bitrate_idx = best_bitrate_idx; bitrate_idx < 15; bitrate_idx++) {
        int32_t mask = bitrate_idx << (4 + 8);
        header |= mask;

        avpriv_mpegaudio_decode_header(&mpah, header);

        if (bytes_needed <= mpah.frame_size)
            break;

        header &= ~mask;
    }

    ret = avio_open_dyn_buf(&dyn_ctx);
    if (ret < 0)
        return;

    avio_wb32(dyn_ctx, header);

    avpriv_mpegaudio_decode_header(&mpah, header);
    av_assert0(mpah.frame_size >= bytes_needed);

    ffio_fill(dyn_ctx, 0, mp3->xing_offset - 4);
    ffio_wfourcc(dyn_ctx, "Xing");
    avio_wb32(dyn_ctx, 0x01 | 0x02 | 0x04 | 0x08);  // frames / size / TOC / vbr scale

    mp3->size = mpah.frame_size;
    mp3->want = 1;

    avio_wb32(dyn_ctx, 0);  // frames
    avio_wb32(dyn_ctx, 0);  // size
//......... part of the code omitted here .........
Author: Rodeo314, Project: tim-libav, Lines of code: 101
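The sample-rate loop above shows a very common FF_ARRAY_ELEMS idiom: scan a fixed table, break on a match, and detect "no match" by testing whether the index ran off the end. A stripped-down sketch of the same pattern under the assumption of a small stand-in table (these values are illustrative, not the real avpriv_mpa_freq_tab):

#include <stdint.h>
#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for avpriv_mpa_freq_tab. */
static const uint16_t freq_tab[] = { 44100, 48000, 32000 };

static int find_rate_index(int sample_rate)
{
    size_t i;
    for (i = 0; i < FF_ARRAY_ELEMS(freq_tab); i++)
        if (sample_rate == freq_tab[i])
            break;
    if (i == FF_ARRAY_ELEMS(freq_tab))  /* fell off the end: unsupported */
        return -1;
    return (int)i;
}

int main(void)
{
    printf("%d %d\n", find_rate_index(48000), find_rate_index(8000)); /* 1 -1 */
    return 0;
}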


Example 19: avpriv_mpeg4audio_get_config

int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int specific_config_bitindex;

    init_get_bits(&gb, buf, buf_size*8);
    c->object_type = get_object_type(&gb);
    c->sample_rate = get_sample_rate(&gb, &c->sampling_index);
    c->chan_config = get_bits(&gb, 4);
    if (c->chan_config < FF_ARRAY_ELEMS(ff_mpeg4audio_channels))
        c->channels = ff_mpeg4audio_channels[c->chan_config];
    c->sbr = -1;
    c->ps  = -1;
    if (c->object_type == AOT_SBR || (c->object_type == AOT_PS &&
        // check for W6132 Annex YYYY draft MP3onMP4
        !(show_bits(&gb, 3) & 0x03 && !(show_bits(&gb, 9) & 0x3F)))) {
        if (c->object_type == AOT_PS)
            c->ps = 1;
        c->ext_object_type = AOT_SBR;
        c->sbr = 1;
        c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
        c->object_type = get_object_type(&gb);
        if (c->object_type == AOT_ER_BSAC)
            c->ext_chan_config = get_bits(&gb, 4);
    } else {
        c->ext_object_type = AOT_NULL;
        c->ext_sample_rate = 0;
    }
    specific_config_bitindex = get_bits_count(&gb);

    if (c->object_type == AOT_ALS) {
        skip_bits(&gb, 5);
        if (show_bits_long(&gb, 24) != MKBETAG('\0','A','L','S'))
            skip_bits_long(&gb, 24);

        specific_config_bitindex = get_bits_count(&gb);

        if (parse_config_ALS(&gb, c))
            return -1;
    }

    if (c->ext_object_type != AOT_SBR) {
        while (get_bits_left(&gb) > 15) {
            if (show_bits(&gb, 11) == 0x2b7) { // sync extension
                get_bits(&gb, 11);
                c->ext_object_type = get_object_type(&gb);
                if (c->ext_object_type == AOT_SBR && (c->sbr = get_bits1(&gb)) == 1)
                    c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
                if (get_bits_left(&gb) > 11 && get_bits(&gb, 11) == 0x548)
                    c->ps = get_bits1(&gb);
                break;
            } else
                get_bits1(&gb); // skip 1 bit
        }
    }

    // PS requires SBR
    if (!c->sbr)
        c->ps = 0;
    // Limit implicit PS to the HE-AACv2 Profile
    if ((c->ps == -1 && c->object_type != AOT_AAC_LC) || c->channels & ~0x01)
        c->ps = 0;

    return specific_config_bitindex;
}
Author: LibXenonProject, Project: libav-libxenon, Lines of code: 65
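avpriv_mpeg4audio_get_config uses FF_ARRAY_ELEMS defensively: the 4-bit chan_config field parsed from the bitstream is only used as a table index after a check against the table's real length, so the check cannot go stale if the table changes. A hedged sketch of that bounds-check idiom, with a table modeled on (but not identical to) ff_mpeg4audio_channels:

#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative stand-in for ff_mpeg4audio_channels. */
static const unsigned char channels_tab[] = { 0, 1, 2, 3, 4, 5, 6, 8 };

int main(void)
{
    unsigned chan_config = 9; /* pretend this came from get_bits(&gb, 4) */
    int channels = 0;

    /* Index the table only if the parsed value is in range. */
    if (chan_config < FF_ARRAY_ELEMS(channels_tab))
        channels = channels_tab[chan_config];

    printf("channels = %d\n", channels); /* stays 0: 9 is out of range */
    return 0;
}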


Example 20: strcpy (in srt_to_ass)

static const char *srt_to_ass(AVCodecContext *avctx, char *out, char *out_end,
                              const char *in, int x1, int y1, int x2, int y2)
{
    char *param, buffer[128], tmp[128];
    int len, tag_close, sptr = 1, line_start = 1, an = 0, end = 0;
    SrtStack stack[16];

    stack[0].tag[0] = 0;
    strcpy(stack[0].param[PARAM_SIZE],  "{\\fs}");
    strcpy(stack[0].param[PARAM_COLOR], "{\\c}");
    strcpy(stack[0].param[PARAM_FACE],  "{\\fn}");

    if (x1 >= 0 && y1 >= 0) {
        if (x2 >= 0 && y2 >= 0 && (x2 != x1 || y2 != y1))
            snprintf(out, out_end-out,
                     "{\\an1}{\\move(%d,%d,%d,%d)}", x1, y1, x2, y2);
        else
            snprintf(out, out_end-out, "{\\an1}{\\pos(%d,%d)}", x1, y1);
        out += strlen(out);
    }

    for (; out < out_end && !end && *in; in++) {
        switch (*in) {
        case '\r':
            break;
        case '\n':
            if (line_start) {
                end = 1;
                break;
            }
            while (out[-1] == ' ')
                out--;
            snprintf(out, out_end-out, "\\N");
            if (out < out_end) out += strlen(out);
            line_start = 1;
            break;
        case ' ':
            if (!line_start)
                *out++ = *in;
            break;
        case '{':    /* skip all {\xxx} substrings except for {\an%d}
                        and all microdvd like styles such as {Y:xxx} */
            len = 0;
            an += sscanf(in, "{\\an%*1u}%n", &len) >= 0 && len > 0;
            if ((an != 1 && (len = 0, sscanf(in, "{\\%*[^}]}%n", &len) >= 0 && len > 0)) ||
                (len = 0, sscanf(in, "{%*1[CcFfoPSsYy]:%*[^}]}%n", &len) >= 0 && len > 0)) {
                in += len - 1;
            } else
                *out++ = *in;
            break;
        case '<':
            tag_close = in[1] == '/';
            len = 0;
            if (sscanf(in+tag_close+1, "%127[^>]>%n", buffer, &len) >= 1 && len > 0) {
                if ((param = strchr(buffer, ' ')))
                    *param++ = 0;
                if ((!tag_close && sptr < FF_ARRAY_ELEMS(stack)) ||
                    ( tag_close && sptr > 0 && !strcmp(stack[sptr-1].tag, buffer))) {
                    int i, j, unknown = 0;
                    in += len + tag_close;
                    if (!tag_close)
                        memset(stack+sptr, 0, sizeof(*stack));
                    if (!strcmp(buffer, "font")) {
                        if (tag_close) {
                            for (i=PARAM_NUMBER-1; i>=0; i--)
                                if (stack[sptr-1].param[i][0])
                                    for (j=sptr-2; j>=0; j--)
                                        if (stack[j].param[i][0]) {
                                            snprintf(out, out_end-out,
                                                     "%s", stack[j].param[i]);
                                            if (out < out_end) out += strlen(out);
                                            break;
                                        }
                        } else {
                            while (param) {
                                if (!strncmp(param, "size=", 5)) {
                                    unsigned font_size;
                                    param += 5 + (param[5] == '"');
                                    if (sscanf(param, "%u", &font_size) == 1) {
                                        snprintf(stack[sptr].param[PARAM_SIZE],
                                                 sizeof(stack[0].param[PARAM_SIZE]),
                                                 "{\\fs%u}", font_size);
                                    }
                                } else if (!strncmp(param, "color=", 6)) {
                                    param += 6 + (param[6] == '"');
                                    snprintf(stack[sptr].param[PARAM_COLOR],
                                             sizeof(stack[0].param[PARAM_COLOR]),
                                             "{\\c&H%X&}",
                                             html_color_parse(avctx, param));
                                } else if (!strncmp(param, "face=", 5)) {
                                    param += 5 + (param[5] == '"');
                                    len = strcspn(param,
                                                  param[-1] == '"' ? "\"" : " ");
                                    av_strlcpy(tmp, param,
                                               FFMIN(sizeof(tmp), len+1));
                                    param += len;
                                    snprintf(stack[sptr].param[PARAM_FACE],
                                             sizeof(stack[0].param[PARAM_FACE]),
                                             "{\\fn%s}", tmp);
                                }
//......... part of the code omitted here .........
Author: markjreed, Project: vice-emu, Lines of code: 101


Example 21: fill_picture_parameters

//......... part of the code omitted here .........
    pp->dwCodingParamToolFlags = (sps->scaling_list_enable_flag                  <<  0) |
                                 (sps->amp_enabled_flag                          <<  1) |
                                 (sps->sao_enabled                               <<  2) |
                                 (sps->pcm_enabled_flag                          <<  3) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.bit_depth - 1) : 0)            <<  4) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.bit_depth_chroma - 1) : 0)     <<  8) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.log2_min_pcm_cb_size - 3) : 0) << 12) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.log2_max_pcm_cb_size - sps->pcm.log2_min_pcm_cb_size) : 0) << 14) |
                                 (sps->pcm.loop_filter_disable_flag              << 16) |
                                 (sps->long_term_ref_pics_present_flag           << 17) |
                                 (sps->sps_temporal_mvp_enabled_flag             << 18) |
                                 (sps->sps_strong_intra_smoothing_enable_flag    << 19) |
                                 (pps->dependent_slice_segments_enabled_flag     << 20) |
                                 (pps->output_flag_present_flag                  << 21) |
                                 (pps->num_extra_slice_header_bits               << 22) |
                                 (pps->sign_data_hiding_flag                     << 25) |
                                 (pps->cabac_init_present_flag                   << 26) |
                                 (0                                              << 27);
    pp->dwCodingSettingPicturePropertyFlags = (pps->constrained_intra_pred_flag                    <<  0) |
                                              (pps->transform_skip_enabled_flag                    <<  1) |
                                              (pps->cu_qp_delta_enabled_flag                       <<  2) |
                                              (pps->pic_slice_level_chroma_qp_offsets_present_flag <<  3) |
                                              (pps->weighted_pred_flag                             <<  4) |
                                              (pps->weighted_bipred_flag                           <<  5) |
                                              (pps->transquant_bypass_enable_flag                  <<  6) |
                                              (pps->tiles_enabled_flag                             <<  7) |
                                              (pps->entropy_coding_sync_enabled_flag               <<  8) |
                                              (pps->uniform_spacing_flag                           <<  9) |
                                              ((pps->tiles_enabled_flag ? pps->loop_filter_across_tiles_enabled_flag : 0) << 10) |
                                              (pps->seq_loop_filter_across_slices_enabled_flag     << 11) |
                                              (pps->deblocking_filter_override_enabled_flag        << 12) |
                                              (pps->disable_dbf                                    << 13) |
                                              (pps->lists_modification_present_flag                << 14) |
                                              (pps->slice_header_extension_present_flag            << 15) |
                                              (IS_IRAP(h)                                          << 16) |
                                              (IS_IDR(h)                                           << 17) |
                                              /* IntraPicFlag */
                                              (IS_IRAP(h)                                          << 18) |
                                              (0                                                   << 19);
    pp->pps_cb_qp_offset            = pps->cb_qp_offset;
    pp->pps_cr_qp_offset            = pps->cr_qp_offset;
    if (pps->tiles_enabled_flag) {
        pp->num_tile_columns_minus1 = pps->num_tile_columns - 1;
        pp->num_tile_rows_minus1    = pps->num_tile_rows - 1;
        if (!pps->uniform_spacing_flag) {
            for (i = 0; i < pps->num_tile_columns; i++)
                pp->column_width_minus1[i] = pps->column_width[i] - 1;
            for (i = 0; i < pps->num_tile_rows; i++)
                pp->row_height_minus1[i] = pps->row_height[i] - 1;
        }
    }
    pp->diff_cu_qp_delta_depth           = pps->diff_cu_qp_delta_depth;
    pp->pps_beta_offset_div2             = pps->beta_offset / 2;
    pp->pps_tc_offset_div2               = pps->tc_offset / 2;
    pp->log2_parallel_merge_level_minus2 = pps->log2_parallel_merge_level - 2;
    pp->CurrPicOrderCntVal               = h->poc;

    // fill RefPicList from the DPB
    for (i = 0, j = 0; i < FF_ARRAY_ELEMS(pp->RefPicList); i++) {
        const HEVCFrame *frame = NULL;
        while (!frame && j < FF_ARRAY_ELEMS(h->DPB)) {
            if (&h->DPB[j] != current_picture && (h->DPB[j].flags & (HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLAG_SHORT_REF)))
                frame = &h->DPB[j];
            j++;
        }

        if (frame) {
            fill_picture_entry(&pp->RefPicList[i], ff_dxva2_get_surface_index(avctx, ctx, frame->frame), !!(frame->flags & HEVC_FRAME_FLAG_LONG_REF));
            pp->PicOrderCntValList[i] = frame->poc;
        } else {
            pp->RefPicList[i].bPicEntry = 0xff;
            pp->PicOrderCntValList[i]   = 0;
        }
    }

    #define DO_REF_LIST(ref_idx, ref_list) { \
        const RefPicList *rpl = &h->rps[ref_idx]; \
        for (i = 0, j = 0; i < FF_ARRAY_ELEMS(pp->ref_list); i++) { \
            const HEVCFrame *frame = NULL; \
            while (!frame && j < rpl->nb_refs) \
                frame = rpl->ref[j++]; \
            if (frame) \
                pp->ref_list[i] = get_refpic_index(pp, ff_dxva2_get_surface_index(avctx, ctx, frame->frame)); \
            else \
                pp->ref_list[i] = 0xff; \
        } \
    }

    // Fill short term and long term lists
    DO_REF_LIST(ST_CURR_BEF, RefPicSetStCurrBefore);
    DO_REF_LIST(ST_CURR_AFT, RefPicSetStCurrAfter);
    DO_REF_LIST(LT_CURR, RefPicSetLtCurr);

    pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
}
Author: LinuxCao, Project: ffmpeg-2.8.4-for-x86-linux, Lines of code: 101


Example 22: h261_decode_mb

static int h261_decode_mb(H261Context *h)
{
    MpegEncContext *const s = &h->s;
    int i, cbp, xy;

    cbp = 63;
    // Read mba
    do {
        h->mba_diff = get_vlc2(&s->gb, h261_mba_vlc.table,
                               H261_MBA_VLC_BITS, 2);

        /* Check for slice end */
        /* NOTE: GOB can be empty (no MB data) or exist only of MBA_stuffing */
        if (h->mba_diff == MBA_STARTCODE) { // start code
            h->gob_start_code_skipped = 1;
            return SLICE_END;
        }
    } while (h->mba_diff == MBA_STUFFING); // stuffing

    if (h->mba_diff < 0) {
        if (get_bits_left(&s->gb) <= 7)
            return SLICE_END;

        av_log(s->avctx, AV_LOG_ERROR, "illegal mba at %d %d\n", s->mb_x, s->mb_y);
        return SLICE_ERROR;
    }

    h->mba_diff    += 1;
    h->current_mba += h->mba_diff;

    if (h->current_mba > MBA_STUFFING)
        return SLICE_ERROR;

    s->mb_x = ((h->gob_number - 1) % 2) * 11 + ((h->current_mba - 1) % 11);
    s->mb_y = ((h->gob_number - 1) / 2) * 3 + ((h->current_mba - 1) / 11);
    xy      = s->mb_x + s->mb_y * s->mb_stride;
    ff_init_block_index(s);
    ff_update_block_index(s);

    // Read mtype
    h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
    if (h->mtype < 0 || h->mtype >= FF_ARRAY_ELEMS(ff_h261_mtype_map)) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid mtype index %d\n",
               h->mtype);
        return SLICE_ERROR;
    }
    h->mtype = ff_h261_mtype_map[h->mtype];

    // Read mquant
    if (IS_QUANT(h->mtype))
        ff_set_qscale(s, get_bits(&s->gb, 5));

    s->mb_intra = IS_INTRA4x4(h->mtype);

    // Read mv
    if (IS_16X16(h->mtype)) {
        /* Motion vector data is included for all MC macroblocks. MVD is
         * obtained from the macroblock vector by subtracting the vector
         * of the preceding macroblock. For this calculation the vector
         * of the preceding macroblock is regarded as zero in the
         * following three situations:
         * 1) evaluating MVD for macroblocks 1, 12 and 23;
         * 2) evaluating MVD for macroblocks in which MBA does not represent a difference of 1;
         * 3) MTYPE of the previous macroblock was not MC. */
        if ((h->current_mba ==  1) || (h->current_mba == 12) ||
            (h->current_mba == 23) || (h->mba_diff != 1)) {
            h->current_mv_x = 0;
            h->current_mv_y = 0;
        }

        h->current_mv_x = decode_mv_component(&s->gb, h->current_mv_x);
        h->current_mv_y = decode_mv_component(&s->gb, h->current_mv_y);
    } else {
        h->current_mv_x = 0;
        h->current_mv_y = 0;
    }

    // Read cbp
    if (HAS_CBP(h->mtype))
        cbp = get_vlc2(&s->gb, h261_cbp_vlc.table, H261_CBP_VLC_BITS, 2) + 1;

    if (s->mb_intra) {
        s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
        goto intra;
    }

    // set motion vectors
    s->mv_dir                      = MV_DIR_FORWARD;
    s->mv_type                     = MV_TYPE_16X16;
    s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    s->mv[0][0][0]                 = h->current_mv_x * 2; // gets divided by 2 in motion compensation
    s->mv[0][0][1]                 = h->current_mv_y * 2;

intra:
    /* decode each block */
    if (s->mb_intra || HAS_CBP(h->mtype)) {
        s->dsp.clear_blocks(s->block[0]);
        for (i = 0; i < 6; i++) {
            if (h261_decode_block(h, s->block[i], i, cbp & 32) < 0)
                return SLICE_ERROR;
//......... part of the code omitted here .........
Author: smarter, Project: libav, Lines of code: 101


Example 23: FF_ARRAY_ELEMS

    {AV_PIX_FMT_ARGB,    {'A', 8,  'R', 8,  'G', 8, 'B', 8                 }},
    {AV_PIX_FMT_BGR24,   {'B', 8,  'G', 8,  'R', 8                         }},
    {AV_PIX_FMT_BGRA,    {'B', 8,  'G', 8,  'R', 8, 'A', 8                 }},
    {AV_PIX_FMT_RGB24,   {'R', 8,  'G', 8,  'B', 8                         }},
    {AV_PIX_FMT_RGB444BE,{'F', 4,  'R', 4,  'G', 4, 'B', 4                 }},
    {AV_PIX_FMT_RGB48BE, {'R', 8,  'r', 8,  'G', 8, 'g', 8, 'B', 8, 'b', 8 }},
    {AV_PIX_FMT_RGB48BE, {'R', 16, 'G', 16, 'B', 16                        }},
    {AV_PIX_FMT_RGB48LE, {'r', 8,  'R', 8,  'g', 8, 'G', 8, 'b', 8, 'B', 8 }},
    {AV_PIX_FMT_RGB555BE,{'F', 1,  'R', 5,  'G', 5, 'B', 5                 }},
    {AV_PIX_FMT_RGB565BE,{'R', 5,  'G', 6,  'B', 5                         }},
    {AV_PIX_FMT_RGBA,    {'R', 8,  'G', 8,  'B', 8, 'A', 8                 }},
    {AV_PIX_FMT_PAL8,    {'P', 8                                           }},
    {AV_PIX_FMT_GRAY8,   {'A', 8                                           }},
};

static const int num_pixel_layouts = FF_ARRAY_ELEMS(ff_mxf_pixel_layouts);

int ff_mxf_decode_pixel_layout(const char pixel_layout[16], enum AVPixelFormat *pix_fmt)
{
    int x;

    for (x = 0; x < num_pixel_layouts; x++) {
        if (!memcmp(pixel_layout, ff_mxf_pixel_layouts[x].data, 16)) {
            *pix_fmt = ff_mxf_pixel_layouts[x].pix_fmt;
            return 0;
        }
    }

    return -1;
}
Author: Hero2000, Project: CainCamera, Lines of code: 30
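Example 23 is the macro's sweet spot: size a static const table once, at compile time, so lookup loops never hard-code a count. One caveat worth a sketch: the sizeof trick only works on true arrays. Once an array decays to a pointer (for example, as a function parameter), FF_ARRAY_ELEMS silently computes sizeof(pointer)/sizeof(element) instead of the element count. A minimal illustration of the pitfall:

#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

static void takes_pointer(const int a[16]) /* really: const int *a */
{
    /* WRONG: measures the pointer, not the array
     * (e.g. 8 / 4 = 2 on a typical LP64 platform). */
    printf("inside function: %zu\n", FF_ARRAY_ELEMS(a));
}

int main(void)
{
    int table[16] = { 0 };
    printf("at definition:  %zu\n", FF_ARRAY_ELEMS(table)); /* 16 */
    takes_pointer(table);
    return 0;
}

This is why the FFmpeg examples apply the macro directly to the array's defining declaration rather than to pointers passed around later.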


Example 24: rv10_write_header

static int rv10_write_header(AVFormatContext *ctx,
                             int data_size, int index_pos)
{
    RMMuxContext *rm = ctx->priv_data;
    AVIOContext *s = ctx->pb;
    StreamInfo *stream;
    unsigned char *data_offset_ptr, *start_ptr;
    const char *desc, *mimetype;
    int nb_packets, packet_total_size, packet_max_size, size, packet_avg_size, i;
    int bit_rate, v, duration, flags, data_pos;
    AVDictionaryEntry *tag;

    start_ptr = s->buf_ptr;

    ffio_wfourcc(s, ".RMF");
    avio_wb32(s, 18); /* header size */
    avio_wb16(s, 0);
    avio_wb32(s, 0);
    avio_wb32(s, 4 + ctx->nb_streams); /* num headers */

    ffio_wfourcc(s, "PROP");
    avio_wb32(s, 50);
    avio_wb16(s, 0);
    packet_max_size = 0;
    packet_total_size = 0;
    nb_packets = 0;
    bit_rate = 0;
    duration = 0;
    for (i = 0; i < ctx->nb_streams; i++) {
        StreamInfo *stream = &rm->streams[i];
        bit_rate += stream->bit_rate;
        if (stream->packet_max_size > packet_max_size)
            packet_max_size = stream->packet_max_size;
        nb_packets += stream->nb_packets;
        packet_total_size += stream->packet_total_size;
        /* select maximum duration */
        v = (int) (1000.0 * (float)stream->total_frames / stream->frame_rate);
        if (v > duration)
            duration = v;
    }
    avio_wb32(s, bit_rate); /* max bit rate */
    avio_wb32(s, bit_rate); /* avg bit rate */
    avio_wb32(s, packet_max_size);        /* max packet size */
    if (nb_packets > 0)
        packet_avg_size = packet_total_size / nb_packets;
    else
        packet_avg_size = 0;
    avio_wb32(s, packet_avg_size);        /* avg packet size */
    avio_wb32(s, nb_packets);  /* num packets */
    avio_wb32(s, duration); /* duration */
    avio_wb32(s, BUFFER_DURATION);           /* preroll */
    avio_wb32(s, index_pos);           /* index offset */
    /* computation of data the data offset */
    data_offset_ptr = s->buf_ptr;
    avio_wb32(s, 0);           /* data offset : will be patched after */
    avio_wb16(s, ctx->nb_streams);    /* num streams */
    flags = 1 | 2; /* save allowed & perfect play */
    if (!s->seekable)
        flags |= 4; /* live broadcast */
    avio_wb16(s, flags);

    /* comments */
    ffio_wfourcc(s, "CONT");
    size = 4 * 2 + 10;
    for (i = 0; i < FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        tag = av_dict_get(ctx->metadata, ff_rm_metadata[i], NULL, 0);
        if (tag) size += strlen(tag->value);
    }
    avio_wb32(s, size);
    avio_wb16(s, 0);
    for (i = 0; i < FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        tag = av_dict_get(ctx->metadata, ff_rm_metadata[i], NULL, 0);
        put_str(s, tag ? tag->value : "");
    }

    for (i = 0; i < ctx->nb_streams; i++) {
        int codec_data_size;

        stream = &rm->streams[i];

        if (stream->enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            desc = "The Audio Stream";
            mimetype = "audio/x-pn-realaudio";
            codec_data_size = 73;
        } else {
            desc = "The Video Stream";
            mimetype = "video/x-pn-realvideo";
            codec_data_size = 34;
        }

        ffio_wfourcc(s, "MDPR");
        size = 10 + 9 * 4 + strlen(desc) + strlen(mimetype) + codec_data_size;
        avio_wb32(s, size);
        avio_wb16(s, 0);

        avio_wb16(s, i); /* stream number */
        avio_wb32(s, stream->bit_rate); /* max bit rate */
        avio_wb32(s, stream->bit_rate); /* avg bit rate */
        avio_wb32(s, stream->packet_max_size);        /* max packet size */
//......... part of the code omitted here .........
Author: AdamCDunlap, Project: hmc-robot-drivers, Lines of code: 101
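rv10_write_header walks the same ff_rm_metadata name table twice, first to size the CONT block and then to write it, with FF_ARRAY_ELEMS keeping both loops in sync. A self-contained sketch of that two-pass pattern over a string table (the key names here are illustrative stand-ins):

#include <stdio.h>
#include <string.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative stand-in for ff_rm_metadata. */
static const char *metadata_keys[] = { "title", "author", "copyright", "comment" };

int main(void)
{
    size_t i, size = 0;

    /* Pass 1: compute the total payload size. */
    for (i = 0; i < FF_ARRAY_ELEMS(metadata_keys); i++)
        size += strlen(metadata_keys[i]);

    /* Pass 2: emit the entries; both loops stay consistent
     * automatically if a key is ever added or removed. */
    for (i = 0; i < FF_ARRAY_ELEMS(metadata_keys); i++)
        printf("%s\n", metadata_keys[i]);

    printf("total key bytes: %zu\n", size);
    return 0;
}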


Example 25: h264_initialise_ref_list

static void h264_initialise_ref_list(H264Context *h, H264SliceContext *sl)
{
    int i, len;
    int j;

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        H264Picture *sorted[32];
        int cur_poc, list;
        int lens[2];

        if (FIELD_PICTURE(h))
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD];
        else
            cur_poc = h->cur_pic_ptr->poc;

        for (list = 0; list < 2; list++) {
            len  = add_sorted(sorted,       h->short_ref, h->short_ref_count, cur_poc, 1 ^ list);
            len += add_sorted(sorted + len, h->short_ref, h->short_ref_count, cur_poc, 0 ^ list);
            av_assert0(len <= 32);

            len  = build_def_list(sl->ref_list[list], FF_ARRAY_ELEMS(sl->ref_list[0]),
                                  sorted, len, 0, h->picture_structure);
            len += build_def_list(sl->ref_list[list] + len,
                                  FF_ARRAY_ELEMS(sl->ref_list[0]) - len,
                                  h->long_ref, 16, 1, h->picture_structure);
            av_assert0(len <= 32);

            if (len < sl->ref_count[list])
                memset(&sl->ref_list[list][len], 0, sizeof(H264Ref) * (sl->ref_count[list] - len));
            lens[list] = len;
        }

        if (lens[0] == lens[1] && lens[1] > 1) {
            for (i = 0; i < lens[0] &&
                        sl->ref_list[0][i].parent->f->buf[0]->buffer ==
                        sl->ref_list[1][i].parent->f->buf[0]->buffer; i++);
            if (i == lens[0]) {
                FFSWAP(H264Ref, sl->ref_list[1][0], sl->ref_list[1][1]);
            }
        }
    } else {
        len  = build_def_list(sl->ref_list[0], FF_ARRAY_ELEMS(sl->ref_list[0]),
                              h->short_ref, h->short_ref_count, 0, h->picture_structure);
        len += build_def_list(sl->ref_list[0] + len,
                              FF_ARRAY_ELEMS(sl->ref_list[0]) - len,
                              h->long_ref, 16, 1, h->picture_structure);
        av_assert0(len <= 32);

        if (len < sl->ref_count[0])
            memset(&sl->ref_list[0][len], 0, sizeof(H264Ref) * (sl->ref_count[0] - len));
    }
#ifdef TRACE
    for (i = 0; i < sl->ref_count[0]; i++) {
        ff_tlog(h->avctx, "List0: %s fn:%d 0x%p\n",
                (sl->ref_list[0][i].parent ? (sl->ref_list[0][i].parent->long_ref ? "LT" : "ST") : "??"),
                sl->ref_list[0][i].pic_id,
                sl->ref_list[0][i].data[0]);
    }
    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        for (i = 0; i < sl->ref_count[1]; i++) {
            ff_tlog(h->avctx, "List1: %s fn:%d 0x%p\n",
                    (sl->ref_list[1][i].parent ? (sl->ref_list[1][i].parent->long_ref ? "LT" : "ST") : "??"),
                    sl->ref_list[1][i].pic_id,
                    sl->ref_list[1][i].data[0]);
        }
    }
#endif

    for (j = 0; j < 1 + (sl->slice_type_nos == AV_PICTURE_TYPE_B); j++) {
        for (i = 0; i < sl->ref_count[j]; i++) {
            if (sl->ref_list[j][i].parent) {
                if (mismatches_ref(h, sl->ref_list[j][i].parent)) {
                    av_log(h->avctx, AV_LOG_ERROR, "Discarding mismatching reference\n");
                    memset(&sl->ref_list[j][i], 0, sizeof(sl->ref_list[j][i]));
                }
            }
        }
    }

    for (i = 0; i < sl->list_count; i++)
        h->default_ref[i] = sl->ref_list[i][0];
}
Author: Bilibili, Project: FFmpeg, Lines of code: 81


Example 26: ff_h264_decode_ref_pic_list_reordering

//......... part of the code omitted here .........
                    break;
                if (index >= h->ref_count[list]) {
                    av_log(h->avctx, AV_LOG_ERROR, "reference count overflow\n");
                    return -1;
                }

                if (reordering_of_pic_nums_idc < 3) {
                    if (reordering_of_pic_nums_idc < 2) {
                        const unsigned int abs_diff_pic_num = get_ue_golomb(&h->gb) + 1;
                        int frame_num;

                        if (abs_diff_pic_num > h->max_pic_num) {
                            av_log(h->avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
                            return -1;
                        }

                        if (reordering_of_pic_nums_idc == 0)
                            pred -= abs_diff_pic_num;
                        else
                            pred += abs_diff_pic_num;
                        pred &= h->max_pic_num - 1;

                        frame_num = pic_num_extract(h, pred, &pic_structure);

                        for (i = h->short_ref_count - 1; i >= 0; i--) {
                            ref = h->short_ref[i];
                            assert(ref->reference);
                            assert(!ref->long_ref);
                            if (ref->frame_num == frame_num &&
                                (ref->reference & pic_structure))
                                break;
                        }
                        if (i >= 0)
                            ref->pic_id = pred;
                    } else {
                        int long_idx;
                        pic_id = get_ue_golomb(&h->gb); // long_term_pic_idx

                        long_idx = pic_num_extract(h, pic_id, &pic_structure);

                        if (long_idx > 31) {
                            av_log(h->avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n");
                            return -1;
                        }
                        ref = h->long_ref[long_idx];
                        assert(!(ref && !ref->reference));
                        if (ref && (ref->reference & pic_structure)) {
                            ref->pic_id = pic_id;
                            assert(ref->long_ref);
                            i = 0;
                        } else {
                            i = -1;
                        }
                    }

                    if (i < 0) {
                        av_log(h->avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
                        memset(&h->ref_list[list][index], 0, sizeof(Picture)); // FIXME
                    } else {
                        for (i = index; i + 1 < h->ref_count[list]; i++) {
                            if (ref->long_ref == h->ref_list[list][i].long_ref &&
                                ref->pic_id   == h->ref_list[list][i].pic_id)
                                break;
                        }
                        for (; i > index; i--) {
                            COPY_PICTURE(&h->ref_list[list][i], &h->ref_list[list][i - 1]);
                        }
                        COPY_PICTURE(&h->ref_list[list][index], ref);
                        if (FIELD_PICTURE(h)) {
                            pic_as_field(&h->ref_list[list][index], pic_structure);
                        }
                    }
                } else {
                    av_log(h->avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
                    return -1;
                }
            }
        }
    }
    for (list = 0; list < h->list_count; list++) {
        for (index = 0; index < h->ref_count[list]; index++) {
            if (!h->ref_list[list][index].f.data[0]
                || (!FIELD_PICTURE(h) && (h->ref_list[list][index].reference & 3) != 3)) {
                int i;
                av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture, default is %d\n", h->default_ref_list[list][0].poc);
                for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
                    h->last_pocs[i] = INT_MIN;
                if (h->default_ref_list[list][0].f.data[0]
                    && !(!FIELD_PICTURE(h) && (h->default_ref_list[list][0].reference & 3) != 3))
                    COPY_PICTURE(&h->ref_list[list][index], &h->default_ref_list[list][0]);
                else
                    return -1;
            }
            av_assert0(av_buffer_get_ref_count(h->ref_list[list][index].f.buf[0]) > 0);
        }
    }

    return 0;
}
Author: rzr, Project: Tizen_Crosswalk, Lines of code: 101


Example 27: ff_h264_execute_ref_pic_marking

int ff_h264_execute_ref_pic_marking(H264Context *h)
{
    MMCO *mmco = h->mmco;
    int mmco_count;
    int i, av_uninit(j);
    int pps_ref_count[2] = {0};
    int current_ref_assigned = 0, err = 0;
    H264Picture *av_uninit(pic);

    if (!h->explicit_ref_marking)
        generate_sliding_window_mmcos(h);
    mmco_count = h->nb_mmco;

    if ((h->avctx->debug & FF_DEBUG_MMCO) && mmco_count == 0)
        av_log(h->avctx, AV_LOG_DEBUG, "no mmco here\n");

    for (i = 0; i < mmco_count; i++) {
        int av_uninit(structure), av_uninit(frame_num);
        if (h->avctx->debug & FF_DEBUG_MMCO)
            av_log(h->avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode,
                   h->mmco[i].short_pic_num, h->mmco[i].long_arg);

        if (mmco[i].opcode == MMCO_SHORT2UNUSED ||
            mmco[i].opcode == MMCO_SHORT2LONG) {
            frame_num = pic_num_extract(h, mmco[i].short_pic_num, &structure);
            pic       = find_short(h, frame_num, &j);
            if (!pic) {
                if (mmco[i].opcode != MMCO_SHORT2LONG ||
                    !h->long_ref[mmco[i].long_arg]    ||
                    h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
                    av_log(h->avctx, h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG, "mmco: unref short failure\n");
                    err = AVERROR_INVALIDDATA;
                }
                continue;
            }
        }

        switch (mmco[i].opcode) {
        case MMCO_SHORT2UNUSED:
            if (h->avctx->debug & FF_DEBUG_MMCO)
                av_log(h->avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n",
                       h->mmco[i].short_pic_num, h->short_ref_count);
            remove_short(h, frame_num, structure ^ PICT_FRAME);
            break;
        case MMCO_SHORT2LONG:
            if (h->long_ref[mmco[i].long_arg] != pic)
                remove_long(h, mmco[i].long_arg, 0);

            remove_short_at_index(h, j);
            h->long_ref[ mmco[i].long_arg ] = pic;
            if (h->long_ref[mmco[i].long_arg]) {
                h->long_ref[mmco[i].long_arg]->long_ref = 1;
                h->long_ref_count++;
            }
            break;
        case MMCO_LONG2UNUSED:
            j   = pic_num_extract(h, mmco[i].long_arg, &structure);
            pic = h->long_ref[j];
            if (pic) {
                remove_long(h, j, structure ^ PICT_FRAME);
            } else if (h->avctx->debug & FF_DEBUG_MMCO)
                av_log(h->avctx, AV_LOG_DEBUG, "mmco: unref long failure\n");
            break;
        case MMCO_LONG:
            // Comment below left from previous code as it is an interesting note.
            /* First field in pair is in short term list or
             * at a different long term index.
             * This is not allowed; see 7.4.3.3, notes 2 and 3.
             * Report the problem and keep the pair where it is,
             * and mark this field valid.
             */
            if (h->short_ref[0] == h->cur_pic_ptr) {
                av_log(h->avctx, AV_LOG_ERROR, "mmco: cannot assign current picture to short and long at the same time\n");
                remove_short_at_index(h, 0);
            }

            /* make sure the current picture is not already assigned as a long ref */
            if (h->cur_pic_ptr->long_ref) {
                for (j = 0; j < FF_ARRAY_ELEMS(h->long_ref); j++) {
                    if (h->long_ref[j] == h->cur_pic_ptr) {
                        if (j != mmco[i].long_arg)
                            av_log(h->avctx, AV_LOG_ERROR, "mmco: cannot assign current picture to 2 long term references\n");
                        remove_long(h, j, 0);
                    }
                }
            }

            if (h->long_ref[mmco[i].long_arg] != h->cur_pic_ptr) {
                av_assert0(!h->cur_pic_ptr->long_ref);
                remove_long(h, mmco[i].long_arg, 0);

                h->long_ref[mmco[i].long_arg]           = h->cur_pic_ptr;
                h->long_ref[mmco[i].long_arg]->long_ref = 1;
                h->long_ref_count++;
            }

            h->cur_pic_ptr->reference |= h->picture_structure;
            current_ref_assigned = 1;
            break;
        case MMCO_SET_MAX_LONG:
//......... part of the code omitted here .........
Author: Bilibili, Project: FFmpeg, Lines of code: 101


Example 28: add_codec

/* add a codec and set the default parameters */
static void add_codec(FFServerStream *stream, AVCodecContext *av)
{
    AVStream *st;

    if (stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
        return;

    /* compute default parameters */
    switch (av->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (av->bit_rate == 0)
            av->bit_rate = 64000;
        if (av->sample_rate == 0)
            av->sample_rate = 22050;
        if (av->channels == 0)
            av->channels = 1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (av->bit_rate == 0)
            av->bit_rate = 64000;
        if (av->time_base.num == 0) {
            av->time_base.den = 5;
            av->time_base.num = 1;
        }
        if (av->width == 0 || av->height == 0) {
            av->width = 160;
            av->height = 128;
        }
        /* Bitrate tolerance is less for streaming */
        if (av->bit_rate_tolerance == 0)
            av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
                      (int64_t)av->bit_rate*av->time_base.num/av->time_base.den);
        if (av->qmin == 0)
            av->qmin = 3;
        if (av->qmax == 0)
            av->qmax = 31;
        if (av->max_qdiff == 0)
            av->max_qdiff = 3;
        av->qcompress = 0.5;
        av->qblur = 0.5;

        if (!av->nsse_weight)
            av->nsse_weight = 8;

        av->frame_skip_cmp = FF_CMP_DCTMAX;
        if (!av->me_method)
            av->me_method = ME_EPZS;

        /* FIXME: rc_buffer_aggressivity and rc_eq are deprecated */
        av->rc_buffer_aggressivity = 1.0;

        if (!av->rc_eq)
            av->rc_eq = av_strdup("tex^qComp");
        if (!av->i_quant_factor)
            av->i_quant_factor = -0.8;
        if (!av->b_quant_factor)
            av->b_quant_factor = 1.25;
        if (!av->b_quant_offset)
            av->b_quant_offset = 1.25;
        if (!av->rc_max_rate)
            av->rc_max_rate = av->bit_rate * 2;

        if (av->rc_max_rate && !av->rc_buffer_size) {
            av->rc_buffer_size = av->rc_max_rate;
        }
        break;
    default:
        abort();
    }

    st = av_mallocz(sizeof(AVStream));
    if (!st)
        return;
    st->codec = av;
    stream->streams[stream->nb_streams++] = st;
}
Author: Kofktu, Project: FFmpeg_2.4, Lines of code: 79
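add_codec opens with a third common use: treating FF_ARRAY_ELEMS as the capacity of a fixed-size slot array and refusing to append past it. A compact sketch of that guard, with hypothetical struct and function names (only the guard pattern mirrors add_codec):

#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

struct stream_set {
    int nb_streams;
    void *streams[4];   /* hypothetical fixed capacity */
};

/* Returns 0 on success, -1 when the slot array is full. */
static int add_stream(struct stream_set *set, void *st)
{
    if (set->nb_streams >= FF_ARRAY_ELEMS(set->streams))
        return -1;      /* capacity reached, mirrors add_codec's early return */
    set->streams[set->nb_streams++] = st;
    return 0;
}

int main(void)
{
    struct stream_set set = { 0 };
    int dummy, i, ok = 0;

    for (i = 0; i < 6; i++)
        ok += add_stream(&set, &dummy) == 0;
    printf("accepted %d of 6\n", ok); /* accepted 4 of 6 */
    return 0;
}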


Example 29: config_out_props

static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
        inlink->h*2 : inlink->h;

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 16, 128, 128, 16 };
        int i, ret;
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            black[0] = black[3] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 1);
        if (ret < 0)
            return ret;

        /* fill black picture with black */
        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
            int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
            memset(tinterlace->black_data[i], black[i],
                   tinterlace->black_linesize[i] * h);
        }
    }
    if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
               tinterlace->mode);
        tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i < FF_ARRAY_ELEMS(standard_tbs); i++) {
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
           tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
           inlink->h, outlink->h);

    return 0;
}
Author: 63n, Project: FFmpeg, Lines of code: 68


Example 30: ff_h264_decode_seq_parameter_set

int ff_h264_decode_seq_parameter_set(H264Context *h)
{
    MpegEncContext * const s = &h->s;
    int profile_idc, level_idc, constraint_set_flags = 0;
    unsigned int sps_id;
    int i;
    SPS *sps;

    profile_idc = get_bits(&s->gb, 8);
    constraint_set_flags |= get_bits1(&s->gb) << 0;   // constraint_set0_flag
    constraint_set_flags |= get_bits1(&s->gb) << 1;   // constraint_set1_flag
    constraint_set_flags |= get_bits1(&s->gb) << 2;   // constraint_set2_flag
    constraint_set_flags |= get_bits1(&s->gb) << 3;   // constraint_set3_flag
    get_bits(&s->gb, 4); // reserved
    level_idc = get_bits(&s->gb, 8);
    sps_id    = get_ue_golomb_31(&s->gb);

    if (sps_id >= MAX_SPS_COUNT) {
        av_log(h->s.avctx, AV_LOG_ERROR, "sps_id (%d) out of range\n", sps_id);
        return -1;
    }
    sps = av_mallocz(sizeof(SPS));
    if (sps == NULL)
        return -1;

    sps->time_offset_length   = 24;
    sps->profile_idc          = profile_idc;
    sps->constraint_set_flags = constraint_set_flags;
    sps->level_idc            = level_idc;

    memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4));
    memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
    sps->scaling_matrix_present = 0;

    if (sps->profile_idc >= 100) { // high profile
        sps->chroma_format_idc = get_ue_golomb_31(&s->gb);
        if (sps->chroma_format_idc == 3)
            sps->residual_color_transform_flag = get_bits1(&s->gb);
        sps->bit_depth_luma   = get_ue_golomb(&s->gb) + 8;
        sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
        sps->transform_bypass = get_bits1(&s->gb);
        decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8);
    } else {
        sps->chroma_format_idc = 1;
        sps->bit_depth_luma    = 8;
        sps->bit_depth_chroma  = 8;
    }

    sps->log2_max_frame_num = get_ue_golomb(&s->gb) + 4;
    sps->poc_type = get_ue_golomb_31(&s->gb);

    if (sps->poc_type == 0) { // FIXME #define
        sps->log2_max_poc_lsb = get_ue_golomb(&s->gb) + 4;
    } else if (sps->poc_type == 1) { // FIXME #define
        sps->delta_pic_order_always_zero_flag = get_bits1(&s->gb);
        sps->offset_for_non_ref_pic           = get_se_golomb(&s->gb);
        sps->offset_for_top_to_bottom_field   = get_se_golomb(&s->gb);
        sps->poc_cycle_length                 = get_ue_golomb(&s->gb);

        if ((unsigned)sps->poc_cycle_length >= FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) {
            av_log(h->s.avctx, AV_LOG_ERROR, "poc_cycle_length overflow %u\n", sps->poc_cycle_length);
            goto fail;
        }

        for (i = 0; i < sps->poc_cycle_length; i++)
            sps->offset_for_ref_frame[i] = get_se_golomb(&s->gb);
    } else if (sps->poc_type != 2) {
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type);
        goto fail;
    }

    sps->ref_frame_count = get_ue_golomb_31(&s->gb);
    if (sps->ref_frame_count > MAX_PICTURE_COUNT - 2 || sps->ref_frame_count >= 32U) {
        av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n");
        goto fail;
    }
    sps->gaps_in_frame_num_allowed_flag = get_bits1(&s->gb);
    sps->mb_width  = get_ue_golomb(&s->gb) + 1;
    sps->mb_height = get_ue_golomb(&s->gb) + 1;
    if ((unsigned)sps->mb_width >= INT_MAX/16 || (unsigned)sps->mb_height >= INT_MAX/16 ||
        av_image_check_size(16*sps->mb_width, 16*sps->mb_height, 0, h->s.avctx)) {
        av_log(h->s.avctx, AV_LOG_ERROR, "mb_width/height overflow\n");
        goto fail;
    }

    sps->frame_mbs_only_flag = get_bits1(&s->gb);
    if (!sps->frame_mbs_only_flag)
        sps->mb_aff = get_bits1(&s->gb);
    else
        sps->mb_aff = 0;

    sps->direct_8x8_inference_flag = get_bits1(&s->gb);
    if (!sps->frame_mbs_only_flag && !sps->direct_8x8_inference_flag) {
        av_log(h->s.avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
        goto fail;
    }

#ifndef ALLOW_INTERLACE
    if (sps->mb_aff)
        av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n");
#endif
//......... part of the code omitted here .........
Author: AndyA, Project: ffmbc, Lines of code: 101



Note: The FF_ARRAY_ELEMS examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not reproduce without permission.

