
C++ FF_CEIL_RSHIFT Function Code Examples


This article collects typical usage examples of the C++ FF_CEIL_RSHIFT function (an FFmpeg macro). If you are wondering what FF_CEIL_RSHIFT does, how to call it, or what real-world uses look like, the curated examples below should help.

The following presents 30 code examples of the FF_CEIL_RSHIFT function, sorted by popularity by default.
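Before the examples, it helps to know what the macro actually does: FF_CEIL_RSHIFT(a, b) right-shifts a by b bits but rounds the result up instead of down, i.e. it computes ceil(a / 2^b) for non-negative a. FFmpeg uses it to derive chroma-plane dimensions from luma dimensions so that odd widths and heights keep their last chroma column or row. The definition in libavutil/common.h of this era looked roughly like the sketch below (paraphrased, not the verbatim header; later FFmpeg versions renamed the macro AV_CEIL_RSHIFT):

/* Ceiling right shift: FF_CEIL_RSHIFT(a, b) == ceil(a / 2^b) for a >= 0.
 * Two equivalent ways to round a right shift up: negate-shift-negate when
 * b is not a compile-time constant, or add (2^b - 1) before shifting when
 * it is. av_builtin_constant_p wraps the compiler's __builtin_constant_p. */
#define FF_CEIL_RSHIFT(a, b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) : \
                                                          ((a) + (1 << (b)) - 1) >> (b))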

Example 1: config_input

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    MaskedMergeContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;
    s->height[1] = s->height[2] = FF_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = FF_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->depth = desc->comp[0].depth;
    s->max = 1 << s->depth;
    s->half = s->max / 2;

    if (desc->comp[0].depth == 8)
        s->maskedmerge = maskedmerge8;
    else
        s->maskedmerge = maskedmerge16;

    return 0;
}

Contributor: ChristianFrisson | Project: FFmpeg | Lines: 27
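The pattern in Example 1 recurs throughout this page: planes 1 and 2 (chroma) get dimensions derived with FF_CEIL_RSHIFT from the format's log2 chroma subsampling factors, while planes 0 and 3 (luma and alpha) keep the full frame size. A minimal, self-contained sketch of the same arithmetic (hypothetical sizes, plain C, no FFmpeg headers needed):

#include <stdio.h>

/* Same ceiling right-shift as above, written for constant shifts. */
#define CEIL_RSHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))

int main(void)
{
    /* Hypothetical YUV420P-style frame: log2_chroma_w = log2_chroma_h = 1. */
    int w = 1920, h = 1081;           /* note the odd height */
    int chroma_w = CEIL_RSHIFT(w, 1); /* 960 */
    int chroma_h = CEIL_RSHIFT(h, 1); /* 541, not 540: rounded up */

    printf("luma %dx%d -> chroma %dx%d\n", w, h, chroma_w, chroma_h);
    return 0;
}

The rounding matters for odd dimensions: a plain 1081 >> 1 would give 540 and drop the last chroma row.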


Example 2: chr_h_scale

static int chr_h_scale(SwsContext *c, SwsFilterDescriptor *desc, int sliceY, int sliceH)
{
    FilterContext *instance = desc->instance;
    int srcW = FF_CEIL_RSHIFT(desc->src->width, desc->src->h_chr_sub_sample);
    int dstW = FF_CEIL_RSHIFT(desc->dst->width, desc->dst->h_chr_sub_sample);
    int xInc = instance->xInc;

    uint8_t ** src1 = desc->src->plane[1].line;
    uint8_t ** dst1 = desc->dst->plane[1].line;
    uint8_t ** src2 = desc->src->plane[2].line;
    uint8_t ** dst2 = desc->dst->plane[2].line;

    int src_pos1 = sliceY - desc->src->plane[1].sliceY;
    int dst_pos1 = sliceY - desc->dst->plane[1].sliceY;

    int src_pos2 = sliceY - desc->src->plane[2].sliceY;
    int dst_pos2 = sliceY - desc->dst->plane[2].sliceY;

    int i;

    for (i = 0; i < sliceH; ++i) {
        if (c->hcscale_fast) {
            c->hcscale_fast(c, (uint16_t*)dst1[dst_pos1+i], (uint16_t*)dst2[dst_pos2+i], dstW, src1[src_pos1+i], src2[src_pos2+i], srcW, xInc);
        } else {
            c->hcScale(c, (uint16_t*)dst1[dst_pos1+i], dstW, src1[src_pos1+i], instance->filter, instance->filter_pos, instance->filter_size);
            c->hcScale(c, (uint16_t*)dst2[dst_pos2+i], dstW, src2[src_pos2+i], instance->filter, instance->filter_pos, instance->filter_size);
        }

        if (c->chrConvertRange)
            c->chrConvertRange((uint16_t*)dst1[dst_pos1+i], (uint16_t*)dst2[dst_pos2+i], dstW);

        desc->dst->plane[1].sliceH += 1;
        desc->dst->plane[2].sliceH += 1;
    }
    return sliceH;
}

Contributor: LittleKey | Project: FFmpeg | Lines: 35


Example 3: config_input

static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx = inlink->dst;
    ATADenoiseContext *s = ctx->priv;
    int depth;

    s->nb_planes = desc->nb_components;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    depth = desc->comp[0].depth;
    if (depth == 8)
        s->filter_slice = filter_slice8;
    else
        s->filter_slice = filter_slice16;

    s->thra[0] = s->fthra[0] * (1 << depth) - 1;
    s->thra[1] = s->fthra[1] * (1 << depth) - 1;
    s->thra[2] = s->fthra[2] * (1 << depth) - 1;
    s->thrb[0] = s->fthrb[0] * (1 << depth) - 1;
    s->thrb[1] = s->fthrb[1] * (1 << depth) - 1;
    s->thrb[2] = s->fthrb[2] * (1 << depth) - 1;

    return 0;
}

Contributor: zhenghuadai | Project: FFmpeg | Lines: 29


Example 4: filter_slice_chroma

static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
                               int nb_jobs)
{
    FadeContext *s = ctx->priv;
    AVFrame *frame = arg;
    int i, j, plane;
    const int width = FF_CEIL_RSHIFT(frame->width, s->hsub);
    const int height= FF_CEIL_RSHIFT(frame->height, s->vsub);
    int slice_start = (height *  jobnr   ) / nb_jobs;
    int slice_end   = (height * (jobnr+1)) / nb_jobs;

    for (plane = 1; plane < 3; plane++) {
        for (i = slice_start; i < slice_end; i++) {
            uint8_t *p = frame->data[plane] + i * frame->linesize[plane];
            for (j = 0; j < width; j++) {
                /* 8421367 = ((128 << 1) + 1) << 15. It is an integer
                 * representation of 128.5. The .5 is for rounding
                 * purposes. */
                *p = ((*p - 128) * s->factor + 8421367) >> 16;
                p++;
            }
        }
    }

    return 0;
}

Contributor: 309746069 | Project: FFmpeg | Lines: 26


Example 5: config_input

static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    VectorscopeContext *s = inlink->dst->priv;

    if (s->mode == GRAY)
        s->pd = 0;
    else {
        if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1))
            s->pd = 0;
        else if ((s->x == 0 && s->y == 2) || (s->x == 2 && s->y == 0))
            s->pd = 1;
        else if ((s->x == 0 && s->y == 1) || (s->x == 1 && s->y == 0))
            s->pd = 2;
    }

    switch (inlink->format) {
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GBRP:
        s->bg_color = black_gbrp_color;
        break;
    default:
        s->bg_color = black_yuva_color;
    }

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    return 0;
}

Contributor: quanxinglong | Project: FFmpeg | Lines: 32


Example 6: filter_frame

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    PixdescTestContext *priv = inlink->dst->priv;
    AVFilterLink *outlink    = inlink->dst->outputs[0];
    AVFrame *out;
    int i, c, w = inlink->w, h = inlink->h;
    const int cw = FF_CEIL_RSHIFT(w, priv->pix_desc->log2_chroma_w);
    const int ch = FF_CEIL_RSHIFT(h, priv->pix_desc->log2_chroma_h);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);

    for (i = 0; i < 4; i++) {
        const int h1 = i == 1 || i == 2 ? ch : h;
        if (out->data[i]) {
            uint8_t *data = out->data[i] +
                (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h1-1));
            memset(data, 0, FFABS(out->linesize[i]) * h1);
        }
    }

    /* copy palette */
    if (priv->pix_desc->flags & AV_PIX_FMT_FLAG_PAL ||
        priv->pix_desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
        memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);

    for (c = 0; c < priv->pix_desc->nb_components; c++) {
        const int w1 = c == 1 || c == 2 ? cw : w;
        const int h1 = c == 1 || c == 2 ? ch : h;

        for (i = 0; i < h1; i++) {
            av_read_image_line(priv->line,
                               (void*)in->data,
                               in->linesize,
                               priv->pix_desc,
                               0, i, c, w1, 0);

            av_write_image_line(priv->line,
                                out->data,
                                out->linesize,
                                priv->pix_desc,
                                0, i, c, w1);
        }
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

Contributor: markjreed | Project: vice-emu | Lines: 53


Example 7: config_input_ref

static int config_input_ref(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx  = inlink->dst;
    PSNRContext *s = ctx->priv;
    int j;

    s->nb_components = desc->nb_components;
    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    switch (inlink->format) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA444P:
        s->max[0] = 235;
        s->max[3] = 255;
        s->max[1] = s->max[2] = 240;
        break;
    default:
        s->max[0] = s->max[1] = s->max[2] = s->max[3] = 255;
    }

    s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
    s->comps[0] = s->is_rgb ? 'r' : 'y' ;
    s->comps[1] = s->is_rgb ? 'g' : 'u' ;
    s->comps[2] = s->is_rgb ? 'b' : 'v' ;
    s->comps[3] = 'a';

    for (j = 0; j < s->nb_components; j++)
        s->average_max += s->max[j];
    s->average_max /= s->nb_components;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    return 0;
}

Contributor: StephanieSpanjian | Project: FFmpeg | Lines: 53


Example 8: filter_frame

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    DelogoContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFrame *out;
    int hsub0 = desc->log2_chroma_w;
    int vsub0 = desc->log2_chroma_h;
    int direct = 0;
    int plane;
    AVRational sar;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    sar = in->sample_aspect_ratio;
    /* Assume square pixels if SAR is unknown */
    if (!sar.num)
        sar.num = sar.den = 1;

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
        int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
        int vsub = plane == 1 || plane == 2 ? vsub0 : 0;

        apply_delogo(out->data[plane], out->linesize[plane],
                     in ->data[plane], in ->linesize[plane],
                     FF_CEIL_RSHIFT(inlink->w, hsub),
                     FF_CEIL_RSHIFT(inlink->h, vsub),
                     sar, s->x>>hsub, s->y>>vsub,
                     /* Up and left borders were rounded down, inject lost bits
                      * into width and height to avoid error accumulation */
                     FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
                     FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
                     s->band>>FFMIN(hsub, vsub),
                     s->show, direct);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

Contributor: VFR-maniac | Project: ffmpeg | Lines: 52


Example 9: copy_picture_field

/**
 * Copy picture field from src to dst.
 *
 * @param src_field copy from upper, lower field or both
 * @param interleave leave a padding line between each copied line
 * @param dst_field copy to upper or lower field,
 *        only meaningful when interleave is selected
 * @param flags context flags
 */
static inline
void copy_picture_field(TInterlaceContext *tinterlace,
                        uint8_t *dst[4], int dst_linesize[4],
                        const uint8_t *src[4], int src_linesize[4],
                        enum AVPixelFormat format, int w, int src_h,
                        int src_field, int interleave, int dst_field,
                        int flags)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
    int plane, vsub = desc->log2_chroma_h;
    int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
    int h;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(src_h, vsub) : src_h;
        int cols  = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(    w, desc->log2_chroma_w) : w;
        int linesize = av_image_get_linesize(format, w, plane);
        uint8_t *dstp = dst[plane];
        const uint8_t *srcp = src[plane];

        if (linesize < 0)
            return;

        lines = (lines + (src_field == FIELD_UPPER)) / k;
        if (src_field == FIELD_LOWER)
            srcp += src_linesize[plane];
        if (interleave && dst_field == FIELD_LOWER)
            dstp += dst_linesize[plane];

        if (flags & TINTERLACE_FLAG_VLPF) {
            // Low-pass filtering is required when creating an interlaced destination from
            // a progressive source which contains high-frequency vertical detail.
            // Filtering will reduce interlace 'twitter' and Moire patterning.
            int srcp_linesize = src_linesize[plane] * k;
            int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
            for (h = lines; h > 0; h--) {
                const uint8_t *srcp_above = srcp - src_linesize[plane];
                const uint8_t *srcp_below = srcp + src_linesize[plane];
                if (h == lines) srcp_above = srcp; // there is no line above
                if (h == 1) srcp_below = srcp;     // there is no line below
                tinterlace->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
                            srcp, src_linesize[plane]*k, linesize, lines);
        }
    }
}

Contributor: Brainiarc7 | Project: ffmpeg-nvenc-plain | Lines: 59


Example 10: config_props

static int config_props(AVFilterLink *inlink)
{
    FlipContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
    const int hsub = pix_desc->log2_chroma_w;
    const int vsub = pix_desc->log2_chroma_h;

    av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, hsub);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, vsub);

    return 0;
}

Contributor: markjreed | Project: vice-emu | Lines: 15


Example 11: config_input_ref

static int config_input_ref(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx  = inlink->dst;
    PSNRContext *s = ctx->priv;
    unsigned sum;
    int j;

    s->nb_components = desc->nb_components;
    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
            ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->max[0] = (1 << (desc->comp[0].depth_minus1 + 1)) - 1;
    s->max[1] = (1 << (desc->comp[1].depth_minus1 + 1)) - 1;
    s->max[2] = (1 << (desc->comp[2].depth_minus1 + 1)) - 1;
    s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;

    s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
    s->comps[0] = s->is_rgb ? 'r' : 'y' ;
    s->comps[1] = s->is_rgb ? 'g' : 'u' ;
    s->comps[2] = s->is_rgb ? 'b' : 'v' ;
    s->comps[3] = 'a';

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    sum = 0;
    for (j = 0; j < s->nb_components; j++)
        sum += s->planeheight[j] * s->planewidth[j];
    for (j = 0; j < s->nb_components; j++) {
        s->planeweight[j] = (double) s->planeheight[j] * s->planewidth[j] / sum;
        s->average_max += s->max[j] * s->planeweight[j];
    }

    s->dsp.sse_line = desc->comp[0].depth_minus1 > 7 ? sse_line_16bit : sse_line_8bit;
    if (ARCH_X86)
        ff_psnr_init_x86(&s->dsp, desc->comp[0].depth_minus1 + 1);

    return 0;
}

Contributor: nanflower | Project: FFmpeg | Lines: 48


Example 12: smv_img_pnt

static inline void smv_img_pnt(uint8_t *dst_data[4], uint8_t *src_data[4],
                               const int src_linesizes[4],
                               enum PixelFormat pix_fmt, int width, int height,
                               int nlines)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int i, planes_nb = 0;

    if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
        return;

    for (i = 0; i < desc->nb_components; i++)
        planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

    for (i = 0; i < planes_nb; i++) {
        int h = height;
        if (i == 1 || i == 2) {
            h = FF_CEIL_RSHIFT(height, desc->log2_chroma_h);
        }
        smv_img_pnt_plane(&dst_data[i], src_data[i],
            src_linesizes[i], h, nlines);
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
        desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
        dst_data[1] = src_data[1];
}

Contributor: r-type | Project: vice-libretro | Lines: 26


Example 13: chr_convert

static int chr_convert(SwsContext *c, SwsFilterDescriptor *desc, int sliceY, int sliceH)
{
    int srcW = FF_CEIL_RSHIFT(desc->src->width, desc->src->h_chr_sub_sample);
    ColorContext * instance = desc->instance;
    uint32_t * pal = instance->pal;
    int sp0 = (sliceY - (desc->src->plane[0].sliceY >> desc->src->v_chr_sub_sample)) << desc->src->v_chr_sub_sample;
    int sp1 = sliceY - desc->src->plane[1].sliceY;

    int i;

    desc->dst->plane[1].sliceY = sliceY;
    desc->dst->plane[1].sliceH = sliceH;
    desc->dst->plane[2].sliceY = sliceY;
    desc->dst->plane[2].sliceH = sliceH;

    for (i = 0; i < sliceH; ++i) {
        const uint8_t * src[4] = { desc->src->plane[0].line[sp0+i],
                        desc->src->plane[1].line[sp1+i],
                        desc->src->plane[2].line[sp1+i],
                        desc->src->plane[3].line[sp0+i]};
        uint8_t * dst1 = desc->dst->plane[1].line[i];
        uint8_t * dst2 = desc->dst->plane[2].line[i];

        if (c->chrToYV12) {
            c->chrToYV12(dst1, dst2, src[0], src[1], src[2], srcW, pal);
        } else if (c->readChrPlanar) {
            c->readChrPlanar(dst1, dst2, src, srcW, c->input_rgb2yuv_table);
        }
    }

    return sliceH;
}

Contributor: LittleKey | Project: FFmpeg | Lines: 32


Example 14: config_input

static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    VectorscopeContext *s = inlink->dst->priv;

    s->is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB);
    s->size = 1 << desc->comp[0].depth;
    s->mult = s->size / 256;

    if (s->mode == GRAY && s->is_yuv)
        s->pd = 0;
    else {
        if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1))
            s->pd = 0;
        else if ((s->x == 0 && s->y == 2) || (s->x == 2 && s->y == 0))
            s->pd = 1;
        else if ((s->x == 0 && s->y == 1) || (s->x == 1 && s->y == 0))
            s->pd = 2;
    }

    if (s->size == 256)
        s->vectorscope = vectorscope8;
    else
        s->vectorscope = vectorscope16;

    switch (inlink->format) {
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GBRP:
        s->bg_color = black_gbrp_color;
        break;
    default:
        s->bg_color = black_yuva_color;
    }

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    return 0;
}

Contributor: Crawping | Project: chromium_extract | Lines: 45


示例15: config_out_props

static int config_out_props(AVFilterLink *outlink){    AVFilterContext *ctx = outlink->src;    AVFilterLink *inlink = outlink->src->inputs[0];    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);    TInterlaceContext *tinterlace = ctx->priv;    tinterlace->vsub = desc->log2_chroma_h;    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;    outlink->w = inlink->w;    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?        inlink->h*2 : inlink->h;    if (tinterlace->mode == MODE_PAD) {        uint8_t black[4] = { 16, 128, 128, 16 };        int i, ret;        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))            black[0] = black[3] = 0;        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,                             outlink->w, outlink->h, outlink->format, 1);        if (ret < 0)            return ret;        /* fill black picture with black */        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {            int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;            memset(tinterlace->black_data[i], black[i],                   tinterlace->black_linesize[i] * h);        }    }    if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)            && !(tinterlace->mode == MODE_INTERLEAVE_TOP              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d/n",                tinterlace->mode);        tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;    }    if (tinterlace->mode == MODE_INTERLACEX2) {        outlink->time_base.num = inlink->time_base.num;        outlink->time_base.den = inlink->time_base.den * 2;        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});    } else if (tinterlace->mode != MODE_PAD) {        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});    }    if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {        tinterlace->lowpass_line = lowpass_line_c;        if (ARCH_X86)            ff_tinterlace_init_x86(tinterlace);    }    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d/n",           tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",           inlink->h, outlink->h);    return 0;}
开发者ID:Brainiarc7,项目名称:ffmpeg-nvenc-plain,代码行数:57,


Example 16: filter_frame

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    HQDN3DContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int direct, c;

    if (av_frame_is_writable(in) && !ctx->is_disabled) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    for (c = 0; c < 3; c++) {
        denoise(s, in->data[c], out->data[c],
                s->line, &s->frame_prev[c],
                FF_CEIL_RSHIFT(in->width,  (!!c * s->hsub)),
                FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
                in->linesize[c], out->linesize[c],
                s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
                s->coefs[c ? CHROMA_TMP     : LUMA_TMP]);
    }

    if (ctx->is_disabled) {
        av_frame_free(&out);
        return ff_filter_frame(outlink, in);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

Contributor: 18565773346 | Project: android-h264-decoder | Lines: 43


Example 17: request_frame

static int request_frame(AVFilterLink *outlink)
{
    MPTestContext *test = outlink->src->priv;
    AVFrame *picref;
    int w = WIDTH, h = HEIGHT,
        cw = FF_CEIL_RSHIFT(w, test->hsub), ch = FF_CEIL_RSHIFT(h, test->vsub);
    unsigned int frame = outlink->frame_count;
    enum test_type tt = test->test;
    int i;

    if (test->max_pts >= 0 && test->pts > test->max_pts)
        return AVERROR_EOF;
    picref = ff_get_video_buffer(outlink, w, h);
    if (!picref)
        return AVERROR(ENOMEM);
    picref->pts = test->pts++;

    // clean image
    for (i = 0; i < h; i++)
        memset(picref->data[0] + i*picref->linesize[0], 0, w);
    for (i = 0; i < ch; i++) {
        memset(picref->data[1] + i*picref->linesize[1], 128, cw);
        memset(picref->data[2] + i*picref->linesize[2], 128, cw);
    }

    if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
        tt = (frame/30)%(TEST_NB-1);

    switch (tt) {
    case TEST_DC_LUMA:       dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30); break;
    case TEST_DC_CHROMA:     dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30); break;
    case TEST_FREQ_LUMA:   freq_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_FREQ_CHROMA: freq_test(picref->data[1], picref->linesize[1], frame%30); break;
    case TEST_AMP_LUMA:     amp_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_AMP_CHROMA:   amp_test(picref->data[1], picref->linesize[1], frame%30); break;
    case TEST_CBP:          cbp_test(picref->data   , picref->linesize   , frame%30); break;
    case TEST_MV:            mv_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_RING1:      ring1_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_RING2:      ring2_test(picref->data[0], picref->linesize[0], frame%30); break;
    }

    return ff_filter_frame(outlink, picref);
}

Contributor: r-type | Project: vice-libretro | Lines: 43


Example 18: process_frame

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    StackContext *s = fs->opaque;
    AVFrame **in = s->frames;
    AVFrame *out;
    int i, p, ret, offset[4] = { 0 };

    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        int linesize[4];
        int height[4];

        if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) {
            av_frame_free(&out);
            return ret;
        }

        height[1] = height[2] = FF_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
        height[0] = height[3] = inlink->h;

        for (p = 0; p < s->nb_planes; p++) {
            if (s->is_vertical) {
                av_image_copy_plane(out->data[p] + offset[p] * out->linesize[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += height[p];
            } else {
                av_image_copy_plane(out->data[p] + offset[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += linesize[p];
            }
        }
    }

    return ff_filter_frame(outlink, out);
}

Contributor: LinuxCao | Project: ffmpeg-2.8.4-for-x86-linux | Lines: 53


Example 19: config_input

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PullupContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int mp = s->metric_plane;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    if (mp + 1 > s->nb_planes) {
        av_log(ctx, AV_LOG_ERROR, "input format does not have such plane\n");
        return AVERROR(EINVAL);
    }

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->metric_w      = (s->planewidth[mp]  - ((s->junk_left + s->junk_right)  << 3)) >> 3;
    s->metric_h      = (s->planeheight[mp] - ((s->junk_top  + s->junk_bottom) << 1)) >> 3;
    s->metric_offset = (s->junk_left << 3) + (s->junk_top << 1) * s->planewidth[mp];
    s->metric_length = s->metric_w * s->metric_h;

    av_log(ctx, AV_LOG_DEBUG, "w: %d h: %d\n", s->metric_w, s->metric_h);
    av_log(ctx, AV_LOG_DEBUG, "offset: %d length: %d\n", s->metric_offset, s->metric_length);

    s->head = make_field_queue(s, 8);
    if (!s->head)
        return AVERROR(ENOMEM);

    s->diff = diff_c;
    s->comb = comb_c;
    s->var  = var_c;

    if (ARCH_X86)
        ff_pullup_init_x86(s);
    return 0;
}

Contributor: Bjelijah | Project: EcamTurnH265 | Lines: 39


Example 20: get_video_buffer

static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = FFALIGN(frame->height, 32);
        if (i == 1 || i == 2)
            h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(1024);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}

Contributor: venkatarajasekhar | Project: Qt | Lines: 51


Example 21: config_input

static int config_input(AVFilterLink *inlink)
{
    PhaseContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}

Contributor: Ced2911 | Project: FFmpeg | Lines: 16


Example 22: filter_frame

static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    FlipContext *flip = link->dst->priv;
    int i;

    for (i = 0; i < 4; i ++) {
        int vsub = i == 1 || i == 2 ? flip->vsub : 0;
        int height = FF_CEIL_RSHIFT(link->h, vsub);

        if (frame->data[i]) {
            frame->data[i] += (height - 1) * frame->linesize[i];
            frame->linesize[i] = -frame->linesize[i];
        }
    }

    return ff_filter_frame(link->dst->outputs[0], frame);
}

Contributor: Armada651 | Project: FFmpeg | Lines: 17


Example 23: copy_picture_field

static void copy_picture_field(AVFrame *src_frame, AVFrame *dst_frame,
                               AVFilterLink *inlink, enum FieldType field_type,
                               int lowpass)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub = desc->log2_chroma_h;
    int plane, i, j;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
        int linesize = av_image_get_linesize(inlink->format, inlink->w, plane);
        uint8_t *dstp = dst_frame->data[plane];
        const uint8_t *srcp = src_frame->data[plane];

        av_assert0(linesize >= 0);

        lines = (lines + (field_type == FIELD_UPPER)) / 2;
        if (field_type == FIELD_LOWER)
            srcp += src_frame->linesize[plane];
        if (field_type == FIELD_LOWER)
            dstp += dst_frame->linesize[plane];
        if (lowpass) {
            int srcp_linesize = src_frame->linesize[plane] * 2;
            int dstp_linesize = dst_frame->linesize[plane] * 2;
            for (j = lines; j > 0; j--) {
                const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
                const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
                if (j == lines)
                    srcp_above = srcp; // there is no line above
                if (j == 1)
                    srcp_below = srcp; // there is no line below
                for (i = 0; i < linesize; i++) {
                    // this calculation is an integer representation of
                    // '0.5 * current + 0.25 * above + 0.25 * below'
                    // '1 +' is for rounding.
                    dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
                }
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
                                srcp, src_frame->linesize[plane] * 2,
                                linesize, lines);
        }
    }
}

Contributor: plumbojumbo | Project: FFmpeg | Lines: 46


Example 24: copy_picture_field

static void copy_picture_field(InterlaceContext *s,
                               AVFrame *src_frame, AVFrame *dst_frame,
                               AVFilterLink *inlink, enum FieldType field_type,
                               int lowpass)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;
    int plane, j;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int cols  = (plane == 1 || plane == 2) ? -(-inlink->w) >> hsub : inlink->w;
        int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
        uint8_t *dstp = dst_frame->data[plane];
        const uint8_t *srcp = src_frame->data[plane];

        av_assert0(cols >= 0 || lines >= 0);

        lines = (lines + (field_type == FIELD_UPPER)) / 2;
        if (field_type == FIELD_LOWER) {
            srcp += src_frame->linesize[plane];
            dstp += dst_frame->linesize[plane];
        }
        if (lowpass) {
            int srcp_linesize = src_frame->linesize[plane] * 2;
            int dstp_linesize = dst_frame->linesize[plane] * 2;
            for (j = lines; j > 0; j--) {
                const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
                const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
                if (j == lines)
                    srcp_above = srcp; // there is no line above
                if (j == 1)
                    srcp_below = srcp; // there is no line below
                s->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
                                srcp, src_frame->linesize[plane] * 2,
                                cols, lines);
        }
    }
}

Contributor: BossKing | Project: FFmpeg | Lines: 44
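Examples 23 and 24 (and the lowpass_line callback in Example 9) all apply the same vertical low-pass kernel when building an interlaced frame: each output sample is 0.5*current + 0.25*above + 0.25*below, computed in integer arithmetic with a +1 bias for rounding, exactly as the inline comment in Example 23 describes. A standalone sketch of that kernel, with a hypothetical helper name and made-up test data:

#include <stdint.h>
#include <stdio.h>

/* Vertical low-pass used by the interlacing filters above:
 * out = (2*cur + above + below + 1) >> 2, i.e. the integer form of
 * 0.5*cur + 0.25*above + 0.25*below with a rounding bias. */
static void lowpass_line(uint8_t *dst, int width, const uint8_t *cur,
                         const uint8_t *above, const uint8_t *below)
{
    for (int i = 0; i < width; i++)
        dst[i] = (1 + cur[i] + cur[i] + above[i] + below[i]) >> 2;
}

int main(void)
{
    const uint8_t above[4] = { 0, 255,  16, 100 };
    const uint8_t cur[4]   = { 0,   0, 235, 100 };
    const uint8_t below[4] = { 0, 255,  16, 100 };
    uint8_t out[4];

    lowpass_line(out, 4, cur, above, below);
    for (int i = 0; i < 4; i++)
        printf("%d ", out[i]);  /* prints: 0 127 125 100 */
    putchar('\n');
    return 0;
}

Smoothing high-contrast vertical detail this way is what reduces the interlace "twitter" mentioned in Example 9's comments.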


Example 25: ff_opencl_deshake_process_inout_buf

int ff_opencl_deshake_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
    int ret = 0;
    AVFilterLink *link = ctx->inputs[0];
    DeshakeContext *deshake = ctx->priv;
    const int hshift = av_pix_fmt_desc_get(link->format)->log2_chroma_h;
    int chroma_height = FF_CEIL_RSHIFT(link->h, hshift);

    if ((!deshake->opencl_ctx.cl_inbuf) || (!deshake->opencl_ctx.cl_outbuf)) {
        deshake->opencl_ctx.in_plane_size[0]  = (in->linesize[0] * in->height);
        deshake->opencl_ctx.in_plane_size[1]  = (in->linesize[1] * chroma_height);
        deshake->opencl_ctx.in_plane_size[2]  = (in->linesize[2] * chroma_height);
        deshake->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
        deshake->opencl_ctx.out_plane_size[1] = (out->linesize[1] * chroma_height);
        deshake->opencl_ctx.out_plane_size[2] = (out->linesize[2] * chroma_height);
        deshake->opencl_ctx.cl_inbuf_size  = deshake->opencl_ctx.in_plane_size[0] +
                                             deshake->opencl_ctx.in_plane_size[1] +
                                             deshake->opencl_ctx.in_plane_size[2];
        deshake->opencl_ctx.cl_outbuf_size = deshake->opencl_ctx.out_plane_size[0] +
                                             deshake->opencl_ctx.out_plane_size[1] +
                                             deshake->opencl_ctx.out_plane_size[2];
        if (!deshake->opencl_ctx.cl_inbuf) {
            ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_inbuf,
                                            deshake->opencl_ctx.cl_inbuf_size,
                                            CL_MEM_READ_ONLY, NULL);
            if (ret < 0)
                return ret;
        }
        if (!deshake->opencl_ctx.cl_outbuf) {
            ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_outbuf,
                                            deshake->opencl_ctx.cl_outbuf_size,
                                            CL_MEM_READ_WRITE, NULL);
            if (ret < 0)
                return ret;
        }
    }
    ret = av_opencl_buffer_write_image(deshake->opencl_ctx.cl_inbuf,
                                 deshake->opencl_ctx.cl_inbuf_size,
                                 0, in->data, deshake->opencl_ctx.in_plane_size,
                                 deshake->opencl_ctx.plane_num);
    if (ret < 0)
        return ret;
    return ret;
}

Contributor: Bjelijah | Project: EcamTurnH265 | Lines: 44


Example 26: filter_frame

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += frame->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08X plane_checksum:[%08X",
           inlink->frame_count,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

Contributor: StephanieSpanjian | Project: FFmpeg | Lines: 43


Example 27: ff_opencl_unsharp_process_inout_buf

int ff_opencl_unsharp_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
    int ret = 0;
    AVFilterLink *link = ctx->inputs[0];
    UnsharpContext *unsharp = ctx->priv;
    int ch = FF_CEIL_RSHIFT(link->h, unsharp->vsub);

    if ((!unsharp->opencl_ctx.cl_inbuf) || (!unsharp->opencl_ctx.cl_outbuf)) {
        unsharp->opencl_ctx.in_plane_size[0]  = (in->linesize[0] * in->height);
        unsharp->opencl_ctx.in_plane_size[1]  = (in->linesize[1] * ch);
        unsharp->opencl_ctx.in_plane_size[2]  = (in->linesize[2] * ch);
        unsharp->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
        unsharp->opencl_ctx.out_plane_size[1] = (out->linesize[1] * ch);
        unsharp->opencl_ctx.out_plane_size[2] = (out->linesize[2] * ch);
        unsharp->opencl_ctx.cl_inbuf_size  = unsharp->opencl_ctx.in_plane_size[0] +
                                             unsharp->opencl_ctx.in_plane_size[1] +
                                             unsharp->opencl_ctx.in_plane_size[2];
        unsharp->opencl_ctx.cl_outbuf_size = unsharp->opencl_ctx.out_plane_size[0] +
                                             unsharp->opencl_ctx.out_plane_size[1] +
                                             unsharp->opencl_ctx.out_plane_size[2];
        if (!unsharp->opencl_ctx.cl_inbuf) {
            ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_inbuf,
                                          unsharp->opencl_ctx.cl_inbuf_size,
                                          CL_MEM_READ_ONLY, NULL);
            if (ret < 0)
                return ret;
        }
        if (!unsharp->opencl_ctx.cl_outbuf) {
            ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_outbuf,
                                          unsharp->opencl_ctx.cl_outbuf_size,
                                          CL_MEM_READ_WRITE, NULL);
            if (ret < 0)
                return ret;
        }
    }
    return av_opencl_buffer_write_image(unsharp->opencl_ctx.cl_inbuf,
                                        unsharp->opencl_ctx.cl_inbuf_size,
                                        0, in->data, unsharp->opencl_ctx.in_plane_size,
                                        unsharp->opencl_ctx.plane_num);
}

Contributor: 18565773346 | Project: android-h264-decoder | Lines: 40


Example 28: ff_get_video_buffer

static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
{
    FlipContext *flip = link->dst->priv;
    AVFrame *frame;
    int i;

    frame = ff_get_video_buffer(link->dst->outputs[0], w, h);
    if (!frame)
        return NULL;

    for (i = 0; i < 4; i ++) {
        int vsub = i == 1 || i == 2 ? flip->vsub : 0;
        int height = FF_CEIL_RSHIFT(h, vsub);

        if (frame->data[i]) {
            frame->data[i] += (height - 1) * frame->linesize[i];
            frame->linesize[i] = -frame->linesize[i];
        }
    }

    return frame;
}

Contributor: Armada651 | Project: FFmpeg | Lines: 22


Example 29: filter_frame

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    FlipContext *s     = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    uint8_t *inrow, *outrow;
    int i, j, plane, step;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    /* copy palette if required */
    if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
        memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
        const int width  = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, s->hsub) : inlink->w;
        const int height = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, s->vsub) : inlink->h;

        step = s->max_step[plane];

        outrow = out->data[plane];
        inrow  = in ->data[plane] + (width - 1) * step;
        for (i = 0; i < height; i++) {
            switch (step) {
            case 1:
                for (j = 0; j < width; j++)
                    outrow[j] = inrow[-j];
                break;

            case 2:
            {
                uint16_t *outrow16 = (uint16_t *)outrow;
                uint16_t * inrow16 = (uint16_t *) inrow;
                for (j = 0; j < width; j++)
                    outrow16[j] = inrow16[-j];
            }
                break;

            case 3:
            {
                uint8_t *in  =  inrow;
                uint8_t *out = outrow;
                for (j = 0; j < width; j++, out += 3, in -= 3) {
                    int32_t v = AV_RB24(in);
                    AV_WB24(out, v);
                }
            }
                break;

            case 4:
            {
                uint32_t *outrow32 = (uint32_t *)outrow;
                uint32_t * inrow32 = (uint32_t *) inrow;
                for (j = 0; j < width; j++)
                    outrow32[j] = inrow32[-j];
            }
                break;

            default:
                for (j = 0; j < width; j++)
                    memcpy(outrow + j*step, inrow - j*step, step);
            }

            inrow  += in ->linesize[plane];
            outrow += out->linesize[plane];
        }
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

Contributor: Armada651 | Project: FFmpeg | Lines: 76


Example 30: filter_frame

static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    DeshakeContext *deshake = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFrame *out;
    Transform t = {{0},0}, orig = {{0},0};
    float matrix_y[9], matrix_uv[9];
    float alpha = 2.0 / deshake->refcount;
    char tmp[256];
    int ret = 0;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    const int chroma_width  = FF_CEIL_RSHIFT(link->w, desc->log2_chroma_w);
    const int chroma_height = FF_CEIL_RSHIFT(link->h, desc->log2_chroma_h);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (CONFIG_OPENCL && deshake->opencl) {
        ret = ff_opencl_deshake_process_inout_buf(link->dst, in, out);
        if (ret < 0)
            return ret;
    }

    if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
        // Find the most likely global motion for the current frame
        find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
    } else {
        uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
        uint8_t *src2 = in->data[0];

        deshake->cx = FFMIN(deshake->cx, link->w);
        deshake->cy = FFMIN(deshake->cy, link->h);

        if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
        if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;

        // Quadword align right margin
        deshake->cw &= ~15;

        src1 += deshake->cy * in->linesize[0] + deshake->cx;
        src2 += deshake->cy * in->linesize[0] + deshake->cx;

        find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
    }

    // Copy transform so we can output it later to compare to the smoothed value
    orig.vec.x = t.vec.x;
    orig.vec.y = t.vec.y;
    orig.angle = t.angle;
    orig.zoom = t.zoom;

    // Generate a one-sided moving exponential average
    deshake->avg.vec.x = alpha * t.vec.x + (1.0 - alpha) * deshake->avg.vec.x;
    deshake->avg.vec.y = alpha * t.vec.y + (1.0 - alpha) * deshake->avg.vec.y;
    deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
    deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;

    // Remove the average from the current motion to detect the motion that
    // is not on purpose, just as jitter from bumping the camera
    t.vec.x -= deshake->avg.vec.x;
    t.vec.y -= deshake->avg.vec.y;
    t.angle -= deshake->avg.angle;
    t.zoom -= deshake->avg.zoom;

    // Invert the motion to undo it
    t.vec.x *= -1;
    t.vec.y *= -1;
    t.angle *= -1;

    // Write statistics to file
    if (deshake->fp) {
        snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vec.x, deshake->avg.vec.x, t.vec.x, orig.vec.y, deshake->avg.vec.y, t.vec.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
        fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
    }

    // Turn relative current frame motion into absolute by adding it to the
    // last absolute motion
    t.vec.x += deshake->last.vec.x;
    t.vec.y += deshake->last.vec.y;
    t.angle += deshake->last.angle;
    t.zoom += deshake->last.zoom;

    // Shrink motion by 10% to keep things centered in the camera frame
    t.vec.x *= 0.9;
    t.vec.y *= 0.9;
    t.angle *= 0.9;

    // Store the last absolute motion information
    deshake->last.vec.x = t.vec.x;
    deshake->last.vec.y = t.vec.y;
    deshake->last.angle = t.angle;
    deshake->last.zoom = t.zoom;

    // Generate a luma transformation matrix
    avfilter_get_matrix(t.vec.x, t.vec.y, t.angle, 1.0 + t.zoom / 100.0, matrix_y);
    // ... the remainder of this function is omitted in the source listing ...

Contributor: Crawping | Project: chromium_extract | Lines: 101



Note: The FF_CEIL_RSHIFT function examples in this article were collected from open-source projects hosted on GitHub, MSDocs, and similar code and documentation platforms. The snippets were selected from projects contributed by various open-source developers; copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not republish without permission.

