
Tutorial: C++ FFABS function code examples

51自学网 2021-06-01 20:41:00
This tutorial on C++ FFABS function code examples is quite practical; we hope you find it helpful.

This article collects typical usage examples of the FFABS function (an absolute-value macro from FFmpeg's libavutil) as it appears in real C/C++ code. If you have been wondering what exactly FFABS does, how FFABS is used, or what real FFABS code looks like, the hand-picked examples below may help.

The original collection lists 30 FFABS code examples, sorted by popularity by default; the 17 reproduced below are drawn from projects such as FFmpeg, Libav, mpv, and avidemux.
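Before the examples, a quick note on what FFABS actually is. It is not part of the C or C++ standard library; in the projects below it comes from FFmpeg's (and Libav's) libavutil/common.h. The snippet below is a minimal, self-contained sketch of its conventional definition and the usual caveats — check your own FFmpeg headers for the exact form.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Sketch of the macro as conventionally defined in libavutil/common.h;
 * in a real FFmpeg-based project include that header instead. */
#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))

int main(void)
{
    int     d  = -42;
    int64_t ts = -90000;

    /* Works for any signed integer or floating-point expression. */
    printf("FFABS(%d) = %d\n", d, FFABS(d));
    printf("FFABS(%" PRId64 ") = %" PRId64 "\n", ts, FFABS(ts));

    /* Caveats: the argument is evaluated twice, so avoid side effects
     * (FFABS(x++) is wrong), and FFABS(INT_MIN) overflows just like
     * abs(INT_MIN). */
    return 0;
}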

Example 1: build_ordered_chapter_timeline

void build_ordered_chapter_timeline(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;

    if (!opts->ordered_chapters) {
        mp_msg(MSGT_CPLAYER, MSGL_INFO, "File uses ordered chapters, but "
               "you have disabled support for them. Ignoring.\n");
        return;
    }

    mp_msg(MSGT_CPLAYER, MSGL_INFO, "File uses ordered chapters, will build "
           "edit timeline.\n");

    struct demuxer *demuxer = mpctx->demuxer;
    struct matroska_data *m = &demuxer->matroska_data;

    // +1 because sources/uid_map[0] is original file even if all chapters
    // actually use other sources and need separate entries
    struct demuxer **sources = talloc_array_ptrtype(NULL, sources,
                                                    m->num_ordered_chapters+1);
    sources[0] = mpctx->demuxer;
    unsigned char (*uid_map)[16] = talloc_array_ptrtype(NULL, uid_map,
                                                 m->num_ordered_chapters + 1);
    int num_sources = 1;
    memcpy(uid_map[0], m->segment_uid, 16);

    for (int i = 0; i < m->num_ordered_chapters; i++) {
        struct matroska_chapter *c = m->ordered_chapters + i;
        if (!c->has_segment_uid)
            memcpy(c->segment_uid, m->segment_uid, 16);

        for (int j = 0; j < num_sources; j++)
            if (!memcmp(c->segment_uid, uid_map[j], 16))
                goto found1;

        memcpy(uid_map[num_sources], c->segment_uid, 16);
        sources[num_sources] = NULL;
        num_sources++;
    found1:
        ;
    }

    num_sources = find_ordered_chapter_sources(mpctx, sources, num_sources,
                                               uid_map);

    // +1 for terminating chapter with start time marking end of last real one
    struct timeline_part *timeline = talloc_array_ptrtype(NULL, timeline,
                                                  m->num_ordered_chapters + 1);
    struct chapter *chapters = talloc_array_ptrtype(NULL, chapters,
                                                    m->num_ordered_chapters);
    uint64_t starttime = 0;
    uint64_t missing_time = 0;
    int part_count = 0;
    int num_chapters = 0;
    uint64_t prev_part_offset = 0;
    for (int i = 0; i < m->num_ordered_chapters; i++) {
        struct matroska_chapter *c = m->ordered_chapters + i;

        int j;
        for (j = 0; j < num_sources; j++) {
            if (!memcmp(c->segment_uid, uid_map[j], 16))
                goto found2;
        }
        missing_time += c->end - c->start;
        continue;
    found2:;
        /* Only add a separate part if the time or file actually changes.
         * Matroska files have chapter divisions that are redundant from
         * timeline point of view because the same chapter structure is used
         * both to specify the timeline and for normal chapter information.
         * Removing a missing inserted external chapter can also cause this.
         * We allow for a configurable fudge factor because of files which
         * specify chapter end times that are one frame too early;
         * we don't want to try seeking over a one frame gap.
         */
        int64_t join_diff = c->start - starttime - prev_part_offset;
        if (part_count == 0
            || FFABS(join_diff) > opts->chapter_merge_threshold * 1000000
            || sources[j] != timeline[part_count - 1].source) {
            timeline[part_count].source = sources[j];
            timeline[part_count].start = starttime / 1e9;
            timeline[part_count].source_start = c->start / 1e9;
            prev_part_offset = c->start - starttime;
            part_count++;
        } else if (part_count > 0 && join_diff) {
            /* Chapter was merged at an inexact boundary;
             * adjust timestamps to match. */
            mp_msg(MSGT_CPLAYER, MSGL_V, "Merging timeline part %d with "
                   "offset %g ms.\n", i, join_diff / 1e6);
            starttime += join_diff;
        }
        chapters[num_chapters].start = starttime / 1e9;
        chapters[num_chapters].name = talloc_strdup(chapters, c->name);
        starttime += c->end - c->start;
        num_chapters++;
    }
    timeline[part_count].start = starttime / 1e9;
    talloc_free(uid_map);

    if (!part_count) {
        // None of the parts come from the file itself???
        // ... remainder of the function omitted ...
Developer: ArcherSeven, Project: mpv, Lines: 101
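In Example 1, FFABS implements a symmetric tolerance test: a chapter is merged into the previous timeline part only when the signed gap join_diff stays within chapter_merge_threshold milliseconds in either direction. Below is a stripped-down sketch of just that decision; the names are illustrative, not mpv's API.

#include <stdbool.h>
#include <stdint.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))  /* see libavutil/common.h */

/* Hypothetical helper mirroring the FFABS(join_diff) test above:
 * merge when the (possibly negative) gap in nanoseconds is within
 * merge_threshold_ms milliseconds. */
static bool can_merge_parts(int64_t chapter_start_ns, int64_t part_end_ns,
                            int64_t merge_threshold_ms)
{
    int64_t join_diff = chapter_start_ns - part_end_ns;
    return FFABS(join_diff) <= merge_threshold_ms * 1000000;  /* ms -> ns */
}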


Example 2: mpeg1_encode_sequence_header

/* put sequence header if needed */static void mpeg1_encode_sequence_header(MpegEncContext *s){        unsigned int vbv_buffer_size;        unsigned int fps, v;        int i;        uint64_t time_code;        float best_aspect_error= 1E10;        float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio);        int constraint_parameter_flag;        if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)        if (s->current_picture.f.key_frame) {            AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];            /* mpeg1 header repeated every gop */            put_header(s, SEQ_START_CODE);            put_sbits(&s->pb, 12, s->width  & 0xFFF);            put_sbits(&s->pb, 12, s->height & 0xFFF);            for(i=1; i<15; i++){                float error= aspect_ratio;                if(s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <=1)                    error-= 1.0/ff_mpeg1_aspect[i];                else                    error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;                error= FFABS(error);                if(error < best_aspect_error){                    best_aspect_error= error;                    s->aspect_ratio_info= i;                }            }            put_bits(&s->pb, 4, s->aspect_ratio_info);            put_bits(&s->pb, 4, s->frame_rate_index);            if(s->avctx->rc_max_rate){                v = (s->avctx->rc_max_rate + 399) / 400;                if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)                    v = 0x3ffff;            }else{                v= 0x3FFFF;            }            if(s->avctx->rc_buffer_size)                vbv_buffer_size = s->avctx->rc_buffer_size;            else                /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */                vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;            vbv_buffer_size= (vbv_buffer_size + 16383) / 16384;            put_sbits(&s->pb, 18, v);            put_bits(&s->pb, 1, 1); /* marker */            put_sbits(&s->pb, 10, vbv_buffer_size);            constraint_parameter_flag=                s->width <= 768 && s->height <= 576 &&                s->mb_width * s->mb_height <= 396 &&                s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 &&                framerate.num <= framerate.den*30 &&                s->avctx->me_range && s->avctx->me_range < 128 &&                vbv_buffer_size <= 20 &&                v <= 1856000/400 &&                s->codec_id == AV_CODEC_ID_MPEG1VIDEO;            put_bits(&s->pb, 1, constraint_parameter_flag);            ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);            ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);            if(s->codec_id == AV_CODEC_ID_MPEG2VIDEO){                put_header(s, EXT_START_CODE);                put_bits(&s->pb, 4, 1); //seq ext                put_bits(&s->pb, 1, s->avctx->profile == 0); //escx 1 for 4:2:2 profile */                put_bits(&s->pb, 3, s->avctx->profile); //profile                put_bits(&s->pb, 4, s->avctx->level); //level                put_bits(&s->pb, 1, s->progressive_sequence);                put_bits(&s->pb, 2, s->chroma_format);                put_bits(&s->pb, 2, s->width >>12);                put_bits(&s->pb, 2, s->height>>12);                put_bits(&s->pb, 12, v>>18); //bitrate ext                put_bits(&s->pb, 1, 1); //marker                put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext                put_bits(&s->pb, 1, 
s->low_delay);                put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n                put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d            }
Developer: Armada651, Project: FFmpeg, Lines: 94
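The aspect-ratio loop in Example 2 is the most common FFABS idiom in these code bases: scan a table of candidates and keep the index whose absolute error against a target is smallest. The same pattern reappears in the Xing-header bitrate search (Example 4) and the H.263 clock-divisor search (Example 5). A generic, self-contained sketch:

#include <stddef.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))  /* see libavutil/common.h */

/* Return the index of the table entry closest to target, i.e. the one that
 * minimizes the absolute error. Illustrative helper, not FFmpeg API;
 * assumes n >= 1. */
static size_t closest_index(const double *table, size_t n, double target)
{
    size_t best_idx   = 0;
    double best_error = FFABS(target - table[0]);

    for (size_t i = 1; i < n; i++) {
        double error = FFABS(target - table[i]);
        if (error < best_error) {
            best_error = error;
            best_idx   = i;
        }
    }
    return best_idx;
}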


Example 3: search_for_quantizers_faac

//.........这里部分代码省略.........                    if (sce->ics.num_windows == 1 && maxval < t) {                        maxval  = t;                        peakpos = start+i;                    }                }            }            if (sce->ics.num_windows == 1) {                start2 = FFMAX(peakpos - 2, start2);                end2   = FFMIN(peakpos + 3, end2);            } else {                start2 -= start;                end2   -= start;            }            start += size;            thr = pow(thr / (avg_energy * (end2 - start2)), 0.3 + 0.1*(lastband - g) / lastband);            t   = 1.0 - (1.0 * start2 / last);            uplim[w*16+g] = distfact / (1.4 * thr + t*t*t + 0.075);        }    }    memset(sce->sf_idx, 0, sizeof(sce->sf_idx));    abs_pow34_v(s->scoefs, sce->coeffs, 1024);    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {        start = w*128;        for (g = 0;  g < sce->ics.num_swb; g++) {            const float *coefs  = sce->coeffs + start;            const float *scaled = s->scoefs   + start;            const int size      = sce->ics.swb_sizes[g];            int scf, prev_scf, step;            int min_scf = -1, max_scf = 256;            float curdiff;            if (maxq[w*16+g] < 21.544) {                sce->zeroes[w*16+g] = 1;                start += size;                continue;            }            sce->zeroes[w*16+g] = 0;            scf  = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2f(1/maxq[w*16+g])*16/3, 60, 218);            step = 16;            for (;;) {                float dist = 0.0f;                int quant_max;                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {                    int b;                    dist += quantize_band_cost(s, coefs + w2*128,                                               scaled + w2*128,                                               sce->ics.swb_sizes[g],                                               scf,                                               ESC_BT,                                               lambda,                                               INFINITY,                                               &b);                    dist -= b;                }                dist *= 1.0f / 512.0f / lambda;                quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[POW_SF2_ZERO - scf + SCALE_ONE_POS - SCALE_DIV_512]);                if (quant_max >= 8191) { // too much, return to the previous quantizer                    sce->sf_idx[w*16+g] = prev_scf;                    break;                }                prev_scf = scf;                curdiff = fabsf(dist - uplim[w*16+g]);                if (curdiff <= 1.0f)                    step = 0;                else                    step = log2f(curdiff);                if (dist > uplim[w*16+g])                    step = -step;                scf += step;                scf = av_clip_uint8(scf);                step = scf - prev_scf;                if (FFABS(step) <= 1 || (step > 0 && scf >= max_scf) || (step < 0 && scf <= min_scf)) {                    sce->sf_idx[w*16+g] = av_clip(scf, min_scf, max_scf);                    break;                }                if (step > 0)                    min_scf = prev_scf;                else                    max_scf = prev_scf;            }            start += size;        }    }    minq = sce->sf_idx[0] ? 
sce->sf_idx[0] : INT_MAX;    for (i = 1; i < 128; i++) {        if (!sce->sf_idx[i])            sce->sf_idx[i] = sce->sf_idx[i-1];        else            minq = FFMIN(minq, sce->sf_idx[i]);    }    if (minq == INT_MAX)        minq = 0;    minq = FFMIN(minq, SCALE_MAX_POS);    maxsf = FFMIN(minq + SCALE_MAX_DIFF, SCALE_MAX_POS);    for (i = 126; i >= 0; i--) {        if (!sce->sf_idx[i])            sce->sf_idx[i] = sce->sf_idx[i+1];        sce->sf_idx[i] = av_clip(sce->sf_idx[i], minq, maxsf);    }}
Developer: Arcen, Project: FFmpeg, Lines: 101


Example 4: mp3_write_xing

/*
 * Write an empty XING header and initialize respective data.
 */
static void mp3_write_xing(AVFormatContext *s)
{
    MP3Context       *mp3 = s->priv_data;
    AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec;
    AVDictionaryEntry *enc = av_dict_get(s->streams[mp3->audio_stream_idx]->metadata, "encoder", NULL, 0);
    AVIOContext *dyn_ctx;
    int32_t        header;
    MPADecodeHeader  mpah;
    int srate_idx, i, channels;
    int bitrate_idx;
    int best_bitrate_idx;
    int best_bitrate_error = INT_MAX;
    int ret;
    int ver = 0;
    int lsf, bytes_needed;

    if (!s->pb->seekable || !mp3->write_xing)
        return;

    for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) {
        const uint16_t base_freq = avpriv_mpa_freq_tab[i];

        if      (codec->sample_rate == base_freq)     ver = 0x3; // MPEG 1
        else if (codec->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2
        else if (codec->sample_rate == base_freq / 4) ver = 0x0; // MPEG 2.5
        else continue;

        srate_idx = i;
        break;
    }
    if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) {
        av_log(s, AV_LOG_WARNING, "Unsupported sample rate, not writing Xing "
               "header.\n");
        return;
    }

    switch (codec->channels) {
    case 1:  channels = MPA_MONO;                                          break;
    case 2:  channels = MPA_STEREO;                                        break;
    default: av_log(s, AV_LOG_WARNING, "Unsupported number of channels, "
                    "not writing Xing header.\n");
             return;
    }

    /* dummy MPEG audio header */
    header  =  0xff                                  << 24; // sync
    header |= (0x7 << 5 | ver << 3 | 0x1 << 1 | 0x1) << 16; // sync/audio-version/layer 3/no crc
    header |= (srate_idx << 2) << 8;
    header |= channels << 6;

    lsf = !((header & (1 << 20) && header & (1 << 19)));

    mp3->xing_offset = xing_offtbl[ver != 3][channels == 1] + 4;
    bytes_needed     = mp3->xing_offset + XING_SIZE;

    for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) {
        int bit_rate = 1000 * avpriv_mpa_bitrate_tab[lsf][3 - 1][bitrate_idx];
        int error    = FFABS(bit_rate - codec->bit_rate);

        if (error < best_bitrate_error) {
            best_bitrate_error = error;
            best_bitrate_idx   = bitrate_idx;
        }
    }

    for (bitrate_idx = best_bitrate_idx; bitrate_idx < 15; bitrate_idx++) {
        int32_t mask = bitrate_idx << (4 + 8);
        header |= mask;

        avpriv_mpegaudio_decode_header(&mpah, header);

        if (bytes_needed <= mpah.frame_size)
            break;

        header &= ~mask;
    }

    ret = avio_open_dyn_buf(&dyn_ctx);
    if (ret < 0)
        return;

    avio_wb32(dyn_ctx, header);

    avpriv_mpegaudio_decode_header(&mpah, header);
    av_assert0(mpah.frame_size >= bytes_needed);

    ffio_fill(dyn_ctx, 0, mp3->xing_offset - 4);
    ffio_wfourcc(dyn_ctx, "Xing");
    avio_wb32(dyn_ctx, 0x01 | 0x02 | 0x04 | 0x08);  // frames / size / TOC / vbr scale

    mp3->size = mpah.frame_size;
    mp3->want = 1;

    avio_wb32(dyn_ctx, 0);  // frames
    avio_wb32(dyn_ctx, 0);  // size
    // ... remainder of the function omitted ...
Developer: founderznd, Project: libav, Lines: 101


Example 5: ff_h263_encode_picture_header

void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number){    int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;    int best_clock_code=1;    int best_divisor=60;    int best_error= INT_MAX;    if(s->h263_plus){        for(i=0; i<2; i++){            int div, error;            div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);            div= av_clip(div, 1, 127);            error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);            if(error < best_error){                best_error= error;                best_divisor= div;                best_clock_code= i;            }        }    }    s->custom_pcf= best_clock_code!=1 || best_divisor!=60;    coded_frame_rate= 1800000;    coded_frame_rate_base= (1000+best_clock_code)*best_divisor;    avpriv_align_put_bits(&s->pb);    /* Update the pointer to last GOB */    s->ptr_lastgob = put_bits_ptr(&s->pb);    put_bits(&s->pb, 22, 0x20); /* PSC */    temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / //FIXME use timestamp                         (coded_frame_rate_base * (int64_t)s->avctx->time_base.den);    put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */    put_bits(&s->pb, 1, 1);     /* marker */    put_bits(&s->pb, 1, 0);     /* h263 id */    put_bits(&s->pb, 1, 0);     /* split screen off */    put_bits(&s->pb, 1, 0);     /* camera  off */    put_bits(&s->pb, 1, 0);     /* freeze picture release off */    format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);    if (!s->h263_plus) {        /* H.263v1 */        put_bits(&s->pb, 3, format);        put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));        /* By now UMV IS DISABLED ON H.263v1, since the restrictions        of H.263v1 UMV implies to check the predicted MV after        calculation of the current MB to see if we're on the limits */        put_bits(&s->pb, 1, 0);         /* Unrestricted Motion Vector: off */        put_bits(&s->pb, 1, 0);         /* SAC: off */        put_bits(&s->pb, 1, s->obmc);   /* Advanced Prediction */        put_bits(&s->pb, 1, 0);         /* only I/P frames, no PB frame */        put_bits(&s->pb, 5, s->qscale);        put_bits(&s->pb, 1, 0);         /* Continuous Presence Multipoint mode: off */    } else {        int ufep=1;        /* H.263v2 */        /* H.263 Plus PTYPE */        put_bits(&s->pb, 3, 7);        put_bits(&s->pb,3,ufep); /* Update Full Extended PTYPE */        if (format == 8)            put_bits(&s->pb,3,6); /* Custom Source Format */        else            put_bits(&s->pb, 3, format);        put_bits(&s->pb,1, s->custom_pcf);        put_bits(&s->pb,1, s->umvplus); /* Unrestricted Motion Vector */        put_bits(&s->pb,1,0); /* SAC: off */        put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */        put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */        put_bits(&s->pb,1,s->loop_filter); /* Deblocking Filter */        put_bits(&s->pb,1,s->h263_slice_structured); /* Slice Structured */        put_bits(&s->pb,1,0); /* Reference Picture Selection: off */        put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */        put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */        put_bits(&s->pb,1,s->modified_quant); /* Modified Quantization: */        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */        put_bits(&s->pb,3,0); /* Reserved */        put_bits(&s->pb, 3, 
s->pict_type == AV_PICTURE_TYPE_P);        put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */        put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */        put_bits(&s->pb,1,s->no_rounding); /* Rounding Type */        put_bits(&s->pb,2,0); /* Reserved */        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */        /* This should be here if PLUSPTYPE */        put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */        if (format == 8) {            /* Custom Picture Format (CPFMT) */            s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);            put_bits(&s->pb,4,s->aspect_ratio_info);            put_bits(&s->pb,9,(s->width >> 2) - 1);            put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */            put_bits(&s->pb,9,(s->height >> 2));            if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){                put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);//.........这里部分代码省略.........
Developer: xkfz007, Project: libav, Lines: 101


Example 6: decode_frame

static int decode_frame(AVCodecContext *avctx,                            void *data, int *got_frame,                            AVPacket *avpkt){    AnsiContext *s = avctx->priv_data;    uint8_t *buf = avpkt->data;    int buf_size = avpkt->size;    const uint8_t *buf_end   = buf+buf_size;    int ret, i, count;    ret = ff_reget_buffer(avctx, s->frame);    if (ret < 0){        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed/n");        return ret;    }    if (!avctx->frame_number) {        memset(s->frame->data[0], 0, avctx->height * FFABS(s->frame->linesize[0]));        memset(s->frame->data[1], 0, AVPALETTE_SIZE);    }    s->frame->pict_type           = AV_PICTURE_TYPE_I;    s->frame->palette_has_changed = 1;    memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);    while(buf < buf_end) {        switch(s->state) {        case STATE_NORMAL:            switch (buf[0]) {            case 0x00: //NUL            case 0x07: //BEL            case 0x1A: //SUB                /* ignore */                break;            case 0x08: //BS                s->x = FFMAX(s->x - 1, 0);                break;            case 0x09: //HT                i = s->x / FONT_WIDTH;                count = ((i + 8) & ~7) - i;                for (i = 0; i < count; i++)                    draw_char(avctx, ' ');                break;            case 0x0A: //LF                hscroll(avctx);            case 0x0D: //CR                s->x = 0;                break;            case 0x0C: //FF                erase_screen(avctx);                break;            case 0x1B: //ESC                s->state = STATE_ESCAPE;                break;            default:                draw_char(avctx, buf[0]);            }            break;        case STATE_ESCAPE:            if (buf[0] == '[') {                s->state   = STATE_CODE;                s->nb_args = 0;                s->args[0] = 0;            } else {                s->state = STATE_NORMAL;                draw_char(avctx, 0x1B);                continue;            }            break;        case STATE_CODE:            switch(buf[0]) {            case '0': case '1': case '2': case '3': case '4':            case '5': case '6': case '7': case '8': case '9':                if (s->nb_args < MAX_NB_ARGS)                    s->args[s->nb_args] = s->args[s->nb_args] * 10 + buf[0] - '0';                break;            case ';':                s->nb_args++;                if (s->nb_args < MAX_NB_ARGS)                    s->args[s->nb_args] = 0;                break;            case 'M':                s->state = STATE_MUSIC_PREAMBLE;                break;            case '=': case '?':                /* ignore */                break;            default:                if (s->nb_args > MAX_NB_ARGS)                    av_log(avctx, AV_LOG_WARNING, "args overflow (%i)/n", s->nb_args);                if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args])                    s->nb_args++;                if ((ret = execute_code(avctx, buf[0])) < 0)                    return ret;                s->state = STATE_NORMAL;            }            break;        case STATE_MUSIC_PREAMBLE:            if (buf[0] == 0x0E || buf[0] == 0x1B)                s->state = STATE_NORMAL;            /* ignore music data *///.........这里部分代码省略.........
Developer: AVLeo, Project: libav, Lines: 101


Example 7: decode_frame

static int decode_frame(AVCodecContext *avctx, void *data,                        int *got_frame, AVPacket *avpkt){    const uint8_t *buf = avpkt->data;    int buf_size = avpkt->size;    C93DecoderContext * const c93 = avctx->priv_data;    AVFrame * const newpic = c93->pictures[c93->currentpic];    AVFrame * const oldpic = c93->pictures[c93->currentpic^1];    GetByteContext gb;    uint8_t *out;    int stride, ret, i, x, y, b, bt = 0;    if ((ret = ff_set_dimensions(avctx, WIDTH, HEIGHT)) < 0)        return ret;    c93->currentpic ^= 1;    if ((ret = ff_reget_buffer(avctx, newpic)) < 0)        return ret;    stride = newpic->linesize[0];    bytestream2_init(&gb, buf, buf_size);    b = bytestream2_get_byte(&gb);    if (b & C93_FIRST_FRAME) {        newpic->pict_type = AV_PICTURE_TYPE_I;        newpic->key_frame = 1;    } else {        newpic->pict_type = AV_PICTURE_TYPE_P;        newpic->key_frame = 0;    }    for (y = 0; y < HEIGHT; y += 8) {        out = newpic->data[0] + y * stride;        for (x = 0; x < WIDTH; x += 8) {            uint8_t *copy_from = oldpic->data[0];            unsigned int offset, j;            uint8_t cols[4], grps[4];            C93BlockType block_type;            if (!bt)                bt = bytestream2_get_byte(&gb);            block_type= bt & 0x0F;            switch (block_type) {            case C93_8X8_FROM_PREV:                offset = bytestream2_get_le16(&gb);                if ((ret = copy_block(avctx, out, copy_from, offset, 8, stride)) < 0)                    return ret;                break;            case C93_4X4_FROM_CURR:                copy_from = newpic->data[0];            case C93_4X4_FROM_PREV:                for (j = 0; j < 8; j += 4) {                    for (i = 0; i < 8; i += 4) {                        int offset = bytestream2_get_le16(&gb);                        int from_x = offset % WIDTH;                        int from_y = offset / WIDTH;                        if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&                            (FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {                            avpriv_request_sample(avctx, "block overlap %d %d %d %d/n", from_x, x+i, from_y, y+j);                            return AVERROR_INVALIDDATA;                        }                        if ((ret = copy_block(avctx, &out[j*stride+i],                                              copy_from, offset, 4, stride)) < 0)                            return ret;                    }                }                break;            case C93_8X8_2COLOR:                bytestream2_get_buffer(&gb, cols, 2);                for (i = 0; i < 8; i++) {                    draw_n_color(out + i*stride, stride, 8, 1, 1, cols,                                     NULL, bytestream2_get_byte(&gb));                }                break;            case C93_4X4_2COLOR:            case C93_4X4_4COLOR:            case C93_4X4_4COLOR_GRP:                for (j = 0; j < 8; j += 4) {                    for (i = 0; i < 8; i += 4) {                        if (block_type == C93_4X4_2COLOR) {                            bytestream2_get_buffer(&gb, cols, 2);                            draw_n_color(out + i + j*stride, stride, 4, 4,                                    1, cols, NULL, bytestream2_get_le16(&gb));                        } else if (block_type == C93_4X4_4COLOR) {                            bytestream2_get_buffer(&gb, cols, 4);                            draw_n_color(out + i + j*stride, stride, 4, 4,                                    
2, cols, NULL, bytestream2_get_le32(&gb));                        } else {                            bytestream2_get_buffer(&gb, grps, 4);                            draw_n_color(out + i + j*stride, stride, 4, 4,                                    1, cols, grps, bytestream2_get_le16(&gb));                        }                    }                }//.........这里部分代码省略.........
Developer: markjreed, Project: vice-emu, Lines: 101


Example 8: filter_slice16

static int filter_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ATADenoiseContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int size = s->size;
    const int mid = s->mid;
    int p, x, y, i, j;

    for (p = 0; p < s->nb_planes; p++) {
        const int h = s->planeheight[p];
        const int w = s->planewidth[p];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end = (h * (jobnr+1)) / nb_jobs;
        const uint16_t *src = (uint16_t *)(in->data[p] + slice_start * in->linesize[p]);
        uint16_t *dst = (uint16_t *)(out->data[p] + slice_start * out->linesize[p]);
        const int thra = s->thra[p];
        const int thrb = s->thrb[p];
        const uint8_t **data = (const uint8_t **)s->data[p];
        const int *linesize = (const int *)s->linesize[p];
        const uint16_t *srcf[SIZE];

        if (!((1 << p) & s->planes)) {
            av_image_copy_plane((uint8_t *)dst, out->linesize[p], (uint8_t *)src, in->linesize[p],
                                w * 2, slice_end - slice_start);
            continue;
        }

        for (i = 0; i < s->size; i++)
            srcf[i] = (const uint16_t *)(data[i] + slice_start * linesize[i]);

        for (y = slice_start; y < slice_end; y++) {
            for (x = 0; x < w; x++) {
                const int srcx = src[x];
                unsigned lsumdiff = 0, rsumdiff = 0;
                unsigned ldiff, rdiff;
                unsigned sum = srcx;
                int l = 0, r = 0;
                int srcjx, srcix;

                for (j = mid - 1, i = mid + 1; j >= 0 && i < size; j--, i++) {
                    srcjx = srcf[j][x];

                    ldiff = FFABS(srcx - srcjx);
                    lsumdiff += ldiff;
                    if (ldiff > thra ||
                        lsumdiff > thrb)
                        break;
                    l++;
                    sum += srcjx;

                    srcix = srcf[i][x];

                    rdiff = FFABS(srcx - srcix);
                    rsumdiff += rdiff;
                    if (rdiff > thra ||
                        rsumdiff > thrb)
                        break;
                    r++;
                    sum += srcix;
                }

                dst[x] = sum / (r + l + 1);
            }

            dst += out->linesize[p] / 2;
            src += in->linesize[p] / 2;

            for (i = 0; i < size; i++)
                srcf[i] += linesize[i] / 2;
        }
    }

    return 0;
}
Developer: Diagonactic, Project: plex-new-transcoder, Lines: 76
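Example 8 (FFmpeg's atadenoise filter) uses FFABS for adaptive temporal averaging: for each pixel it walks outward from the middle frame of the history and keeps averaging neighbouring frames only while each new absolute difference stays under thra and the running sum on that side stays under thrb. The sketch below condenses that inner loop for a single pixel; the names are illustrative, not the libavfilter API.

#include <stdint.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))  /* see libavutil/common.h */

/* Hypothetical helper: average history[mid] with neighbours on both sides,
 * stopping as soon as a single difference exceeds thra or the accumulated
 * difference on that side exceeds thrb (condensed from filter_slice16()). */
static uint16_t adaptive_temporal_average(const uint16_t *history, int size,
                                          int thra, int thrb)
{
    const int mid = size / 2;
    const int cur = history[mid];
    unsigned sum = cur, lsumdiff = 0, rsumdiff = 0;
    int count = 1;

    for (int j = mid - 1, i = mid + 1; j >= 0 && i < size; j--, i++) {
        unsigned ldiff = FFABS(cur - history[j]);
        lsumdiff += ldiff;
        if (ldiff > thra || lsumdiff > thrb)
            break;
        sum += history[j];
        count++;

        unsigned rdiff = FFABS(cur - history[i]);
        rsumdiff += rdiff;
        if (rdiff > thra || rsumdiff > thrb)
            break;
        sum += history[i];
        count++;
    }
    return sum / count;
}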


Example 9: mpeg1_encode_sequence_header

/* put sequence header if needed */static void mpeg1_encode_sequence_header(MpegEncContext *s){        unsigned int vbv_buffer_size;        unsigned int fps, v;        int i;        uint64_t time_code;        float best_aspect_error= 1E10;        float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio);        int constraint_parameter_flag;        if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)        if (s->current_picture.key_frame) {            AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];            /* mpeg1 header repeated every gop */            put_header(s, SEQ_START_CODE);            put_bits(&s->pb, 12, s->width);            put_bits(&s->pb, 12, s->height);#if 0 //MEANX            for(i=1; i<15; i++){                float error= aspect_ratio;                if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1)                    error-= 1.0/ff_mpeg1_aspect[i];                else                    error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;                error= FFABS(error);                if(error < best_aspect_error){                    best_aspect_error= error;                    s->aspect_ratio_info= i;                }            }#endif // MEANX            //MEANX put_bits(&s->pb, 4, s->aspect_ratio_info);            //MEANX put_bits(&s->pb, 4, s->frame_rate_index); // MEANX 4:3	     if(s->avctx->sample_aspect_ratio.num==16 && s->avctx->sample_aspect_ratio.den==9)            {                //printf("FFmpeg : Wide/n");                put_bits(&s->pb,4,3); //16:9            }            else        //4:3            {              if(s->codec_id == CODEC_ID_MPEG2VIDEO)                put_bits(&s->pb, 4, 2);              else                put_bits(&s->pb, 4, 12); // MPEG1            }// /MEANX// //MEANX PULLDOWN            put_bits(&s->pb, 4, s->frame_rate_index);if((s->flags2 & CODEC_FLAG2_32_PULLDOWN) && (s->codec_id == CODEC_ID_MPEG2VIDEO))            {                           put_bits(&s->pb, 4,4);            }            else            {                                                  put_bits(&s->pb, 4, s->frame_rate_index);            } //MEANX pulldown            if(s->avctx->rc_max_rate_header){ //MEANX we use header                v = (s->avctx->rc_max_rate_header + 399) / 400;                if (v > 0x3ffff && s->codec_id == CODEC_ID_MPEG1VIDEO)                    v = 0x3ffff;            }else{                v= 0x3FFFF;            }// MEANX we use rc_buffer_size_header here to force                // a correct rc_buffer_size            if(s->avctx->rc_buffer_size_header)                vbv_buffer_size = s->avctx->rc_buffer_size_header;            else                /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */                vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;            vbv_buffer_size= (vbv_buffer_size + 16383) / 16384;            put_bits(&s->pb, 18, v & 0x3FFFF);            put_bits(&s->pb, 1, 1); /* marker */            put_bits(&s->pb, 10, vbv_buffer_size & 0x3FF);            constraint_parameter_flag=                s->width <= 768 && s->height <= 576 &&                s->mb_width * s->mb_height <= 396 &&                s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 &&                framerate.num <= framerate.den*30 &&                s->avctx->me_range && s->avctx->me_range < 128 &&                vbv_buffer_size <= 20 &&                v <= 1856000/400 &&                s->codec_id == CODEC_ID_MPEG1VIDEO;            
put_bits(&s->pb, 1, constraint_parameter_flag);            ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);            ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);//.........这里部分代码省略.........
Developer: BackupTheBerlios, Project: avidemux-svn, Lines: 101


Example 10: put_subframe

static void put_subframe(DCAContext *c,
                         int32_t subband_data[8 * SUBSUBFRAMES][MAX_CHANNELS][32],
                         int subframe)
{
    int i, sub, ss, ch, max_value;
    int32_t *lfe_data = c->lfe_data + 4 * SUBSUBFRAMES * subframe;

    /* Subsubframes count */
    put_bits(&c->pb, 2, SUBSUBFRAMES -1);

    /* Partial subsubframe sample count: dummy */
    put_bits(&c->pb, 3, 0);

    /* Prediction mode: no ADPCM, in each channel and subband */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 1, 0);

    /* Prediction VQ address: not transmitted */
    /* Bit allocation index */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 5, QUANTIZER_BITS+3);

    if (SUBSUBFRAMES > 1) {
        /* Transition mode: none for each channel and subband */
        for (ch = 0; ch < c->prim_channels; ch++)
            for (sub = 0; sub < DCA_SUBBANDS; sub++)
                put_bits(&c->pb, 1, 0); /* codebook A4 */
    }

    /* Determine scale_factor */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++) {
            max_value = 0;
            for (i = 0; i < 8 * SUBSUBFRAMES; i++)
                max_value = FFMAX(max_value, FFABS(subband_data[i][ch][sub]));
            c->scale_factor[ch][sub] = find_scale_factor7(max_value, QUANTIZER_BITS);
        }

    if (c->lfe_channel) {
        max_value = 0;
        for (i = 0; i < 4 * SUBSUBFRAMES; i++)
            max_value = FFMAX(max_value, FFABS(lfe_data[i]));
        c->lfe_scale_factor = find_scale_factor7(max_value, LFE_BITS);
    }

    /* Scale factors: the same for each channel and subband,
       encoded according to Table D.1.2 */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 7, c->scale_factor[ch][sub]);

    /* Joint subband scale factor codebook select: not transmitted */
    /* Scale factors for joint subband coding: not transmitted */
    /* Stereo down-mix coefficients: not transmitted */
    /* Dynamic range coefficient: not transmitted */
    /* Side information CRC check word: not transmitted */
    /* VQ encoded high frequency subbands: not transmitted */

    /* LFE data */
    if (c->lfe_channel) {
        for (i = 0; i < 4 * SUBSUBFRAMES; i++)
            put_sample7(c, lfe_data[i], LFE_BITS, c->lfe_scale_factor);
        put_bits(&c->pb, 8, c->lfe_scale_factor);
    }

    /* Audio data (subsubframes) */
    for (ss = 0; ss < SUBSUBFRAMES ; ss++)
        for (ch = 0; ch < c->prim_channels; ch++)
            for (sub = 0; sub < DCA_SUBBANDS; sub++)
                for (i = 0; i < 8; i++)
                    put_sample7(c, subband_data[ss * 8 + i][ch][sub], QUANTIZER_BITS, c->scale_factor[ch][sub]);

    /* DSYNC */
    put_bits(&c->pb, 16, 0xffff);
}
Developer: AdamCDunlap, Project: hmc-robot-drivers, Lines: 78
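Example 10 (FFmpeg's DCA/DTS encoder) shows a third recurring use: FFABS inside FFMAX to find the peak absolute magnitude of a block of samples, which then selects the quantizer scale factor. A minimal version of that scan is shown below (generic names; the DCA scale-factor tables are not reproduced).

#include <stdint.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))  /* see libavutil/common.h */

/* Peak absolute magnitude of a block of samples, as used above before
 * choosing a scale factor (illustrative helper, not FFmpeg API). */
static int32_t peak_magnitude(const int32_t *samples, int n)
{
    int32_t max_value = 0;
    for (int i = 0; i < n; i++) {
        int32_t mag = FFABS(samples[i]);
        if (mag > max_value)
            max_value = mag;
    }
    return max_value;
}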


Example 11: swap_channel_layouts_on_filter

static void swap_channel_layouts_on_filter(AVFilterContext *filter){    AVFilterLink *link = NULL;    int i, j, k;    for (i = 0; i < filter->nb_inputs; i++) {        link = filter->inputs[i];        if (link->type == AVMEDIA_TYPE_AUDIO &&            link->out_channel_layouts->nb_channel_layouts == 1)            break;    }    if (i == filter->nb_inputs)        return;    for (i = 0; i < filter->nb_outputs; i++) {        AVFilterLink *outlink = filter->outputs[i];        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;        if (outlink->type != AVMEDIA_TYPE_AUDIO ||            outlink->in_channel_layouts->nb_channel_layouts < 2)            continue;        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {            uint64_t  in_chlayout = link->out_channel_layouts->channel_layouts[0];            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];            int  in_channels      = av_get_channel_layout_nb_channels(in_chlayout);            int out_channels      = av_get_channel_layout_nb_channels(out_chlayout);            int count_diff        = out_channels - in_channels;            int matched_channels, extra_channels;            int score = 100000;            if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {                /* Compute score in case the input or output layout encodes                   a channel count; in this case the score is not altered by                   the computation afterwards, as in_chlayout and                   out_chlayout have both been set to 0 */                if (FF_LAYOUT2COUNT(in_chlayout))                    in_channels = FF_LAYOUT2COUNT(in_chlayout);                if (FF_LAYOUT2COUNT(out_chlayout))                    out_channels = FF_LAYOUT2COUNT(out_chlayout);                score -= 10000 + FFABS(out_channels - in_channels) +                         (in_channels > out_channels ? 
10000 : 0);                in_chlayout = out_chlayout = 0;                /* Let the remaining computation run, even if the score                   value is not altered */            }            /* channel substitution */            for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {                uint64_t cmp0 = ch_subst[k][0];                uint64_t cmp1 = ch_subst[k][1];                if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&                    (out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {                    in_chlayout  &= ~cmp0;                    out_chlayout &= ~cmp1;                    /* add score for channel match, minus a deduction for                       having to do the substitution */                    score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;                }            }            /* no penalty for LFE channel mismatch */            if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&                (out_chlayout & AV_CH_LOW_FREQUENCY))                score += 10;            in_chlayout  &= ~AV_CH_LOW_FREQUENCY;            out_chlayout &= ~AV_CH_LOW_FREQUENCY;            matched_channels = av_get_channel_layout_nb_channels(in_chlayout &                                                                 out_chlayout);            extra_channels   = av_get_channel_layout_nb_channels(out_chlayout &                                                                 (~in_chlayout));            score += 10 * matched_channels - 5 * extra_channels;            if (score > best_score ||                (count_diff < best_count_diff && score == best_score)) {                best_score = score;                best_idx   = j;                best_count_diff = count_diff;            }        }        av_assert0(best_idx >= 0);        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],               outlink->in_channel_layouts->channel_layouts[best_idx]);    }}
Developer: Ivnz, Project: iFrameExtracotrWithFFMPEG, Lines: 88


Example 12: main

int main(void)
{
    AVRational a,b,r;
    for (a.num = -2; a.num <= 2; a.num++) {
        for (a.den = -2; a.den <= 2; a.den++) {
            for (b.num = -2; b.num <= 2; b.num++) {
                for (b.den = -2; b.den <= 2; b.den++) {
                    int c = av_cmp_q(a,b);
                    double d = av_q2d(a) == av_q2d(b) ?
                               0 : (av_q2d(a) - av_q2d(b));
                    if (d > 0)       d = 1;
                    else if (d < 0)  d = -1;
                    else if (d != d) d = INT_MIN;
                    if (c != d)
                        av_log(NULL, AV_LOG_ERROR, "%d/%d %d/%d, %d %f\n", a.num,
                               a.den, b.num, b.den, c,d);
                    r = av_sub_q(av_add_q(b,a), b);
                    if(b.den && (r.num*a.den != a.num*r.den || !r.num != !a.num || !r.den != !a.den))
                        av_log(NULL, AV_LOG_ERROR, "%d/%d ", r.num, r.den);
                }
            }
        }
    }

    for (a.num = 1; a.num <= 10; a.num++) {
        for (a.den = 1; a.den <= 10; a.den++) {
            if (av_gcd(a.num, a.den) > 1)
                continue;
            for (b.num = 1; b.num <= 10; b.num++) {
                for (b.den = 1; b.den <= 10; b.den++) {
                    int start;
                    if (av_gcd(b.num, b.den) > 1)
                        continue;
                    if (av_cmp_q(b, a) < 0)
                        continue;
                    for (start = 0; start < 10 ; start++) {
                        int acc= start;
                        int i;

                        for (i = 0; i<100; i++) {
                            int exact = start + av_rescale_q(i+1, b, a);

                            acc = av_add_stable(a, acc, b, 1);
                            if (FFABS(acc - exact) > 2) {
                                av_log(NULL, AV_LOG_ERROR, "%d/%d %d/%d, %d %d\n", a.num,
                                       a.den, b.num, b.den, acc, exact);
                                return 1;
                            }
                        }
                    }
                }
            }
        }
    }

    for (a.den = 1; a.den < 0x100000000U/3; a.den*=3) {
        for (a.num = -1; a.num < (1<<27); a.num += 1 + a.num/100) {
            float f  = av_int2float(av_q2intfloat(a));
            float f2 = av_q2d(a);

            if (fabs(f - f2) > fabs(f)/5000000) {
                av_log(NULL, AV_LOG_ERROR, "%d/%d %f %f\n", a.num,
                       a.den, f, f2);
                return 1;
            }
        }
    }
    return 0;
}
Developer: 309746069, Project: FFmpeg, Lines: 69
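Example 12 is a self-test of FFmpeg's rational arithmetic; FFABS there bounds the drift of the incremental av_add_stable() result against the exact av_rescale_q() value. The same "fail when |actual - expected| exceeds a tolerance" idiom is useful in any numeric test, for instance:

#include <stdio.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))  /* see libavutil/common.h */

/* Hypothetical test helper: report failure when the computed value drifts
 * more than `tolerance` away from the expected reference value. */
static int check_within(int actual, int expected, int tolerance)
{
    if (FFABS(actual - expected) > tolerance) {
        fprintf(stderr, "drift too large: got %d, expected %d (tolerance %d)\n",
                actual, expected, tolerance);
        return 1;  /* test failed */
    }
    return 0;
}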


Example 13: vectorscope8

static void vectorscope8(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd){    const uint8_t * const *src = (const uint8_t * const *)in->data;    const int slinesizex = in->linesize[s->x];    const int slinesizey = in->linesize[s->y];    const int slinesized = in->linesize[pd];    const int dlinesize = out->linesize[0];    const int intensity = s->intensity;    const int px = s->x, py = s->y;    const int h = s->planeheight[py];    const int w = s->planewidth[px];    const uint8_t *spx = src[px];    const uint8_t *spy = src[py];    const uint8_t *spd = src[pd];    const int hsub = s->hsub;    const int vsub = s->vsub;    uint8_t **dst = out->data;    uint8_t *dpx = dst[px];    uint8_t *dpy = dst[py];    uint8_t *dpd = dst[pd];    const int tmin = s->tmin;    const int tmax = s->tmax;    int i, j, k;    for (k = 0; k < 4 && dst[k]; k++)        for (i = 0; i < out->height ; i++)            memset(dst[k] + i * out->linesize[k],                   (s->mode == COLOR || s->mode == COLOR5) && k == s->pd ? 0 : s->bg_color[k], out->width);    switch (s->mode) {    case COLOR5:    case COLOR:    case GRAY:        if (s->is_yuv) {            for (i = 0; i < h; i++) {                const int iwx = i * slinesizex;                const int iwy = i * slinesizey;                const int iwd = i * slinesized;                for (j = 0; j < w; j++) {                    const int x = spx[iwx + j];                    const int y = spy[iwy + j];                    const int z = spd[iwd + j];                    const int pos = y * dlinesize + x;                    if (z < tmin || z > tmax)                        continue;                    dpd[pos] = FFMIN(dpd[pos] + intensity, 255);                    if (dst[3])                        dst[3][pos] = 255;                }            }        } else {            for (i = 0; i < h; i++) {                const int iwx = i * slinesizex;                const int iwy = i * slinesizey;                const int iwd = i * slinesized;                for (j = 0; j < w; j++) {                    const int x = spx[iwx + j];                    const int y = spy[iwy + j];                    const int z = spd[iwd + j];                    const int pos = y * dlinesize + x;                    if (z < tmin || z > tmax)                        continue;                    dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255);                    dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255);                    dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255);                    if (dst[3])                        dst[3][pos] = 255;                }            }        }        break;    case COLOR2:        if (s->is_yuv) {            for (i = 0; i < h; i++) {                const int iw1 = i * slinesizex;                const int iw2 = i * slinesizey;                const int iwd = i * slinesized;                for (j = 0; j < w; j++) {                    const int x = spx[iw1 + j];                    const int y = spy[iw2 + j];                    const int z = spd[iwd + j];                    const int pos = y * dlinesize + x;                    if (z < tmin || z > tmax)                        continue;                    if (!dpd[pos])                        dpd[pos] = FFABS(128 - x) + FFABS(128 - y);                    dpx[pos] = x;                    dpy[pos] = y;                    if (dst[3])                        dst[3][pos] = 255;                }            }        } else {            for (i = 0; i < h; i++) {//.........这里部分代码省略.........
Developer: SilverCrux, Project: FFmpeg, Lines: 101


Example 14: flv_read_packet

static int flv_read_packet(AVFormatContext *s, AVPacket *pkt){    FLVContext *flv = s->priv_data;    int ret, i, type, size, flags;    int stream_type=-1;    int64_t next, pos;    int64_t dts, pts = AV_NOPTS_VALUE;    int av_uninit(channels);    int av_uninit(sample_rate);    AVStream *st = NULL;    for(;; avio_skip(s->pb, 4)) { /* pkt size is repeated at end. skip it */        pos = avio_tell(s->pb);        type = avio_r8(s->pb);        size = avio_rb24(s->pb);        dts = avio_rb24(s->pb);        dts |= avio_r8(s->pb) << 24;        av_dlog(s, "type:%d, size:%d, dts:%"PRId64"/n", type, size, dts);        if (url_feof(s->pb))            return AVERROR_EOF;        avio_skip(s->pb, 3); /* stream id, always 0 */        flags = 0;        if (flv->validate_next < flv->validate_count) {            int64_t validate_pos = flv->validate_index[flv->validate_next].pos;            if (pos == validate_pos) {                if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=                        VALIDATE_INDEX_TS_THRESH) {                    flv->validate_next++;                } else {                    clear_index_entries(s, validate_pos);                    flv->validate_count = 0;                }            } else if (pos > validate_pos) {                clear_index_entries(s, validate_pos);                flv->validate_count = 0;            }        }        if(size == 0)            continue;        next= size + avio_tell(s->pb);        if (type == FLV_TAG_TYPE_AUDIO) {            stream_type=FLV_STREAM_TYPE_AUDIO;            flags = avio_r8(s->pb);            size--;        } else if (type == FLV_TAG_TYPE_VIDEO) {            stream_type=FLV_STREAM_TYPE_VIDEO;            flags = avio_r8(s->pb);            size--;            if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)                goto skip;        } else if (type == FLV_TAG_TYPE_META) {            if (size > 13+1+4 && dts == 0) { // Header-type metadata stuff                flv_read_metabody(s, next);                goto skip;            } else if (dts != 0) { // Script-data "special" metadata frames - don't skip                stream_type=FLV_STREAM_TYPE_DATA;            } else {                goto skip;            }        } else {            av_log(s, AV_LOG_DEBUG, "skipping flv packet: type %d, size %d, flags %d/n", type, size, flags);skip:            avio_seek(s->pb, next, SEEK_SET);            continue;        }        /* skip empty data packets */        if (!size)            continue;        /* now find stream */        for(i=0; i<s->nb_streams; i++) {            st = s->streams[i];            if (st->id == stream_type)                break;        }        if(i == s->nb_streams) {            av_log(s, AV_LOG_WARNING, "Stream discovered after head already parsed/n");            st = create_stream(s, stream_type,            (int[]) {                AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_DATA            }[stream_type]);        }        av_dlog(s, "%d %X %d /n", stream_type, flags, st->discard);        if(  (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO)))                ||(st->discard >= AVDISCARD_BIDIR  &&  ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && (stream_type == FLV_STREAM_TYPE_VIDEO)))                || st->discard >= AVDISCARD_ALL          ) {            avio_seek(s->pb, next, SEEK_SET);            continue;        }        if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == 
FLV_FRAME_KEY)            av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);        break;    }
Developer: zzilla, Project: ONVIF-Device-Manager, Lines: 99


Example 15: AAC_RENAME

//.........这里部分代码省略.........                goto err;        }    } else        memset(ps->iid_par, 0, sizeof(ps->iid_par));    if (ps->enable_icc)        for (e = 0; e < ps->num_env; e++) {            int dt = get_bits1(gb);            if (read_icc_data(avctx, gb, ps, ps->icc_par, dt ? huff_icc_dt : huff_icc_df, e, dt))                goto err;        }    else        memset(ps->icc_par, 0, sizeof(ps->icc_par));    if (ps->enable_ext) {        int cnt = get_bits(gb, 4);        if (cnt == 15) {            cnt += get_bits(gb, 8);        }        cnt *= 8;        while (cnt > 7) {            int ps_extension_id = get_bits(gb, 2);            cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id);        }        if (cnt < 0) {            av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d/n", cnt);            goto err;        }        skip_bits(gb, cnt);    }    ps->enable_ipdopd &= !PS_BASELINE;    //Fix up envelopes    if (!ps->num_env || ps->border_position[ps->num_env] < numQMFSlots - 1) {        //Create a fake envelope        int source = ps->num_env ? ps->num_env - 1 : ps->num_env_old - 1;        int b;        if (source >= 0 && source != ps->num_env) {            if (ps->enable_iid) {                memcpy(ps->iid_par+ps->num_env, ps->iid_par+source, sizeof(ps->iid_par[0]));            }            if (ps->enable_icc) {                memcpy(ps->icc_par+ps->num_env, ps->icc_par+source, sizeof(ps->icc_par[0]));            }            if (ps->enable_ipdopd) {                memcpy(ps->ipd_par+ps->num_env, ps->ipd_par+source, sizeof(ps->ipd_par[0]));                memcpy(ps->opd_par+ps->num_env, ps->opd_par+source, sizeof(ps->opd_par[0]));            }        }        if (ps->enable_iid){            for (b = 0; b < ps->nr_iid_par; b++) {                if (FFABS(ps->iid_par[ps->num_env][b]) > 7 + 8 * ps->iid_quant) {                    av_log(avctx, AV_LOG_ERROR, "iid_par invalid/n");                    goto err;                }            }        }        if (ps->enable_icc){            for (b = 0; b < ps->nr_iid_par; b++) {                if (ps->icc_par[ps->num_env][b] > 7U) {                    av_log(avctx, AV_LOG_ERROR, "icc_par invalid/n");                    goto err;                }            }        }        ps->num_env++;        ps->border_position[ps->num_env] = numQMFSlots - 1;    }    ps->is34bands_old = ps->is34bands;    if (!PS_BASELINE && (ps->enable_iid || ps->enable_icc))        ps->is34bands = (ps->enable_iid && ps->nr_iid_par == 34) ||                        (ps->enable_icc && ps->nr_icc_par == 34);    //Baseline    if (!ps->enable_ipdopd) {        memset(ps->ipd_par, 0, sizeof(ps->ipd_par));        memset(ps->opd_par, 0, sizeof(ps->opd_par));    }    if (header)        ps->start = 1;    bits_consumed = get_bits_count(gb) - bit_count_start;    if (bits_consumed <= bits_left) {        skip_bits_long(gb_host, bits_consumed);        return bits_consumed;    }    av_log(avctx, AV_LOG_ERROR, "Expected to read %d PS bits actually read %d./n", bits_left, bits_consumed);err:    ps->start = 0;    skip_bits_long(gb_host, bits_left);    memset(ps->iid_par, 0, sizeof(ps->iid_par));    memset(ps->icc_par, 0, sizeof(ps->icc_par));    memset(ps->ipd_par, 0, sizeof(ps->ipd_par));    memset(ps->opd_par, 0, sizeof(ps->opd_par));    return bits_left;}
Developer: clook, Project: FFmpeg, Lines: 101


Example 16: encode_init

static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    if (avctx->profile == FF_PROFILE_UNKNOWN) {
        if (avctx->level != FF_LEVEL_UNKNOWN) {
            av_log(avctx, AV_LOG_ERROR, "Set profile and level\n");
            return -1;
        }
        avctx->profile = avctx->pix_fmt == PIX_FMT_YUV420P ? 4 : 0; /* Main or 4:2:2 */
    }

    if (avctx->level == FF_LEVEL_UNKNOWN) {
        if (avctx->profile == 0) { /* 4:2:2 */
            if (avctx->width <= 720 && avctx->height <= 608) avctx->level = 5; /* Main */
            else                                             avctx->level = 2; /* High */
        } else {
            if (avctx->profile != 1 && avctx->pix_fmt != PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n");
                return -1;
            }
            if (avctx->width <= 720 && avctx->height <= 576) avctx->level = 8; /* Main */
            else if (avctx->width <= 1440)                   avctx->level = 6; /* High 1440 */
            else                                             avctx->level = 4; /* High */
        }
    }

    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        int max = 0;
        if (avctx->profile == 0) {
            max = avctx->level == 5 ? 9437184 : 47185920;
        } else if (avctx->profile == 4) {
            switch (avctx->level) {
            case 8: max = 1835008;  break;
            case 6: max = 7340032;  break;
            case 4: max = 9781248;  break;
            }
        } else {
            switch (avctx->level) {
            case 8: max = 2441216;  break;
            case 6: max = 9781248;  break;
            case 4: max = 12222464; break;
            }
        }
        avctx->rc_buffer_size = FFMIN(max, avctx->rc_max_rate*65535LL/90000);
    }

    switch (avctx->color_primaries) {
    case AVCOL_PRI_BT709:
        avctx->color_transfer = AVCOL_TRC_BT709;
        avctx->color_matrix   = AVCOL_MTX_BT709;
        break;
    case AVCOL_PRI_SMPTE170M:
    case AVCOL_PRI_BT470BG:
        avctx->color_transfer = AVCOL_TRC_BT709;
        avctx->color_matrix   = AVCOL_MTX_SMPTE170M;
        break;
    }

    if (MPV_encode_init(avctx) < 0)
        return -1;

    if (avctx->sample_aspect_ratio.num > 0 &&
        avctx->sample_aspect_ratio.den > 0) {
        float best_aspect_error = 1E10;
        float aspect_ratio = av_q2d(s->avctx->sample_aspect_ratio);
        int i;

        for (i = 1; i < 15; i++) {
            float error = aspect_ratio;
            if (s->codec_id == CODEC_ID_MPEG1VIDEO || i <= 1)
                error -= 1.0/ff_mpeg1_aspect[i];
            else
                error -= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;

            error = FFABS(error);

            // <= so square pixels can match 4:3 or 16:9
            if (error <= best_aspect_error) {
                best_aspect_error    = error;
                s->aspect_ratio_info = i;
            }
        }
    }
    if (!s->aspect_ratio_info)
        s->aspect_ratio_info = 1;

    if (find_frame_rate_index(s) < 0) {
        if (s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
            av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n", avctx->time_base.den, avctx->time_base.num);
            return -1;
        } else {
            av_log(avctx, AV_LOG_INFO, "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n", avctx->time_base.den, avctx->time_base.num);
        }
    }

    if (s->timecode) {
        int drop, framenum;
        AVRational fps = ff_frame_rate_tab[s->frame_rate_index];
        framenum = ff_timecode_to_framenum(s->timecode, (AVRational){fps.den, fps.num}, &drop);
        if (framenum < 0) {
//......... part of the code omitted .........
Developer: jfuentesbrevity, Project: ffmbc, Lines: 101


Example 17: vectorscope

static void vectorscope(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd){    const uint8_t * const *src = (const uint8_t * const *)in->data;    const int slinesizex = in->linesize[s->x];    const int slinesizey = in->linesize[s->y];    const int dlinesize = out->linesize[0];    int i, j, px = s->x, py = s->y;    const int h = s->planeheight[py];    const int w = s->planewidth[px];    const uint8_t *spx = src[px];    const uint8_t *spy = src[py];    uint8_t **dst = out->data;    uint8_t *dpx = dst[px];    uint8_t *dpy = dst[py];    uint8_t *dpd = dst[pd];    switch (s->mode) {    case COLOR:    case GRAY:        if (s->is_yuv) {            for (i = 0; i < h; i++) {                const int iwx = i * slinesizex;                const int iwy = i * slinesizey;                for (j = 0; j < w; j++) {                    const int x = spx[iwx + j];                    const int y = spy[iwy + j];                    const int pos = y * dlinesize + x;                    dpd[pos] = FFMIN(dpd[pos] + 1, 255);                    if (dst[3])                        dst[3][pos] = 255;                }            }        } else {            for (i = 0; i < h; i++) {                const int iwx = i * slinesizex;                const int iwy = i * slinesizey;                for (j = 0; j < w; j++) {                    const int x = spx[iwx + j];                    const int y = spy[iwy + j];                    const int pos = y * dlinesize + x;                    dst[0][pos] = FFMIN(dst[0][pos] + 1, 255);                    dst[1][pos] = FFMIN(dst[1][pos] + 1, 255);                    dst[2][pos] = FFMIN(dst[2][pos] + 1, 255);                    if (dst[3])                        dst[3][pos] = 255;                }            }        }        if (s->mode == COLOR) {            for (i = 0; i < out->height; i++) {                for (j = 0; j < out->width; j++) {                    if (!dpd[i * out->linesize[pd] + j]) {                        dpx[i * out->linesize[px] + j] = j;                        dpy[i * out->linesize[py] + j] = i;                    }                }            }        }        break;    case COLOR2:        if (s->is_yuv) {            for (i = 0; i < h; i++) {                const int iw1 = i * slinesizex;                const int iw2 = i * slinesizey;                for (j = 0; j < w; j++) {                    const int x = spx[iw1 + j];                    const int y = spy[iw2 + j];                    const int pos = y * dlinesize + x;                    if (!dpd[pos])                        dpd[pos] = FFABS(128 - x) + FFABS(128 - y);                    dpx[pos] = x;                    dpy[pos] = y;                    if (dst[3])                        dst[3][pos] = 255;                }            }        } else {            for (i = 0; i < h; i++) {                const int iw1 = i * slinesizex;                const int iw2 = i * slinesizey;                for (j = 0; j < w; j++) {                    const int x = spx[iw1 + j];                    const int y = spy[iw2 + j];                    const int pos = y * dlinesize + x;                    if (!dpd[pos])                        dpd[pos] = FFMIN(x + y, 255);                    dpx[pos] = x;                    dpy[pos] = y;                    if (dst[3])                        dst[3][pos] = 255;                }            }        }        break;    case COLOR3:        for (i = 0; i < h; i++) {//.........这里部分代码省略.........
Developer: quanxinglong, Project: FFmpeg, Lines: 101


Example 18: msrle_decode_pal4

static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
                             GetByteContext *gb)
{
    unsigned char rle_code;
    unsigned char extra_byte, odd_pixel;
    unsigned char stream_byte;
    unsigned int pixel_ptr = 0;
    int row_dec = pic->linesize[0];
    int row_ptr = (avctx->height - 1) * row_dec;
    int frame_size = FFABS(row_dec) * avctx->height;
    int i;

    while (row_ptr >= 0) {
        if (bytestream2_get_bytes_left(gb) <= 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "MS RLE: bytestream overrun, %d rows left\n",
                   row_ptr);
            return AVERROR_INVALIDDATA;
        }
        rle_code = stream_byte = bytestream2_get_byteu(gb);
        if (rle_code == 0) {
            /* fetch the next byte to see how to handle escape code */
            stream_byte = bytestream2_get_byte(gb);
            if (stream_byte == 0) {
                /* line is done, goto the next one */
                row_ptr -= row_dec;
                pixel_ptr = 0;
            } else if (stream_byte == 1) {
                /* decode is done */
                return 0;
            } else if (stream_byte == 2) {
                /* reposition frame decode coordinates */
                stream_byte = bytestream2_get_byte(gb);
                pixel_ptr += stream_byte;
                stream_byte = bytestream2_get_byte(gb);
                row_ptr -= stream_byte * row_dec;
            } else {
                // copy pixels from encoded stream
                odd_pixel =  stream_byte & 1;
                rle_code = (stream_byte + 1) / 2;
                extra_byte = rle_code & 0x01;
                if (row_ptr + pixel_ptr + stream_byte > frame_size ||
                    bytestream2_get_bytes_left(gb) < rle_code) {
                    av_log(avctx, AV_LOG_ERROR,
                           "MS RLE: frame/stream ptr just went out of bounds (copy)\n");
                    return AVERROR_INVALIDDATA;
                }

                for (i = 0; i < rle_code; i++) {
                    if (pixel_ptr >= avctx->width)
                        break;
                    stream_byte = bytestream2_get_byteu(gb);
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4;
                    pixel_ptr++;
                    if (i + 1 == rle_code && odd_pixel)
                        break;
                    if (pixel_ptr >= avctx->width)
                        break;
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F;
                    pixel_ptr++;
                }

                // if the RLE code is odd, skip a byte in the stream
                if (extra_byte)
                    bytestream2_skip(gb, 1);
            }
        } else {
            // decode a run of data
            if (row_ptr + pixel_ptr + stream_byte > frame_size) {
Developer: OS2World, Project: LIB-libav, Lines: 69


Example 19: CreatePopUpMenu

//.........这里部分代码省略.........//    AddMenuItem( SubMenu,"Fwd 10 sec", evForward10sec );//    AddMenuItem( SubMenu,"Back 1 min", evBackward1min );//    AddMenuItem( SubMenu,"Fwd 1 min", evForward1min );//   SubMenu=AddSubMenu( Menu,MSGTR_MENU_Size );//    AddMenuItem( SubMenu,MSGTR_GUI_SizeNormal"      ", evNormalSize );//    AddMenuItem( SubMenu,MSGTR_GUI_SizeDouble, evDoubleSize );//    AddMenuItem( SubMenu,MSGTR_GUI_SizeFullscreen, evFullScreen + ( True << 16 ) );  if ( guiInfo.VideoWindow )   {    AddSeparator( Menu );    RotationMenu=AddSubMenu( window1, (const char*)rotate_xpm, Menu,MSGTR_GUI_Rotation );    N=AddMenuCheckItem( window1, (const char*)rotate0_xpm, RotationMenu,MSGTR_GUI__none_, guiInfo.Rotation == -1, evSetRotation );    D=AddMenuCheckItem( window1, (const char*)rotate90cw_xpm, RotationMenu,MSGTR_GUI_Rotation90CW, guiInfo.Rotation == 1, evSetRotation + ( 90 << 16 ) );    F=AddMenuCheckItem( window1, (const char*)rotate90ccw_xpm, RotationMenu,MSGTR_GUI_Rotation90CCW, guiInfo.Rotation == 2, evSetRotation + ( -90 << 16 ) );    H=AddMenuCheckItem( window1, (const char*)rotate180_xpm, RotationMenu,MSGTR_GUI_Rotation180, guiInfo.Rotation == 8, evSetRotation + ( 180 << 16 ) );    if ( !guiInfo.Playing )     {      gtk_widget_set_sensitive( N,FALSE );      gtk_widget_set_sensitive( D,FALSE );      gtk_widget_set_sensitive( F,FALSE );      gtk_widget_set_sensitive( H,FALSE );     }   }  if ( guiInfo.VideoWindow )   {    int a11 = False, a169 = False, a43 = False, a235 = False;    if (movie_aspect == -1.0f) a11 = True;    else     {       a169 = (FFABS(movie_aspect - 16.0f / 9.0f) <= 0.01f);       a43 = (FFABS(movie_aspect - 4.0f / 3.0f) <= 0.01f);       a235 = (FFABS(movie_aspect - 2.35f) <= 0.01f);     }    AspectMenu=AddSubMenu( window1, (const char*)aspect_xpm, Menu,MSGTR_GUI_AspectRatio );    H=AddMenuCheckItem( window1, (const char*)aspect11_xpm, AspectMenu,MSGTR_GUI_Original, a11, evSetAspect + ( 1 << 16 ) );    N=AddMenuCheckItem( window1, (const char*)aspect169_xpm, AspectMenu,"16:9", a169, evSetAspect + ( 2 << 16 ) );    D=AddMenuCheckItem( window1, (const char*)aspect43_xpm, AspectMenu,"4:3", a43, evSetAspect + ( 3 << 16 ) );    F=AddMenuCheckItem( window1, (const char*)aspect235_xpm, AspectMenu,MSGTR_GUI_235To1, a235, evSetAspect + ( 4 << 16 ) );    if ( !guiInfo.Playing )     {      gtk_widget_set_sensitive( H,FALSE );      gtk_widget_set_sensitive( N,FALSE );      gtk_widget_set_sensitive( D,FALSE );      gtk_widget_set_sensitive( F,FALSE );     }   }  if ( guiInfo.VideoWindow )   {    int b1 = False, b2 = False, b_half = False;    if ( !guiApp.videoWindow.isFullScreen && guiInfo.Playing )     {      if ( ( guiApp.videoWindow.Width == guiInfo.VideoWidth * 2 )&&           ( guiApp.videoWindow.Height == guiInfo.VideoHeight * 2 ) ) b2=True;      else if ( ( guiApp.videoWindow.Width == guiInfo.VideoWidth / 2 ) &&                ( guiApp.videoWindow.Height == guiInfo.VideoHeight / 2 ) ) b_half=True;      else b1=( guiApp.videoWindow.Width == guiInfo.VideoWidth && guiApp.videoWindow.Height == guiInfo.VideoHeight );     } else b1=!guiApp.videoWindow.isFullScreen;    F=AddMenuCheckItem( window1, (const char*)full_xpm, Menu,MSGTR_GUI_SizeFullscreen,guiApp.videoWindow.isFullScreen,evFullScreen + ( True << 16 ) );
Developer: basinilya, Project: mplayer, Lines: 67


Example 20: flashsv_encode_frame

static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pict, int *got_packet)
{
    FlashSVContext * const s = avctx->priv_data;
    const AVFrame * const p = pict;
    uint8_t *pfptr;
    int res;
    int I_frame = 0;
    int opt_w = 4, opt_h = 4;

    /* First frame needs to be a keyframe */
    if (avctx->frame_number == 0) {
        s->previous_frame = av_mallocz(FFABS(p->linesize[0]) * s->image_height);
        if (!s->previous_frame) {
            av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n");
            return AVERROR(ENOMEM);
        }
        I_frame = 1;
    }

    if (p->linesize[0] < 0)
        pfptr = s->previous_frame - (s->image_height - 1) * p->linesize[0];
    else
        pfptr = s->previous_frame;

    /* Check the placement of keyframes */
    if (avctx->gop_size > 0 &&
        avctx->frame_number >= s->last_key_frame + avctx->gop_size) {
        I_frame = 1;
    }

    if ((res = ff_alloc_packet(pkt, s->image_width * s->image_height * 3)) < 0) {
        //Conservative upper bound check for compressed data
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
               s->image_width * s->image_height * 3);
        return res;
    }

    pkt->size = encode_bitstream(s, p, pkt->data, pkt->size, opt_w * 16, opt_h * 16,
                                 pfptr, &I_frame);

    //save the current frame
    if (p->linesize[0] > 0)
        memcpy(s->previous_frame, p->data[0], s->image_height * p->linesize[0]);
    else
        memcpy(s->previous_frame,
               p->data[0] + p->linesize[0] * (s->image_height - 1),
               s->image_height * FFABS(p->linesize[0]));

    //mark the frame type so the muxer can mux it correctly
    if (I_frame) {
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        avctx->coded_frame->key_frame = 1;
        s->last_key_frame = avctx->frame_number;
        av_dlog(avctx, "Inserting keyframe at frame %d\n", avctx->frame_number);
    } else {
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        avctx->coded_frame->key_frame = 0;
    }

    if (avctx->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Developer: Acidburn0zzz, Project: libav, Lines: 66
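The two FFABS(p->linesize[0]) calls in Example 20 follow a common FFmpeg idiom: a frame's stride (linesize) may be negative when the image is stored bottom-up, so the absolute value is what you multiply by the height to size a buffer, while the sign decides where the topmost row actually lives in memory. The stand-alone sketch below illustrates only that idiom; the Frame struct, its field names, and the sample values are hypothetical and not part of FFmpeg.

/* Minimal sketch of the FFABS(linesize) idiom from Example 20.
 * The Frame struct and the sample values are hypothetical. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))

typedef struct Frame {
    uint8_t *data;     /* points at the first row in display order */
    int      linesize; /* bytes per row; negative means bottom-up storage */
    int      height;
} Frame;

/* Copy the pixel data of a frame whose stride may be negative. */
static uint8_t *copy_frame(const Frame *f)
{
    size_t size = (size_t)FFABS(f->linesize) * f->height; /* real buffer size */
    uint8_t *copy = malloc(size);
    if (!copy)
        return NULL;
    /* With a negative stride the lowest address is the last display row,
     * i.e. data + linesize * (height - 1), exactly as in Example 20. */
    const uint8_t *start = f->linesize >= 0
                         ? f->data
                         : f->data + f->linesize * (f->height - 1);
    memcpy(copy, start, size);
    return copy;
}

int main(void)
{
    uint8_t pixels[4 * 8] = {0};
    /* Bottom-up frame: data points at the last row of the allocation. */
    Frame f = { pixels + 3 * 8, -8, 4 };
    uint8_t *c = copy_frame(&f);
    printf("copied %d bytes\n", FFABS(f.linesize) * f.height);
    free(c);
    return 0;
}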


Example 21: dts_probe

static int dts_probe(AVProbeData *p)
{
    const uint8_t *buf, *bufp;
    uint32_t state = -1;
    int markers[4*16] = {0};
    int sum, max, i;
    int64_t diff = 0;
    uint8_t hdr[12 + FF_INPUT_BUFFER_PADDING_SIZE] = { 0 };

    buf = p->buf + FFMIN(4096, p->buf_size);

    for(; buf < (p->buf+p->buf_size)-2; buf+=2) {
        int marker, sample_blocks, sample_rate, sr_code, framesize;
        int lfe;
        GetBitContext gb;

        bufp = buf;
        state = (state << 16) | bytestream_get_be16(&bufp);

        if (buf - p->buf >= 4)
            diff += FFABS(((int16_t)AV_RL16(buf)) - (int16_t)AV_RL16(buf-4));

        /* regular bitstream */
        if (state == DCA_SYNCWORD_CORE_BE)
            marker = 0;
        else if (state == DCA_SYNCWORD_CORE_LE)
            marker = 1;

        /* 14 bits big-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_BE &&
                 (bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
            marker = 2;

        /* 14 bits little-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_LE &&
                 (bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
            marker = 3;
        else
            continue;

        if (avpriv_dca_convert_bitstream(buf-2, 12, hdr, 12) < 0)
            continue;

        init_get_bits(&gb, hdr, 96);
        skip_bits_long(&gb, 39);

        sample_blocks = get_bits(&gb, 7) + 1;
        if (sample_blocks < 8)
            continue;

        framesize = get_bits(&gb, 14) + 1;
        if (framesize < 95)
            continue;

        skip_bits(&gb, 6);
        sr_code = get_bits(&gb, 4);
        sample_rate = avpriv_dca_sample_rates[sr_code];
        if (sample_rate == 0)
            continue;

        get_bits(&gb, 5);
        if (get_bits(&gb, 1))
            continue;

        skip_bits_long(&gb, 9);
        lfe = get_bits(&gb, 2);
        if (lfe > 2)
            continue;

        marker += 4* sr_code;

        markers[marker] ++;
    }

    sum = max = 0;
    for (i=0; i<FF_ARRAY_ELEMS(markers); i++) {
        sum += markers[i];
        if (markers[max] < markers[i])
            max = i;
    }

    if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
        markers[max] * 4 > sum * 3 &&
        diff / p->buf_size > 200)
        return AVPROBE_SCORE_EXTENSION + 1;

    return 0;
}
Developer: 63n, Project: FFmpeg, Lines: 88
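One detail worth calling out in Example 21: besides counting sync markers, dts_probe sums FFABS() of the difference between 16-bit words four bytes apart and later requires diff / p->buf_size > 200, a cheap energy measure that rejects near-constant data. The sketch below reproduces just that measure on a plain array; the function name, the sample buffers, and the idea of comparing the two sums are illustrative assumptions, not the actual FFmpeg probe.

/* Minimal sketch of the FFABS-based "does this look like a real signal"
 * measure used in Example 21. Names and values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))

/* Sum of absolute differences between consecutive 16-bit samples. */
static int64_t abs_diff_sum(const int16_t *samples, int n)
{
    int64_t diff = 0;
    for (int i = 1; i < n; i++)
        diff += FFABS(samples[i] - samples[i - 1]);
    return diff;
}

int main(void)
{
    int16_t flat[6]  = { 5, 5, 5, 5, 5, 5 };            /* near-silence */
    int16_t noisy[6] = { 0, 900, -700, 1200, -80, 40 }; /* real signal  */
    printf("flat:  %lld\n", (long long)abs_diff_sum(flat, 6));
    printf("noisy: %lld\n", (long long)abs_diff_sum(noisy, 6));
    return 0;
}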


Example 22: flashsv_encode_frame

static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data){    FlashSVContext * const s = avctx->priv_data;    AVFrame *pict = data;    AVFrame * const p = &s->frame;    uint8_t *pfptr;    int res;    int I_frame = 0;    int opt_w, opt_h;    *p = *pict;    /* First frame needs to be a keyframe */    if (avctx->frame_number == 0) {        s->previous_frame = av_mallocz(FFABS(p->linesize[0])*s->image_height);        if (!s->previous_frame) {            av_log(avctx, AV_LOG_ERROR, "Memory allocation failed./n");            return -1;        }        I_frame = 1;    }    if (p->linesize[0] < 0)        pfptr = s->previous_frame - ((s->image_height-1) * p->linesize[0]);    else        pfptr = s->previous_frame;    /* Check the placement of keyframes */    if (avctx->gop_size > 0) {        if (avctx->frame_number >= s->last_key_frame + avctx->gop_size) {            I_frame = 1;        }    }    opt_w=4;    opt_h=4;    if (buf_size < s->image_width*s->image_height*3) {        //Conservative upper bound check for compressed data        av_log(avctx, AV_LOG_ERROR, "buf_size %d <  %d/n", buf_size, s->image_width*s->image_height*3);        return -1;    }    res = encode_bitstream(s, p, buf, buf_size, opt_w*16, opt_h*16, pfptr, &I_frame);    //save the current frame    if(p->linesize[0] > 0)        memcpy(s->previous_frame, p->data[0], s->image_height*p->linesize[0]);    else        memcpy(s->previous_frame, p->data[0] + p->linesize[0] * (s->image_height-1), s->image_height*FFABS(p->linesize[0]));    //mark the frame type so the muxer can mux it correctly    if (I_frame) {        p->pict_type = FF_I_TYPE;        p->key_frame = 1;        s->last_key_frame = avctx->frame_number;        av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d/n",avctx->frame_number);    } else {        p->pict_type = FF_P_TYPE;        p->key_frame = 0;    }    avctx->coded_frame = p;    return res;}
Developer: BillyYi, Project: FFMpeg, Lines: 66


Example 23: get_vlc2

        int val = 0;                                                  \
        for (b = 0; b < num; b++) {                                   \
            val += get_vlc2(gb, vlc_table, 9, 3) - OFFSET;            \
            if (MASK) val &= MASK;                                    \
            PAR[e][b] = val;                                          \
            if (ERR_CONDITION)                                        \
                goto err;                                             \
        }                                                             \
    }                                                                 \
    return 0;                                                         \
err:                                                                  \
    av_log(avctx, AV_LOG_ERROR, "illegal "#PAR"\n");                  \
    return -1;                                                        \
}

READ_PAR_DATA(iid,    huff_offset[table_idx],    0, FFABS(ps->iid_par[e][b]) > 7 + 8 * ps->iid_quant)
READ_PAR_DATA(icc,    huff_offset[table_idx],    0, ps->icc_par[e][b] > 7U)
READ_PAR_DATA(ipdopd,                      0, 0x07, 0)

static int ps_read_extension_data(GetBitContext *gb, PSContext *ps, int ps_extension_id)
{
    int e;
    int count = get_bits_count(gb);

    if (ps_extension_id)
        return 0;

    ps->enable_ipdopd = get_bits1(gb);
    if (ps->enable_ipdopd) {
        for (e = 0; e < ps->num_env; e++) {
            int dt = get_bits1(gb);
Developer: Arcen, Project: FFmpeg, Lines: 31


Example 24: main

int main(int argc, char **argv){    int in_sample_rate, out_sample_rate, ch ,i, in_ch_layout_index, out_ch_layout_index, osr, flush_count;    uint64_t in_ch_layout, out_ch_layout;    enum AVSampleFormat in_sample_fmt, out_sample_fmt;    int sample_rates[]={8000,11025,16000,22050,32000};    uint8_t array_in[SAMPLES*8*8];    uint8_t array_mid[SAMPLES*8*8*3];    uint8_t array_out[SAMPLES*8*8+100];    uint8_t *ain[SWR_CH_MAX];    uint8_t *aout[SWR_CH_MAX];    uint8_t *amid[SWR_CH_MAX];    struct SwrContext * forw_ctx= NULL;    struct SwrContext *backw_ctx= NULL;    in_sample_rate=16000;    for(osr=0; osr<5; osr++){        out_sample_rate= sample_rates[osr];        for(in_sample_fmt= AV_SAMPLE_FMT_U8; in_sample_fmt<=AV_SAMPLE_FMT_DBL; in_sample_fmt++){            for(out_sample_fmt= AV_SAMPLE_FMT_U8; out_sample_fmt<=AV_SAMPLE_FMT_DBL; out_sample_fmt++){                for(in_ch_layout_index=0; layouts[in_ch_layout_index]; in_ch_layout_index++){                    in_ch_layout= layouts[in_ch_layout_index];                    int in_ch_count= av_get_channel_layout_nb_channels(in_ch_layout);                    for(out_ch_layout_index=0; layouts[out_ch_layout_index]; out_ch_layout_index++){                        int out_count, mid_count;                        out_ch_layout= layouts[out_ch_layout_index];                        int out_ch_count= av_get_channel_layout_nb_channels(out_ch_layout);                        fprintf(stderr, "ch %d->%d, rate:%5d->%5d, fmt:%s->%s",                               in_ch_count, out_ch_count,                               in_sample_rate, out_sample_rate,                               av_get_sample_fmt_name(in_sample_fmt), av_get_sample_fmt_name(out_sample_fmt));                        forw_ctx  = swr_alloc_set_opts(forw_ctx, out_ch_layout, av_get_alt_sample_fmt(out_sample_fmt, 1), out_sample_rate,                                                                  in_ch_layout, av_get_alt_sample_fmt( in_sample_fmt, 1),  in_sample_rate,                                                       0, 0);                        backw_ctx = swr_alloc_set_opts(backw_ctx, in_ch_layout,  in_sample_fmt,             in_sample_rate,                                                                 out_ch_layout, av_get_alt_sample_fmt(out_sample_fmt, 1), out_sample_rate,                                                       0, 0);                        if(swr_init( forw_ctx) < 0)                            fprintf(stderr, "swr_init(->) failed/n");                        if(swr_init(backw_ctx) < 0)                            fprintf(stderr, "swr_init(<-) failed/n");                        if(!forw_ctx)                            fprintf(stderr, "Failed to init forw_cts/n");                        if(!backw_ctx)                            fprintf(stderr, "Failed to init backw_ctx/n");                               //FIXME test planar                        setup_array(ain , array_in , av_get_alt_sample_fmt( in_sample_fmt, 1),   SAMPLES);                        setup_array(amid, array_mid, av_get_alt_sample_fmt(out_sample_fmt, 1), 3*SAMPLES);                        setup_array(aout, array_out,  in_sample_fmt           ,   SAMPLES);                        for(ch=0; ch<in_ch_count; ch++){                            for(i=0; i<SAMPLES; i++)                                set(ain, ch, i, in_ch_count, av_get_alt_sample_fmt(in_sample_fmt, 1), sin(i*i*3/SAMPLES));                        }                        mid_count= swr_convert(forw_ctx, amid, 3*SAMPLES, ain, SAMPLES);                     
   out_count= swr_convert(backw_ctx,aout, SAMPLES, amid, mid_count);                        for(ch=0; ch<in_ch_count; ch++){                            double sse, x, maxdiff=0;                            double sum_a= 0;                            double sum_b= 0;                            double sum_aa= 0;                            double sum_bb= 0;                            double sum_ab= 0;                            for(i=0; i<out_count; i++){                                double a= get(ain , ch, i, in_ch_count, av_get_alt_sample_fmt(in_sample_fmt, 1));                                double b= get(aout, ch, i, in_ch_count, in_sample_fmt);                                sum_a += a;                                sum_b += b;                                sum_aa+= a*a;                                sum_bb+= b*b;                                sum_ab+= a*b;                                maxdiff= FFMAX(maxdiff, FFABS(a-b));                            }                            x = sum_ab/sum_bb;                            sse= sum_aa + sum_bb*x*x - 2*x*sum_ab;                            fprintf(stderr, "[%f %f %f] len:%5d/n", sqrt(sse/out_count), x, maxdiff, out_count);                        }                        flush_count=swr_convert(backw_ctx,aout, SAMPLES, 0, 0);                        if(flush_count){                            for(ch=0; ch<in_ch_count; ch++){                                double sse, x, maxdiff=0;                                double sum_a= 0;                                double sum_b= 0;                                double sum_aa= 0;                                double sum_bb= 0;                                double sum_ab= 0;                                for(i=0; i<flush_count; i++){                                    double a= get(ain , ch, i+out_count, in_ch_count, av_get_alt_sample_fmt(in_sample_fmt, 1));                                    double b= get(aout, ch, i, in_ch_count, in_sample_fmt);                                    sum_a += a;                                    sum_b += b;                                    sum_aa+= a*a;                                    sum_bb+= b*b;                                    sum_ab+= a*b;                                    maxdiff= FFMAX(maxdiff, FFABS(a-b));                                }                                x = sum_ab/sum_bb;                                sse= sum_aa + sum_bb*x*x - 2*x*sum_ab;//.........这里部分代码省略.........
Developer: HungTDO, Project: Android-Video-Player-FFmpeg-and-Bitmap, Lines: 101


Example 25: swri_rematrix_init

av_cold int swri_rematrix_init(SwrContext *s){    int i, j;    int nb_in  = av_get_channel_layout_nb_channels(s->in_ch_layout);    int nb_out = av_get_channel_layout_nb_channels(s->out_ch_layout);    s->mix_any_f = NULL;    if (!s->rematrix_custom) {        int r = auto_matrix(s);        if (r)            return r;    }    if (s->midbuf.fmt == AV_SAMPLE_FMT_S16P){        int maxsum = 0;        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(int));        s->native_one    = av_mallocz(sizeof(int));        if (!s->native_matrix || !s->native_one)            return AVERROR(ENOMEM);        for (i = 0; i < nb_out; i++) {            double rem = 0;            int sum = 0;            for (j = 0; j < nb_in; j++) {                double target = s->matrix[i][j] * 32768 + rem;                ((int*)s->native_matrix)[i * nb_in + j] = lrintf(target);                rem += target - ((int*)s->native_matrix)[i * nb_in + j];                sum += FFABS(((int*)s->native_matrix)[i * nb_in + j]);            }            maxsum = FFMAX(maxsum, sum);        }        *((int*)s->native_one) = 32768;        if (maxsum <= 32768) {            s->mix_1_1_f = (mix_1_1_func_type*)copy_s16;            s->mix_2_1_f = (mix_2_1_func_type*)sum2_s16;            s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s16(s);        } else {            s->mix_1_1_f = (mix_1_1_func_type*)copy_clip_s16;            s->mix_2_1_f = (mix_2_1_func_type*)sum2_clip_s16;            s->mix_any_f = (mix_any_func_type*)get_mix_any_func_clip_s16(s);        }    }else if(s->midbuf.fmt == AV_SAMPLE_FMT_FLTP){        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(float));        s->native_one    = av_mallocz(sizeof(float));        if (!s->native_matrix || !s->native_one)            return AVERROR(ENOMEM);        for (i = 0; i < nb_out; i++)            for (j = 0; j < nb_in; j++)                ((float*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j];        *((float*)s->native_one) = 1.0;        s->mix_1_1_f = (mix_1_1_func_type*)copy_float;        s->mix_2_1_f = (mix_2_1_func_type*)sum2_float;        s->mix_any_f = (mix_any_func_type*)get_mix_any_func_float(s);    }else if(s->midbuf.fmt == AV_SAMPLE_FMT_DBLP){        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(double));        s->native_one    = av_mallocz(sizeof(double));        if (!s->native_matrix || !s->native_one)            return AVERROR(ENOMEM);        for (i = 0; i < nb_out; i++)            for (j = 0; j < nb_in; j++)                ((double*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j];        *((double*)s->native_one) = 1.0;        s->mix_1_1_f = (mix_1_1_func_type*)copy_double;        s->mix_2_1_f = (mix_2_1_func_type*)sum2_double;        s->mix_any_f = (mix_any_func_type*)get_mix_any_func_double(s);    }else if(s->midbuf.fmt == AV_SAMPLE_FMT_S32P){        s->native_one    = av_mallocz(sizeof(int));        if (!s->native_one)            return AVERROR(ENOMEM);        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(int));        if (!s->native_matrix) {            av_freep(&s->native_one);            return AVERROR(ENOMEM);        }        for (i = 0; i < nb_out; i++) {            double rem = 0;            for (j = 0; j < nb_in; j++) {                double target = s->matrix[i][j] * 32768 + rem;                ((int*)s->native_matrix)[i * nb_in + j] = lrintf(target);                rem += target - ((int*)s->native_matrix)[i * nb_in + j];            }        }        *((int*)s->native_one) = 32768;        s->mix_1_1_f = (mix_1_1_func_type*)copy_s32;     
   s->mix_2_1_f = (mix_2_1_func_type*)sum2_s32;        s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s32(s);    }else        av_assert0(0);    //FIXME quantize for integeres    for (i = 0; i < SWR_CH_MAX; i++) {        int ch_in=0;        for (j = 0; j < SWR_CH_MAX; j++) {            s->matrix32[i][j]= lrintf(s->matrix[i][j] * 32768);            if(s->matrix[i][j])                s->matrix_ch[i][++ch_in]= j;        }        s->matrix_ch[i][0]= ch_in;    }    if(HAVE_X86ASM && HAVE_MMX)//.........这里部分代码省略.........
Developer: Emerica, Project: FFmpeg, Lines: 101
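In Example 25 the S16 path converts each floating-point mixing coefficient to Q15 fixed point and accumulates FFABS() of the results per output channel; only if every per-channel sum stays within 32768 can the non-clipping mix functions be used safely. The sketch below mirrors that decision for a hypothetical two-input downmix; the coefficient values and variable names are assumptions, not taken from libswresample.

/* Minimal sketch of the FFABS-based clipping decision from Example 25.
 * The mixing coefficients below are hypothetical. */
#include <math.h>
#include <stdio.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* One output channel mixed from two inputs at -3 dB each (Q15). */
    const double matrix[1][2] = { { 0.7071, 0.7071 } };
    int maxsum = 0;

    for (int i = 0; i < 1; i++) {
        int sum = 0;
        for (int j = 0; j < 2; j++)
            sum += FFABS((int)lrint(matrix[i][j] * 32768));
        maxsum = FFMAX(maxsum, sum);
    }

    /* If any output channel's coefficients sum past 1.0 in Q15, a mixed
     * int16_t sample can overflow, so a clipping mix variant is needed. */
    printf("maxsum=%d -> %s\n", maxsum,
           maxsum <= 32768 ? "fast (non-clipping) mix" : "clipping mix");
    return 0;
}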


Example 26: mpeg1_encode_sequence_header

/* put sequence header if needed */static void mpeg1_encode_sequence_header(MpegEncContext *s){    unsigned int vbv_buffer_size, fps, v;    int i, constraint_parameter_flag;    uint64_t time_code;    int64_t best_aspect_error = INT64_MAX;    AVRational aspect_ratio = s->avctx->sample_aspect_ratio;    if (aspect_ratio.num == 0 || aspect_ratio.den == 0)        aspect_ratio = (AVRational){1,1};             // pixel aspect 1.1 (VGA)    if (s->current_picture.f->key_frame) {        AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];        /* mpeg1 header repeated every gop */        put_header(s, SEQ_START_CODE);        put_sbits(&s->pb, 12, s->width  & 0xFFF);        put_sbits(&s->pb, 12, s->height & 0xFFF);        for (i = 1; i < 15; i++) {            int64_t error = aspect_ratio.num * (1LL<<32) / aspect_ratio.den;            if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1)                error -= (1LL<<32) / ff_mpeg1_aspect[i];            else                error -= (1LL<<32)*ff_mpeg2_aspect[i].num * s->height / s->width / ff_mpeg2_aspect[i].den;            error = FFABS(error);            if (error - 2 <= best_aspect_error) {                best_aspect_error    = error;                s->aspect_ratio_info = i;            }        }        put_bits(&s->pb, 4, s->aspect_ratio_info);        put_bits(&s->pb, 4, s->frame_rate_index);        if (s->avctx->rc_max_rate) {            v = (s->avctx->rc_max_rate + 399) / 400;            if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)                v = 0x3ffff;        } else {            v = 0x3FFFF;        }        if (s->avctx->rc_buffer_size)            vbv_buffer_size = s->avctx->rc_buffer_size;        else            /* VBV calculation: Scaled so that a VCD has the proper             * VBV size of 40 kilobytes */            vbv_buffer_size = ((20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;        vbv_buffer_size = (vbv_buffer_size + 16383) / 16384;        put_sbits(&s->pb, 18, v);        put_bits(&s->pb, 1, 1);         // marker        put_sbits(&s->pb, 10, vbv_buffer_size);        constraint_parameter_flag =            s->width  <= 768                                    &&            s->height <= 576                                    &&            s->mb_width * s->mb_height                 <= 396   &&            s->mb_width * s->mb_height * framerate.num <= 396 * 25 * framerate.den &&            framerate.num <= framerate.den * 30                 &&            s->avctx->me_range                                  &&            s->avctx->me_range < 128                            &&            vbv_buffer_size <= 20                               &&            v <= 1856000 / 400                                  &&            s->codec_id == AV_CODEC_ID_MPEG1VIDEO;        put_bits(&s->pb, 1, constraint_parameter_flag);        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);        if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {            AVFrameSideData *side_data;            int width = s->width;            int height = s->height;            int use_seq_disp_ext;            put_header(s, EXT_START_CODE);            put_bits(&s->pb, 4, 1);                 // seq ext            put_bits(&s->pb, 1, s->avctx->profile == 0); // escx 1 for 4:2:2 profile            put_bits(&s->pb, 3, s->avctx->profile); // profile            put_bits(&s->pb, 4, s->avctx->level);   // level            put_bits(&s->pb, 1, s->progressive_sequence);            put_bits(&s->pb, 2, 
s->chroma_format);            put_bits(&s->pb, 2, s->width  >> 12);            put_bits(&s->pb, 2, s->height >> 12);            put_bits(&s->pb, 12, v >> 18);          // bitrate ext            put_bits(&s->pb, 1, 1);                 // marker            put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext            put_bits(&s->pb, 1, s->low_delay);            put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n            put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d//.........这里部分代码省略.........
Developer: 309746069, Project: FFmpeg, Lines: 101


Example 27: pred_spatial_direct_motion

//.........这里部分代码省略.........            mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];            b8_stride      = 2 + 4 * h->mb_stride;            b4_stride     *= 6;            if (IS_INTERLACED(mb_type_col[0]) !=                IS_INTERLACED(mb_type_col[1])) {                mb_type_col[0] &= ~MB_TYPE_INTERLACED;                mb_type_col[1] &= ~MB_TYPE_INTERLACED;            }            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&                !is_b8x8) {                *mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2;  /* B_16x8 */            } else {                *mb_type |= MB_TYPE_8x8;            }        } else {                                         //     AFR/FR    -> AFR/FRsingle_col:            mb_type_col[0] =            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {                *mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_16x16 */            } else if (!is_b8x8 &&                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {                *mb_type |= MB_TYPE_DIRECT2 |                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));            } else {                if (!h->ps.sps->direct_8x8_inference_flag) {                    /* FIXME: Save sub mb types from previous frames (or derive                     * from MVs) so we know exactly what block size to use. */                    sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */                }                *mb_type |= MB_TYPE_8x8;            }        }    }    await_reference_mb_row(h, &sl->ref_list[1][0], mb_y);    l1mv0  = (void*)&sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];    l1mv1  = (void*)&sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];    l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];    l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];    if (!b8_stride) {        if (sl->mb_y & 1) {            l1ref0 += 2;            l1ref1 += 2;            l1mv0  += 2 * b4_stride;            l1mv1  += 2 * b4_stride;        }    }    if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {        int n = 0;        for (i8 = 0; i8 < 4; i8++) {            int x8  = i8 & 1;            int y8  = i8 >> 1;            int xy8 = x8     + y8 * b8_stride;            int xy4 = x8 * 3 + y8 * b4_stride;            int a, b;            if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))                continue;            sl->sub_mb_type[i8] = sub_mb_type;            fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,                           (uint8_t)ref[0], 1);            fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,                           (uint8_t)ref[1], 1);            if (!IS_INTRA(mb_type_col[y8]) && !sl->ref_list[1][0].parent->long_ref &&                ((l1ref0[xy8] == 0 &&                  FFABS(l1mv0[xy4][0]) <= 1 &&                  FFABS(l1mv0[xy4][1]) <= 1) ||                 (l1ref0[xy8] < 0 &&                  l1ref1[xy8] == 0 &&                  FFABS(l1mv1[xy4][0]) <= 1 &&                  FFABS(l1mv1[xy4][1]) <= 1))) {                a =                b = 0;                if (ref[0] > 0)                  
  a = mv[0];                if (ref[1] > 0)                    b = mv[1];                n++;            } else {                a = mv[0];                b = mv[1];            }            fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4);            fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4);        }        if (!is_b8x8 && !(n & 3))            *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |                                     MB_TYPE_P1L0 | MB_TYPE_P1L1)) |                       MB_TYPE_16x16 | MB_TYPE_DIRECT2;    } else if (IS_16X16(*mb_type)) {
Developer: 411697643, Project: FFmpeg, Lines: 101


Example 28: do_a_deblock_C

/** * accurate deblock filter */static av_always_inline void do_a_deblock_C(uint8_t *src, int step,                                            int stride, const PPContext *c, int mode){    int y;    const int QP= c->QP;    const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;    const int dcThreshold= dcOffset*2 + 1;//START_TIMER    src+= step*4; // src points to begin of the 8x8 Block    for(y=0; y<8; y++){        int numEq= 0;        numEq += ((unsigned)(src[-1*step] - src[0*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 0*step] - src[1*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 1*step] - src[2*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 2*step] - src[3*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 3*step] - src[4*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 4*step] - src[5*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 5*step] - src[6*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 6*step] - src[7*step] + dcOffset)) < dcThreshold;        numEq += ((unsigned)(src[ 7*step] - src[8*step] + dcOffset)) < dcThreshold;        if(numEq > c->ppMode.flatnessThreshold){            int min, max, x;            if(src[0] > src[step]){                max= src[0];                min= src[step];            }else{                max= src[step];                min= src[0];            }            for(x=2; x<8; x+=2){                if(src[x*step] > src[(x+1)*step]){                        if(src[x    *step] > max) max= src[ x   *step];                        if(src[(x+1)*step] < min) min= src[(x+1)*step];                }else{                        if(src[(x+1)*step] > max) max= src[(x+1)*step];                        if(src[ x   *step] < min) min= src[ x   *step];                }            }            if(max-min < 2*QP){                const int first= FFABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];                const int last= FFABS(src[8*step] - src[7*step]) < QP ? 
src[8*step] : src[7*step];                int sums[10];                sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;                sums[1] = sums[0] - first       + src[3*step];                sums[2] = sums[1] - first       + src[4*step];                sums[3] = sums[2] - first       + src[5*step];                sums[4] = sums[3] - first       + src[6*step];                sums[5] = sums[4] - src[0*step] + src[7*step];                sums[6] = sums[5] - src[1*step] + last;                sums[7] = sums[6] - src[2*step] + last;                sums[8] = sums[7] - src[3*step] + last;                sums[9] = sums[8] - src[4*step] + last;                if (mode & VISUALIZE) {                    src[0*step] =                    src[1*step] =                    src[2*step] =                    src[3*step] =                    src[4*step] =                    src[5*step] =                    src[6*step] =                    src[7*step] = 128;                }                src[0*step]= (sums[0] + sums[2] + 2*src[0*step])>>4;                src[1*step]= (sums[1] + sums[3] + 2*src[1*step])>>4;                src[2*step]= (sums[2] + sums[4] + 2*src[2*step])>>4;                src[3*step]= (sums[3] + sums[5] + 2*src[3*step])>>4;                src[4*step]= (sums[4] + sums[6] + 2*src[4*step])>>4;                src[5*step]= (sums[5] + sums[7] + 2*src[5*step])>>4;                src[6*step]= (sums[6] + sums[8] + 2*src[6*step])>>4;                src[7*step]= (sums[7] + sums[9] + 2*src[7*step])>>4;            }        }else{
Developer: 0day-ci, Project: FFmpeg, Lines: 79


Example 29: encode_block

//.........这里部分代码省略.........                        return -1;                    coefs1[i] = lrint(t);                }            }        }    }    v = 0;    for (ch = 0; ch < s->avctx->channels; ch++) {        int a = s->channel_coded[ch];        put_bits(&s->pb, 1, a);        v |= a;    }    if (!v)        return 1;    for (v = total_gain - 1; v >= 127; v -= 127)        put_bits(&s->pb, 7, 127);    put_bits(&s->pb, 7, v);    coef_nb_bits = ff_wma_total_gain_to_bits(total_gain);    if (s->use_noise_coding) {        for (ch = 0; ch < s->avctx->channels; ch++) {            if (s->channel_coded[ch]) {                int i, n;                n = s->exponent_high_sizes[bsize];                for (i = 0; i < n; i++) {                    put_bits(&s->pb, 1, s->high_band_coded[ch][i] = 0);                    if (0)                        nb_coefs[ch] -= s->exponent_high_bands[bsize][i];                }            }        }    }    parse_exponents = 1;    if (s->block_len_bits != s->frame_len_bits)        put_bits(&s->pb, 1, parse_exponents);    if (parse_exponents) {        for (ch = 0; ch < s->avctx->channels; ch++) {            if (s->channel_coded[ch]) {                if (s->use_exp_vlc) {                    encode_exp_vlc(s, ch, fixed_exp);                } else {                    av_assert0(0); // FIXME not implemented//                    encode_exp_lsp(s, ch);                }            }        }    } else        av_assert0(0); // FIXME not implemented    for (ch = 0; ch < s->avctx->channels; ch++) {        if (s->channel_coded[ch]) {            int run, tindex;            WMACoef *ptr, *eptr;            tindex = (ch == 1 && s->ms_stereo);            ptr    = &s->coefs1[ch][0];            eptr   = ptr + nb_coefs[ch];            run = 0;            for (; ptr < eptr; ptr++) {                if (*ptr) {                    int level     = *ptr;                    int abs_level = FFABS(level);                    int code      = 0;                    if (abs_level <= s->coef_vlcs[tindex]->max_level)                        if (run < s->coef_vlcs[tindex]->levels[abs_level - 1])                            code = run + s->int_table[tindex][abs_level - 1];                    av_assert2(code < s->coef_vlcs[tindex]->n);                    put_bits(&s->pb, s->coef_vlcs[tindex]->huffbits[code],                             s->coef_vlcs[tindex]->huffcodes[code]);                    if (code == 0) {                        if (1 << coef_nb_bits <= abs_level)                            return -1;                        put_bits(&s->pb, coef_nb_bits, abs_level);                        put_bits(&s->pb, s->frame_len_bits, run);                    }                    // FIXME the sign is flipped somewhere                    put_bits(&s->pb, 1, level < 0);                    run = 0;                } else                    run++;            }            if (run)                put_bits(&s->pb, s->coef_vlcs[tindex]->huffbits[1],                         s->coef_vlcs[tindex]->huffcodes[1]);        }        if (s->version == 1 && s->avctx->channels >= 2)            avpriv_align_put_bits(&s->pb);    }    return 0;}
Developer: 15806905685, Project: FFmpeg, Lines: 101
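Example 29 shows the usual entropy-coding split: FFABS(level) yields the magnitude that is looked up in the VLC table, and the sign is written afterwards as a separate bit. The sketch below demonstrates just that split, with printf standing in for a real bit writer; the function name and the sample levels are hypothetical.

/* Minimal sketch of the magnitude/sign split from Example 29.
 * There is no real bit writer here; printf stands in for put_bits(). */
#include <stdio.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))

static void code_level(int level)
{
    int abs_level = FFABS(level);   /* magnitude goes through the VLC table */
    int sign      = level < 0;      /* sign is emitted as one extra bit     */
    printf("level=%4d -> magnitude=%d, sign bit=%d\n", level, abs_level, sign);
}

int main(void)
{
    code_level(-7);
    code_level(0);
    code_level(13);
    return 0;
}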


Example 30: filter_frame

//.........这里部分代码省略.........            }            for (i = 0; i < 256; i++)                h->max_hval = FFMAX(h->max_hval, h->histogram[i]);            for (i = 0; i < outlink->w; i++) {                int col_height = h->level_height - (float)h->histogram[i] / h->max_hval * h->level_height;                for (j = h->level_height - 1; j >= col_height; j--) {                    if (h->display_mode) {                        for (l = 0; l < h->ncomp; l++)                            out->data[l][(j + start) * out->linesize[l] + i] = h->fg_color[l];                    } else {                        out->data[k][(j + start) * out->linesize[k] + i] = 255;                    }                }                for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)                    out->data[k][(j + start) * out->linesize[k] + i] = i;            }            memset(h->histogram, 0, 256 * sizeof(unsigned));            h->max_hval = 0;        }        break;    case MODE_WAVEFORM:        if (h->waveform_mode) {            for (k = 0; k < h->ncomp; k++) {                int offset = k * 256 * h->display_mode;                for (i = 0; i < inlink->w; i++) {                    for (j = 0; j < inlink->h; j++) {                        int pos = (offset +                                   in->data[k][j * in->linesize[k] + i]) *                                  out->linesize[k] + i;                        unsigned value = out->data[k][pos];                        value = FFMIN(value + h->step, 255);                        out->data[k][pos] = value;                    }                }            }        } else {            for (k = 0; k < h->ncomp; k++) {                int offset = k * 256 * h->display_mode;                for (i = 0; i < inlink->h; i++) {                    src = in ->data[k] + i * in ->linesize[k];                    dst = out->data[k] + i * out->linesize[k];                    for (j = 0; j < inlink->w; j++) {                        int pos = src[j] + offset;                        unsigned value = dst[pos];                        value = FFMIN(value + h->step, 255);                        dst[pos] = value;                    }                }            }        }        break;    case MODE_COLOR:        for (i = 0; i < inlink->h; i++) {            int iw1 = i * in->linesize[1];            int iw2 = i * in->linesize[2];            for (j = 0; j < inlink->w; j++) {                int pos = in->data[1][iw1 + j] * out->linesize[0] + in->data[2][iw2 + j];                if (out->data[0][pos] < 255)                    out->data[0][pos]++;            }        }        for (i = 0; i < 256; i++) {            dst = out->data[0] + i * out->linesize[0];            for (j = 0; j < 256; j++) {                if (!dst[j]) {                    out->data[1][i * out->linesize[0] + j] = i;                    out->data[2][i * out->linesize[0] + j] = j;                }            }        }        break;    case MODE_COLOR2:        for (i = 0; i < inlink->h; i++) {            int iw1 = i * in->linesize[1];            int iw2 = i * in->linesize[2];            for (j = 0; j < inlink->w; j++) {                int u = in->data[1][iw1 + j];                int v = in->data[2][iw2 + j];                int pos = u * out->linesize[0] + v;                if (!out->data[0][pos])                    out->data[0][pos] = FFABS(128 - u) + FFABS(128 - v);                out->data[1][pos] = u;                out->data[2][pos] = v;            }        }        break;    default:        
av_assert0(0);    }    ret = ff_filter_frame(outlink, out);    av_frame_free(&in);    if (ret < 0)        return ret;    return 0;}
Developer: assaflavie, Project: FFmpeg, Lines: 101



Note: The FFABS examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the license of the corresponding project before redistributing or using the code, and do not repost this article without permission.

