This tutorial of C++ FFMAX code examples is quite practical; we hope you find it helpful.
This article collects and summarizes typical usage examples of the C++ FFMAX function. If you are struggling with questions such as: what exactly does C++ FFMAX do? How is FFMAX used? Where can I find examples of FFMAX in use? — then congratulations, the hand-picked code examples here may help you. The following presents 30 FFMAX code examples, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code examples.

Example 1: libopenjpeg_decode_frame

static int libopenjpeg_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, has_alpha = 0, ret = -1;
    int x, y, index;
    uint8_t *img_ptr;
    int adjust[4];
    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if((AV_RB32(buf) == 12) &&
       (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
       (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        // If the AVPacket contains a jp2c box, then skip to
        // the starting byte of the codestream.
        if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
            buf += 8;
        dec = opj_create_decompress(CODEC_J2K);
    }

    if(!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);

    ctx->dec_params.cp_reduce = avctx->lowres;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }
    width  = image->comps[0].w << avctx->lowres;
    height = image->comps[0].h << avctx->lowres;
    if(avcodec_check_dimensions(avctx, width, height) < 0) {
        av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
        goto done;
    }
    avcodec_set_dimensions(avctx, width, height);

    switch(image->numcomps) {
    case 1:
        avctx->pix_fmt = PIX_FMT_GRAY8;
        break;
    case 3:
        if(check_image_attributes(image)) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else {
            avctx->pix_fmt = PIX_FMT_GRAY8;
            av_log(avctx, AV_LOG_ERROR, "Only first component will be used.\n");
        }
        break;
    case 4:
        has_alpha = 1;
        avctx->pix_fmt = PIX_FMT_RGBA;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps);
        goto done;
    }

    if(picture->data[0])
        avctx->release_buffer(avctx, picture);

    if(avctx->get_buffer(avctx, picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Couldn't allocate image buffer.\n");
        return -1;
    }

    for(x = 0; x < image->numcomps; x++) {
        adjust[x] = FFMAX(image->comps[x].prec - 8, 0);
    }

    for(y = 0; y < avctx->height; y++) {
        index = y*avctx->width;
        img_ptr = picture->data[0] + y*picture->linesize[0];
        // ... (the rest of this function is omitted) ...
Developer ID: JDsolution, Project: ipnc, Lines of code: 101
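A quick note before the remaining examples: FFMAX is not a library function but a tiny macro from FFmpeg's libavutil headers, essentially #define FFMAX(a,b) ((a) > (b) ? (a) : (b)), with FFMIN as its mirror image. The sketch below is minimal and self-contained — it defines the macro locally instead of including FFmpeg headers, and the precision values are invented for illustration. It replays the clamping idiom from Example 1, where adjust[x] = FFMAX(image->comps[x].prec - 8, 0) computes a downshift that can never go negative.

#include <stdio.h>

/* Same definition FFmpeg uses in libavutil; duplicated here so the
 * sketch compiles on its own. Being a macro, it evaluates its
 * arguments twice, so avoid side effects such as FFMAX(i++, j). */
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Hypothetical component precisions, standing in for
     * image->comps[x].prec from Example 1. */
    int prec[4] = { 8, 10, 12, 16 };
    int adjust[4];

    for (int x = 0; x < 4; x++) {
        /* Shift needed to reduce the sample to 8 bits; FFMAX clamps
         * it at 0 so 8-bit input is left untouched. */
        adjust[x] = FFMAX(prec[x] - 8, 0);
        printf("prec %2d -> adjust %d\n", prec[x], adjust[x]);
    }
    return 0;
}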
Example 2: libopenjpeg_encode_init

static av_cold int libopenjpeg_encode_init(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    int err = AVERROR(ENOMEM);

    opj_set_default_encoder_parameters(&ctx->enc_params);

    ctx->enc_params.cp_rsiz = ctx->profile;
    ctx->enc_params.mode = !!avctx->global_quality;
    ctx->enc_params.cp_cinema = ctx->cinema_mode;
    ctx->enc_params.prog_order = ctx->prog_order;
    ctx->enc_params.numresolution = ctx->numresolution;
    ctx->enc_params.cp_disto_alloc = ctx->disto_alloc;
    ctx->enc_params.cp_fixed_alloc = ctx->fixed_alloc;
    ctx->enc_params.cp_fixed_quality = ctx->fixed_quality;
    ctx->enc_params.tcp_numlayers = ctx->numlayers;
    ctx->enc_params.tcp_rates[0] = FFMAX(avctx->compression_level, 0) * 2;

    if (ctx->cinema_mode > 0) {
        ctx->enc_params.irreversible = 1;
        ctx->enc_params.tcp_mct = 1;
        ctx->enc_params.tile_size_on = 0;
        /* no subsampling */
        ctx->enc_params.cp_tdx=1;
        ctx->enc_params.cp_tdy=1;
        ctx->enc_params.subsampling_dx = 1;
        ctx->enc_params.subsampling_dy = 1;
        /* Tile and Image shall be at (0,0) */
        ctx->enc_params.cp_tx0 = 0;
        ctx->enc_params.cp_ty0 = 0;
        ctx->enc_params.image_offset_x0 = 0;
        ctx->enc_params.image_offset_y0 = 0;
        /* Codeblock size= 32*32 */
        ctx->enc_params.cblockw_init = 32;
        ctx->enc_params.cblockh_init = 32;
        ctx->enc_params.csty |= 0x01;
        /* No ROI */
        ctx->enc_params.roi_compno = -1;

        if (ctx->enc_params.prog_order != CPRL) {
            av_log(avctx, AV_LOG_ERROR, "prog_order forced to CPRL\n");
            ctx->enc_params.prog_order = CPRL;
        }
        ctx->enc_params.tp_flag = 'C';
        ctx->enc_params.tp_on = 1;
    }

    ctx->compress = opj_create_compress(ctx->format);
    if (!ctx->compress) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the compressor\n");
        return AVERROR(ENOMEM);
    }

    ctx->image = mj2_create_image(avctx, &ctx->enc_params);
    if (!ctx->image) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the mj2 image\n");
        err = AVERROR(EINVAL);
        goto fail;
    }
    opj_setup_encoder(ctx->compress, &ctx->enc_params, ctx->image);

    ctx->stream = opj_cio_open((opj_common_ptr) ctx->compress, NULL, 0);
    if (!ctx->stream) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the cio stream\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating coded frame\n");
        goto fail;
    }

    memset(&ctx->event_mgr, 0, sizeof(opj_event_mgr_t));
    ctx->event_mgr.info_handler    = info_callback;
    ctx->event_mgr.error_handler   = error_callback;
    ctx->event_mgr.warning_handler = warning_callback;
    opj_set_event_mgr((opj_common_ptr) ctx->compress, &ctx->event_mgr, avctx);

    return 0;

fail:
    opj_cio_close(ctx->stream);
    ctx->stream = NULL;
    opj_destroy_compress(ctx->compress);
    ctx->compress = NULL;
    opj_image_destroy(ctx->image);
    ctx->image = NULL;
    av_freep(&avctx->coded_frame);
    return err;
}
Developer ID: BlackOmega, Project: FFmpeg, Lines of code: 92
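Example 2 uses FFMAX exactly once, but it is a common sanitizing idiom: tcp_rates[0] = FFMAX(avctx->compression_level, 0) * 2 floors a user option at zero before scaling it (in FFmpeg, compression_level is conventionally -1 when the user left it unset). Below is a minimal sketch of the same floor-then-scale step, with a hypothetical options struct standing in for AVCodecContext:

#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for the encoder context in Example 2. */
struct enc_opts {
    int compression_level;   /* negative conventionally means "unset" */
};

static int target_rate(const struct enc_opts *o)
{
    /* Treat "unset" (negative) the same as 0, then scale. */
    return FFMAX(o->compression_level, 0) * 2;
}

int main(void)
{
    struct enc_opts unset = { -1 }, set = { 6 };
    printf("unset -> %d, set -> %d\n",
           target_rate(&unset), target_rate(&set)); /* 0 and 12 */
    return 0;
}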
示例3: vectorscope//.........这里部分代码省略......... dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255); dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255); dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255); if (dst[3]) dst[3][pos] = 255; } } } break; case COLOR2: if (s->is_yuv) { for (i = 0; i < h; i++) { const int iw1 = i * slinesizex; const int iw2 = i * slinesizey; for (j = 0; j < w; j++) { const int x = spx[iw1 + j]; const int y = spy[iw2 + j]; const int pos = y * dlinesize + x; if (!dpd[pos]) dpd[pos] = FFABS(128 - x) + FFABS(128 - y); dpx[pos] = x; dpy[pos] = y; if (dst[3]) dst[3][pos] = 255; } } } else { for (i = 0; i < h; i++) { const int iw1 = i * slinesizex; const int iw2 = i * slinesizey; for (j = 0; j < w; j++) { const int x = spx[iw1 + j]; const int y = spy[iw2 + j]; const int pos = y * dlinesize + x; if (!dpd[pos]) dpd[pos] = FFMIN(x + y, 255); dpx[pos] = x; dpy[pos] = y; if (dst[3]) dst[3][pos] = 255; } } } break; case COLOR3: for (i = 0; i < h; i++) { const int iw1 = i * slinesizex; const int iw2 = i * slinesizey; for (j = 0; j < w; j++) { const int x = spx[iw1 + j]; const int y = spy[iw2 + j]; const int pos = y * dlinesize + x; dpd[pos] = FFMIN(255, dpd[pos] + intensity); dpx[pos] = x; dpy[pos] = y; if (dst[3]) dst[3][pos] = 255; } } break; case COLOR4: for (i = 0; i < in->height; i++) { const int iwx = (i >> vsub) * slinesizex; const int iwy = (i >> vsub) * slinesizey; const int iwd = i * slinesized; for (j = 0; j < in->width; j++) { const int x = spx[iwx + (j >> hsub)]; const int y = spy[iwy + (j >> hsub)]; const int pos = y * dlinesize + x; dpd[pos] = FFMAX(spd[iwd + j], dpd[pos]); dpx[pos] = x; dpy[pos] = y; if (dst[3]) dst[3][pos] = 255; } } break; default: av_assert0(0); } envelope(s, out); if (s->mode == COLOR) { for (i = 0; i < out->height; i++) { for (j = 0; j < out->width; j++) { if (!dpd[i * out->linesize[pd] + j]) { dpx[i * out->linesize[px] + j] = j; dpy[i * out->linesize[py] + j] = i; dpd[i * out->linesize[pd] + j] = 128; } } } }}
Developer ID: jladan, Project: FFmpeg, Lines of code: 101
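Buried in Example 3's COLOR4 branch is a peak-hold update, dpd[pos] = FFMAX(spd[iwd + j], dpd[pos]): each output cell keeps the largest value ever written to it, which is how the vectorscope accumulates a bright trace. A minimal sketch of that accumulation over a hypothetical 8-entry buffer (all values invented):

#include <stdio.h>
#include <string.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Hypothetical scope accumulator; Example 3 does the same thing
     * per pixel of the output plane. */
    unsigned char acc[8];
    const unsigned char frame1[8] = { 10, 200,  30, 40,  0,  60, 70, 80 };
    const unsigned char frame2[8] = { 90,  20, 130,  5, 50, 255,  1, 80 };

    memset(acc, 0, sizeof(acc));            /* start from a dark display */

    for (int i = 0; i < 8; i++)
        acc[i] = FFMAX(frame1[i], acc[i]);  /* first frame              */
    for (int i = 0; i < 8; i++)
        acc[i] = FFMAX(frame2[i], acc[i]);  /* peak-hold across frames  */

    for (int i = 0; i < 8; i++)
        printf("%d ", acc[i]);
    printf("\n");                           /* 90 200 130 40 50 255 70 80 */
    return 0;
}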
示例4: ff_adaptive_quantizationfloat ff_adaptive_quantization(MpegEncContext *s, double q){ int i; const float lumi_masking= s->avctx->lumi_masking / (128.0*128.0); const float dark_masking= s->avctx->dark_masking / (128.0*128.0); const float temp_cplx_masking= s->avctx->temporal_cplx_masking; const float spatial_cplx_masking = s->avctx->spatial_cplx_masking; const float p_masking = s->avctx->p_masking; const float border_masking = s->avctx->border_masking; float bits_sum= 0.0; float cplx_sum= 0.0;#if __STDC_VERSION__ >= 199901L float cplx_tab[s->mb_num]; float bits_tab[s->mb_num];#else float *cplx_tab=_alloca(sizeof(float)*s->mb_num); float *bits_tab=_alloca(sizeof(float)*s->mb_num);#endif const int qmin= s->avctx->mb_lmin; const int qmax= s->avctx->mb_lmax; Picture * const pic= &s->current_picture; const int mb_width = s->mb_width; const int mb_height = s->mb_height; float avg= 0.0; for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; float temp_cplx= sqrt(pic->mc_mb_var[mb_xy]); //FIXME merge in pow() float spat_cplx= sqrt(pic->mb_var[mb_xy]); const int lumi= pic->mb_mean[mb_xy]; float bits, cplx, factor; int mb_x = mb_xy % s->mb_stride; int mb_y = mb_xy / s->mb_stride; int mb_distance; float mb_factor = 0.0;#if 0 if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune if(temp_cplx < q/3) temp_cplx= q/3; //FIXME finetune#endif if(spat_cplx < 4) spat_cplx= 4; //FIXME finetune if(temp_cplx < 4) temp_cplx= 4; //FIXME finetune if((s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTRA)){//FIXME hq mode cplx= spat_cplx; factor= 1.0 + p_masking; }else{ cplx= temp_cplx; factor= pow(temp_cplx, - temp_cplx_masking); } factor*=pow(spat_cplx, - spatial_cplx_masking); if(lumi>127) factor*= (1.0 - (lumi-128)*(lumi-128)*lumi_masking); else factor*= (1.0 - (lumi-128)*(lumi-128)*dark_masking); if(mb_x < mb_width/5){ mb_distance = mb_width/5 - mb_x; mb_factor = (float)mb_distance / (float)(mb_width/5); }else if(mb_x > 4*mb_width/5){ mb_distance = mb_x - 4*mb_width/5; mb_factor = (float)mb_distance / (float)(mb_width/5); } if(mb_y < mb_height/5){ mb_distance = mb_height/5 - mb_y; mb_factor = FFMAX(mb_factor, (float)mb_distance / (float)(mb_height/5)); }else if(mb_y > 4*mb_height/5){ mb_distance = mb_y - 4*mb_height/5; mb_factor = FFMAX(mb_factor, (float)mb_distance / (float)(mb_height/5)); } factor*= 1.0 - border_masking*mb_factor; if(factor<0.00001) factor= 0.00001; bits= cplx*factor; cplx_sum+= cplx; bits_sum+= bits; cplx_tab[i]= cplx; bits_tab[i]= bits; } /* handle qmin/qmax clipping */ if(s->flags&CODEC_FLAG_NORMALIZE_AQP){ float factor= bits_sum/cplx_sum; for(i=0; i<s->mb_num; i++){ float newq= q*cplx_tab[i]/bits_tab[i]; newq*= factor; if (newq > qmax){ bits_sum -= bits_tab[i]; cplx_sum -= cplx_tab[i]*q/qmax; } else if(newq < qmin){ bits_sum -= bits_tab[i]; cplx_sum -= cplx_tab[i]*q/qmin; } } if(bits_sum < 0.001) bits_sum= 0.001; if(cplx_sum < 0.001) cplx_sum= 0.001; }//.........这里部分代码省略.........
Developer ID: Fluffiest, Project: splayer, Lines of code: 101
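Example 4 combines two independent penalties with FFMAX: the border-masking factor is first computed from the horizontal distance to the frame edge, then raised (never lowered) by the vertical distance. The sketch below isolates just that computation, using the same one-fifth-of-the-frame band as the original; the frame sizes are invented:

#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* How close a macroblock is to the nearest frame border, on a 0..1
 * scale, taking the stronger of the horizontal and vertical
 * contributions via FFMAX (widths/heights are in macroblocks). */
static float border_factor(int mb_x, int mb_y, int mb_width, int mb_height)
{
    float f = 0.0f;

    if (mb_x < mb_width/5)
        f = (float)(mb_width/5 - mb_x) / (mb_width/5);
    else if (mb_x > 4*mb_width/5)
        f = (float)(mb_x - 4*mb_width/5) / (mb_width/5);

    if (mb_y < mb_height/5)
        f = FFMAX(f, (float)(mb_height/5 - mb_y) / (mb_height/5));
    else if (mb_y > 4*mb_height/5)
        f = FFMAX(f, (float)(mb_y - 4*mb_height/5) / (mb_height/5));

    return f;   /* 0 in the middle, up to 1 in a corner */
}

int main(void)
{
    printf("center: %.2f  corner: %.2f\n",
           border_factor(20, 15, 40, 30), border_factor(0, 0, 40, 30));
    return 0;
}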
示例5: av_probe_input_buffer2int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, const char *filename, void *logctx, unsigned int offset, unsigned int max_probe_size){ AVProbeData pd = { filename ? filename : "" }; uint8_t *buf = NULL; int ret = 0, probe_size, buf_offset = 0; int score = 0; int ret2; if (!max_probe_size) max_probe_size = PROBE_BUF_MAX; else if (max_probe_size < PROBE_BUF_MIN) { av_log(logctx, AV_LOG_ERROR, "Specified probe size value %u cannot be < %u/n", max_probe_size, PROBE_BUF_MIN); return AVERROR(EINVAL); } if (offset >= max_probe_size) return AVERROR(EINVAL); if (pb->av_class) { uint8_t *mime_type_opt = NULL; av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type_opt); pd.mime_type = (const char *)mime_type_opt; }#if 0 if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) { if (!av_strcasecmp(mime_type, "audio/aacp")) { *fmt = av_find_input_format("aac"); } av_freep(&mime_type); }#endif for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt; probe_size = FFMIN(probe_size << 1, FFMAX(max_probe_size, probe_size + 1))) { score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0; /* Read probe data. */ if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0) goto fail; if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { /* Fail if error was not end of file, otherwise, lower score. */ if (ret != AVERROR_EOF) goto fail; score = 0; ret = 0; /* error was end of file, nothing read */ } buf_offset += ret; if (buf_offset < offset) continue; pd.buf_size = buf_offset - offset; pd.buf = &buf[offset]; memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); /* Guess file format. */ *fmt = av_probe_input_format2(&pd, 1, &score); if (*fmt) { /* This can only be true in the last iteration. */ if (score <= AVPROBE_SCORE_RETRY) { av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, " "misdetection possible!/n", (*fmt)->name, score); } else av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d/n", (*fmt)->name, probe_size, score);#if 0 FILE *f = fopen("probestat.tmp", "ab"); fprintf(f, "probe_size:%d format:%s score:%d filename:%s/n", probe_size, (*fmt)->name, score, filename); fclose(f);#endif } } if (!*fmt) ret = AVERROR_INVALIDDATA;fail: /* Rewind. Reuse probe buffer to avoid seeking. */ ret2 = ffio_rewind_with_probe_data(pb, &buf, buf_offset); if (ret >= 0) ret = ret2; av_freep(&pd.mime_type); return ret < 0 ? ret : score;}
Developer ID: 309746069, Project: FFmpeg, Lines of code: 92
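The loop header in Example 5 is worth unpacking: probe_size = FFMIN(probe_size << 1, FFMAX(max_probe_size, probe_size + 1)) doubles the probe buffer each round, lands exactly on the cap for the final round, and the inner FFMAX(..., probe_size + 1) guarantees strict progress past the cap so the loop always terminates. A runnable sketch of just that schedule — the 2048 start and the cap below are stand-ins for FFmpeg's PROBE_BUF_MIN/PROBE_BUF_MAX:

#include <stdio.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    unsigned int probe_size, max_probe_size = 5000000;

    /* Doubling schedule: 2048, 4096, ..., then exactly the cap, then
     * cap+1 (via FFMAX) which ends the loop. */
    for (probe_size = 2048; probe_size <= max_probe_size;
         probe_size = FFMIN(probe_size << 1,
                            FFMAX(max_probe_size, probe_size + 1)))
        printf("probing with %u bytes\n", probe_size);
    return 0;
}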
示例6: auto_matrix//.........这里部分代码省略......... }else{ matrix[ SIDE_LEFT][ BACK_LEFT]+= 1.0; matrix[SIDE_RIGHT][BACK_RIGHT]+= 1.0; } }else if(s->out_ch_layout & AV_CH_FRONT_LEFT){ matrix[ FRONT_LEFT][ BACK_LEFT]+= s->slev; matrix[FRONT_RIGHT][BACK_RIGHT]+= s->slev; }else if(s->out_ch_layout & AV_CH_FRONT_CENTER){ matrix[ FRONT_CENTER][BACK_LEFT ]+= s->slev*M_SQRT1_2; matrix[ FRONT_CENTER][BACK_RIGHT]+= s->slev*M_SQRT1_2; }else av_assert0(0); } if(unaccounted & AV_CH_SIDE_LEFT){ if(s->out_ch_layout & AV_CH_BACK_LEFT){ /* if back channels do not exist in the input, just copy side channels to back channels, otherwise mix side into back */ if (s->in_ch_layout & AV_CH_BACK_LEFT) { matrix[BACK_LEFT ][SIDE_LEFT ] += M_SQRT1_2; matrix[BACK_RIGHT][SIDE_RIGHT] += M_SQRT1_2; } else { matrix[BACK_LEFT ][SIDE_LEFT ] += 1.0; matrix[BACK_RIGHT][SIDE_RIGHT] += 1.0; } }else if(s->out_ch_layout & AV_CH_BACK_CENTER){ matrix[BACK_CENTER][ SIDE_LEFT]+= M_SQRT1_2; matrix[BACK_CENTER][SIDE_RIGHT]+= M_SQRT1_2; }else if(s->out_ch_layout & AV_CH_FRONT_LEFT){ matrix[ FRONT_LEFT][ SIDE_LEFT]+= s->slev; matrix[FRONT_RIGHT][SIDE_RIGHT]+= s->slev; }else if(s->out_ch_layout & AV_CH_FRONT_CENTER){ matrix[ FRONT_CENTER][SIDE_LEFT ]+= s->slev*M_SQRT1_2; matrix[ FRONT_CENTER][SIDE_RIGHT]+= s->slev*M_SQRT1_2; }else av_assert0(0); } if(unaccounted & AV_CH_FRONT_LEFT_OF_CENTER){ if(s->out_ch_layout & AV_CH_FRONT_LEFT){ matrix[ FRONT_LEFT][ FRONT_LEFT_OF_CENTER]+= 1.0; matrix[FRONT_RIGHT][FRONT_RIGHT_OF_CENTER]+= 1.0; }else if(s->out_ch_layout & AV_CH_FRONT_CENTER){ matrix[ FRONT_CENTER][ FRONT_LEFT_OF_CENTER]+= M_SQRT1_2; matrix[ FRONT_CENTER][FRONT_RIGHT_OF_CENTER]+= M_SQRT1_2; }else av_assert0(0); } /* mix LFE into front left/right or center */ if (unaccounted & AV_CH_LOW_FREQUENCY) { if (s->out_ch_layout & AV_CH_FRONT_CENTER) { matrix[FRONT_CENTER][LOW_FREQUENCY] += s->lfe_mix_level; } else if (s->out_ch_layout & AV_CH_FRONT_LEFT) { matrix[FRONT_LEFT ][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2; matrix[FRONT_RIGHT][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2; } else av_assert0(0); } for(out_i=i=0; i<64; i++){ double sum=0; int in_i=0; for(j=0; j<64; j++){ s->matrix[out_i][in_i]= matrix[i][j]; if(matrix[i][j]){ sum += fabs(matrix[i][j]); } if(s->in_ch_layout & (1ULL<<j)) in_i++; } maxcoef= FFMAX(maxcoef, sum); if(s->out_ch_layout & (1ULL<<i)) out_i++; } if(s->rematrix_volume < 0) maxcoef = -s->rematrix_volume; if(( av_get_packed_sample_fmt(s->out_sample_fmt) < AV_SAMPLE_FMT_FLT || av_get_packed_sample_fmt(s->int_sample_fmt) < AV_SAMPLE_FMT_FLT) && maxcoef > 1.0){ for(i=0; i<SWR_CH_MAX; i++) for(j=0; j<SWR_CH_MAX; j++){ s->matrix[i][j] /= maxcoef; } } if(s->rematrix_volume > 0){ for(i=0; i<SWR_CH_MAX; i++) for(j=0; j<SWR_CH_MAX; j++){ s->matrix[i][j] *= s->rematrix_volume; } } for(i=0; i<av_get_channel_layout_nb_channels(s->out_ch_layout); i++){ for(j=0; j<av_get_channel_layout_nb_channels(s->in_ch_layout); j++){ av_log(NULL, AV_LOG_DEBUG, "%f ", s->matrix[i][j]); } av_log(NULL, AV_LOG_DEBUG, "/n"); } return 0;}
Developer ID: 9aa5, Project: FFmpeg, Lines of code: 101
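Near the end of Example 6, maxcoef = FFMAX(maxcoef, sum) accumulates the largest per-output-channel sum of absolute coefficients (the matrix infinity norm); if it exceeds 1.0 while an integer sample format is in use, the whole matrix is divided by it to prevent clipping. A toy version with a 2x3 downmix matrix (coefficients invented):

#include <stdio.h>
#include <math.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Toy 2x3 downmix matrix; Example 6 builds a 64x64 one. */
    double m[2][3] = { { 1.0, 0.0, 0.7071 },
                       { 0.0, 1.0, 0.7071 } };
    double maxcoef = 0.0;

    /* Largest sum of absolute row coefficients: if it exceeds 1.0,
     * integer samples could clip, so rescale the whole matrix. */
    for (int i = 0; i < 2; i++) {
        double sum = 0.0;
        for (int j = 0; j < 3; j++)
            sum += fabs(m[i][j]);
        maxcoef = FFMAX(maxcoef, sum);
    }

    if (maxcoef > 1.0)
        for (int i = 0; i < 2; i++)
            for (int j = 0; j < 3; j++)
                m[i][j] /= maxcoef;

    printf("maxcoef %.4f, scaled m[0][0] %.4f\n", maxcoef, m[0][0]);
    return 0;
}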
Example 7: apply_delogo

/**
 * Apply a simple delogo algorithm to the image in src and put the
 * result in dst.
 *
 * The algorithm is only applied to the region specified by the logo
 * parameters.
 *
 * @param w      width of the input image
 * @param h      height of the input image
 * @param logo_x x coordinate of the top left corner of the logo region
 * @param logo_y y coordinate of the top left corner of the logo region
 * @param logo_w width of the logo
 * @param logo_h height of the logo
 * @param band   the size of the band around the processed area
 * @param show   show a rectangle around the processed area, useful for
 *               parameters tweaking
 * @param direct if non-zero perform in-place processing
 */
static void apply_delogo(uint8_t *dst, int dst_linesize,
                         uint8_t *src, int src_linesize,
                         int w, int h,
                         int logo_x, int logo_y, int logo_w, int logo_h,
                         int band, int show, int direct)
{
    int x, y;
    int interp, dist;
    uint8_t *xdst, *xsrc;

    uint8_t *topleft, *botleft, *topright;
    int xclipl, xclipr, yclipt, yclipb;
    int logo_x1, logo_x2, logo_y1, logo_y2;

    xclipl = FFMAX(-logo_x, 0);
    xclipr = FFMAX(logo_x+logo_w-w, 0);
    yclipt = FFMAX(-logo_y, 0);
    yclipb = FFMAX(logo_y+logo_h-h, 0);

    logo_x1 = logo_x + xclipl;
    logo_x2 = logo_x + logo_w - xclipr;
    logo_y1 = logo_y + yclipt;
    logo_y2 = logo_y + logo_h - yclipb;

    topleft  = src+logo_y1     * src_linesize+logo_x1;
    topright = src+logo_y1     * src_linesize+logo_x2-1;
    botleft  = src+(logo_y2-1) * src_linesize+logo_x1;

    dst += (logo_y1+1)*dst_linesize;
    src += (logo_y1+1)*src_linesize;

    if (!direct)
        av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h);

    for (y = logo_y1+1; y < logo_y2-1; y++) {
        for (x = logo_x1+1,
             xdst = dst+logo_x1+1,
             xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {
            interp = (topleft[src_linesize*(y-logo_y  -yclipt)]   +
                      topleft[src_linesize*(y-logo_y-1-yclipt)]   +
                      topleft[src_linesize*(y-logo_y+1-yclipt)])  * (logo_w-(x-logo_x))/logo_w
                   + (topright[src_linesize*(y-logo_y  -yclipt)]  +
                      topright[src_linesize*(y-logo_y-1-yclipt)]  +
                      topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w
                   + (topleft[x-logo_x  -xclipl]                  +
                      topleft[x-logo_x-1-xclipl]                  +
                      topleft[x-logo_x+1-xclipl])                 * (logo_h-(y-logo_y))/logo_h
                   + (botleft[x-logo_x  -xclipl]                  +
                      botleft[x-logo_x-1-xclipl]                  +
                      botleft[x-logo_x+1-xclipl])                 * (y-logo_y)/logo_h;
            interp /= 6;

            if (y >= logo_y+band && y < logo_y+logo_h-band &&
                x >= logo_x+band && x < logo_x+logo_w-band) {
                *xdst = interp;
            } else {
                dist = 0;
                if      (x < logo_x+band)
                    dist = FFMAX(dist, logo_x-x+band);
                else if (x >= logo_x+logo_w-band)
                    dist = FFMAX(dist, x-(logo_x+logo_w-1-band));

                if      (y < logo_y+band)
                    dist = FFMAX(dist, logo_y-y+band);
                else if (y >= logo_y+logo_h-band)
                    dist = FFMAX(dist, y-(logo_y+logo_h-1-band));

                *xdst = (*xsrc*dist + interp*(band-dist))/band;

                if (show && (dist == band-1))
                    *xdst = 0;
            }
        }

        dst += dst_linesize;
        src += src_linesize;
    }
}
Developer ID: nikkpap, Project: SPMC, Lines of code: 99
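The first four FFMAX calls in Example 7 are a classic rectangle-clipping idiom: each computes how far the logo rectangle overhangs one image edge, clamped at zero when there is no overhang. Here is the same arithmetic on its own, with invented coordinates:

#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* A logo rectangle that sticks out past the left and bottom edges. */
    int w = 640, h = 480;                       /* image size */
    int logo_x = -10, logo_y = 20, logo_w = 100, logo_h = 500;

    /* Overhang per edge, 0 when the rectangle is inside. */
    int xclipl = FFMAX(-logo_x, 0);             /* 10 */
    int xclipr = FFMAX(logo_x + logo_w - w, 0); /*  0 */
    int yclipt = FFMAX(-logo_y, 0);             /*  0 */
    int yclipb = FFMAX(logo_y + logo_h - h, 0); /* 40 */

    /* The in-bounds rectangle that actually gets processed. */
    printf("x: %d..%d  y: %d..%d\n",
           logo_x + xclipl, logo_x + logo_w - xclipr,
           logo_y + yclipt, logo_y + logo_h - yclipb); /* 0..90, 20..480 */
    return 0;
}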
Example 8: mark_pns

static void mark_pns(AACEncContext *s, AVCodecContext *avctx, SingleChannelElement *sce)
{
    FFPsyBand *band;
    int w, g, w2;
    int wlen = 1024 / sce->ics.num_windows;
    int bandwidth, cutoff;
    const float lambda = s->lambda;
    const float freq_mult = avctx->sample_rate*0.5f/wlen;
    const float spread_threshold = FFMIN(0.75f, NOISE_SPREAD_THRESHOLD*FFMAX(0.5f, lambda/100.f));
    const float pns_transient_energy_r = FFMIN(0.7f, lambda / 140.f);

    int refbits = avctx->bit_rate * 1024.0 / avctx->sample_rate
                  / ((avctx->flags & CODEC_FLAG_QSCALE) ? 2.0f : avctx->channels)
                  * (lambda / 120.f);

    /** Keep this in sync with twoloop's cutoff selection */
    float rate_bandwidth_multiplier = 1.5f;
    int frame_bit_rate = (avctx->flags & CODEC_FLAG_QSCALE)
                         ? (refbits * rate_bandwidth_multiplier * avctx->sample_rate / 1024)
                         : (avctx->bit_rate / avctx->channels);

    frame_bit_rate *= 1.15f;

    if (avctx->cutoff > 0) {
        bandwidth = avctx->cutoff;
    } else {
        bandwidth = FFMAX(3000, AAC_CUTOFF_FROM_BITRATE(frame_bit_rate, 1, avctx->sample_rate));
    }

    cutoff = bandwidth * 2 * wlen / avctx->sample_rate;

    memcpy(sce->band_alt, sce->band_type, sizeof(sce->band_type));
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        for (g = 0; g < sce->ics.num_swb; g++) {
            float sfb_energy = 0.0f, threshold = 0.0f, spread = 2.0f;
            float min_energy = -1.0f, max_energy = 0.0f;
            const int start = sce->ics.swb_offset[g];
            const float freq = start*freq_mult;
            const float freq_boost = FFMAX(0.88f*freq/NOISE_LOW_LIMIT, 1.0f);

            if (freq < NOISE_LOW_LIMIT || start >= cutoff) {
                sce->can_pns[w*16+g] = 0;
                continue;
            }

            for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
                sfb_energy += band->energy;
                spread     = FFMIN(spread, band->spread);
                threshold += band->threshold;
                if (!w2) {
                    min_energy = max_energy = band->energy;
                } else {
                    min_energy = FFMIN(min_energy, band->energy);
                    max_energy = FFMAX(max_energy, band->energy);
                }
            }

            /* PNS is acceptable when all of these are true:
             * 1. high spread energy (noise-like band)
             * 2. near-threshold energy (high PE means the random nature of PNS content will be noticed)
             * 3. on short window groups, all windows have similar energy (variations in energy would be destroyed by PNS)
             */
            sce->pns_ener[w*16+g] = sfb_energy;
            if (sfb_energy < threshold*sqrtf(1.5f/freq_boost) ||
                spread < spread_threshold ||
                min_energy < pns_transient_energy_r * max_energy) {
                sce->can_pns[w*16+g] = 0;
            } else {
                sce->can_pns[w*16+g] = 1;
            }
        }
    }
}
Developer ID: Bilibili, Project: FFmpeg, Lines of code: 70
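Example 8's inner loop shows the standard running min/max idiom: seed both extremes from the first element (the min_energy = max_energy = band->energy branch), then fold the rest in with FFMIN/FFMAX. A self-contained sketch with invented band energies:

#include <stdio.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Per-window energies for one scalefactor band (hypothetical
     * numbers; Example 8 reads them from the psy model). */
    float energy[4] = { 0.9f, 1.4f, 0.2f, 1.1f };
    float min_energy = 0.0f, max_energy = 0.0f;

    for (int w2 = 0; w2 < 4; w2++) {
        if (!w2) {
            min_energy = max_energy = energy[w2]; /* seed both extremes */
        } else {
            min_energy = FFMIN(min_energy, energy[w2]);
            max_energy = FFMAX(max_energy, energy[w2]);
        }
    }

    /* A large max/min spread marks the band as transient, which is
     * one of the tests that disqualifies it from PNS. */
    printf("min %.2f max %.2f\n", min_energy, max_energy); /* 0.20 1.40 */
    return 0;
}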
示例9: search_for_msstatic void search_for_ms(AACEncContext *s, ChannelElement *cpe){ int start = 0, i, w, w2, g, sid_sf_boost, prev_mid, prev_side; uint8_t nextband0[128], nextband1[128]; float *M = s->scoefs + 128*0, *S = s->scoefs + 128*1; float *L34 = s->scoefs + 128*2, *R34 = s->scoefs + 128*3; float *M34 = s->scoefs + 128*4, *S34 = s->scoefs + 128*5; const float lambda = s->lambda; const float mslambda = FFMIN(1.0f, lambda / 120.f); SingleChannelElement *sce0 = &cpe->ch[0]; SingleChannelElement *sce1 = &cpe->ch[1]; if (!cpe->common_window) return; /** Scout out next nonzero bands */ ff_init_nextband_map(sce0, nextband0); ff_init_nextband_map(sce1, nextband1); prev_mid = sce0->sf_idx[0]; prev_side = sce1->sf_idx[0]; for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) { start = 0; for (g = 0; g < sce0->ics.num_swb; g++) { float bmax = bval2bmax(g * 17.0f / sce0->ics.num_swb) / 0.0045f; if (!cpe->is_mask[w*16+g]) cpe->ms_mask[w*16+g] = 0; if (!sce0->zeroes[w*16+g] && !sce1->zeroes[w*16+g] && !cpe->is_mask[w*16+g]) { float Mmax = 0.0f, Smax = 0.0f; /* Must compute mid/side SF and book for the whole window group */ for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) { for (i = 0; i < sce0->ics.swb_sizes[g]; i++) { M[i] = (sce0->coeffs[start+(w+w2)*128+i] + sce1->coeffs[start+(w+w2)*128+i]) * 0.5; S[i] = M[i] - sce1->coeffs[start+(w+w2)*128+i]; } s->abs_pow34(M34, M, sce0->ics.swb_sizes[g]); s->abs_pow34(S34, S, sce0->ics.swb_sizes[g]); for (i = 0; i < sce0->ics.swb_sizes[g]; i++ ) { Mmax = FFMAX(Mmax, M34[i]); Smax = FFMAX(Smax, S34[i]); } } for (sid_sf_boost = 0; sid_sf_boost < 4; sid_sf_boost++) { float dist1 = 0.0f, dist2 = 0.0f; int B0 = 0, B1 = 0; int minidx; int mididx, sididx; int midcb, sidcb; minidx = FFMIN(sce0->sf_idx[w*16+g], sce1->sf_idx[w*16+g]); mididx = av_clip(minidx, 0, SCALE_MAX_POS - SCALE_DIV_512); sididx = av_clip(minidx - sid_sf_boost * 3, 0, SCALE_MAX_POS - SCALE_DIV_512); if (sce0->band_type[w*16+g] != NOISE_BT && sce1->band_type[w*16+g] != NOISE_BT && ( !ff_sfdelta_can_replace(sce0, nextband0, prev_mid, mididx, w*16+g) || !ff_sfdelta_can_replace(sce1, nextband1, prev_side, sididx, w*16+g))) { /* scalefactor range violation, bad stuff, will decrease quality unacceptably */ continue; } midcb = find_min_book(Mmax, mididx); sidcb = find_min_book(Smax, sididx); /* No CB can be zero */ midcb = FFMAX(1,midcb); sidcb = FFMAX(1,sidcb); for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) { FFPsyBand *band0 = &s->psy.ch[s->cur_channel+0].psy_bands[(w+w2)*16+g]; FFPsyBand *band1 = &s->psy.ch[s->cur_channel+1].psy_bands[(w+w2)*16+g]; float minthr = FFMIN(band0->threshold, band1->threshold); int b1,b2,b3,b4; for (i = 0; i < sce0->ics.swb_sizes[g]; i++) { M[i] = (sce0->coeffs[start+(w+w2)*128+i] + sce1->coeffs[start+(w+w2)*128+i]) * 0.5; S[i] = M[i] - sce1->coeffs[start+(w+w2)*128+i]; } s->abs_pow34(L34, sce0->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]); s->abs_pow34(R34, sce1->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]); s->abs_pow34(M34, M, sce0->ics.swb_sizes[g]); s->abs_pow34(S34, S, sce0->ics.swb_sizes[g]); dist1 += quantize_band_cost(s, &sce0->coeffs[start + (w+w2)*128], L34, sce0->ics.swb_sizes[g], sce0->sf_idx[w*16+g], sce0->band_type[w*16+g], lambda / band0->threshold, INFINITY, &b1, NULL, 0); dist1 += quantize_band_cost(s, &sce1->coeffs[start + (w+w2)*128], R34, sce1->ics.swb_sizes[g], sce1->sf_idx[w*16+g], sce1->band_type[w*16+g], lambda / band1->threshold, INFINITY, &b2, NULL, 0); dist2 += quantize_band_cost(s, M, M34, 
sce0->ics.swb_sizes[g], // ... (the rest of this code is omitted) ...
Developer ID: Bilibili, Project: FFmpeg, Lines of code: 101
示例10: search_for_quantizers_anmrstatic void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda){ int q, w, w2, g, start = 0; int i, j; int idx; TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES]; int bandaddr[TRELLIS_STAGES]; int minq; float mincost; float q0f = FLT_MAX, q1f = 0.0f, qnrgf = 0.0f; int q0, q1, qcnt = 0; for (i = 0; i < 1024; i++) { float t = fabsf(sce->coeffs[i]); if (t > 0.0f) { q0f = FFMIN(q0f, t); q1f = FFMAX(q1f, t); qnrgf += t*t; qcnt++; } } if (!qcnt) { memset(sce->sf_idx, 0, sizeof(sce->sf_idx)); memset(sce->zeroes, 1, sizeof(sce->zeroes)); return; } //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped q0 = av_clip(coef2minsf(q0f), 0, SCALE_MAX_POS-1); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero q1 = av_clip(coef2maxsf(q1f), 1, SCALE_MAX_POS); if (q1 - q0 > 60) { int q0low = q0; int q1high = q1; //minimum scalefactor index is when maximum nonzero coefficient after quantizing is not clipped int qnrg = av_clip_uint8(log2f(sqrtf(qnrgf/qcnt))*4 - 31 + SCALE_ONE_POS - SCALE_DIV_512); q1 = qnrg + 30; q0 = qnrg - 30; if (q0 < q0low) { q1 += q0low - q0; q0 = q0low; } else if (q1 > q1high) { q0 -= q1 - q1high; q1 = q1high; } } // q0 == q1 isn't really a legal situation if (q0 == q1) { // the following is indirect but guarantees q1 != q0 && q1 near q0 q1 = av_clip(q0+1, 1, SCALE_MAX_POS); q0 = av_clip(q1-1, 0, SCALE_MAX_POS - 1); } for (i = 0; i < TRELLIS_STATES; i++) { paths[0][i].cost = 0.0f; paths[0][i].prev = -1; } for (j = 1; j < TRELLIS_STAGES; j++) { for (i = 0; i < TRELLIS_STATES; i++) { paths[j][i].cost = INFINITY; paths[j][i].prev = -2; } } idx = 1; s->abs_pow34(s->scoefs, sce->coeffs, 1024); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = &sce->coeffs[start]; float qmin, qmax; int nz = 0; bandaddr[idx] = w * 16 + g; qmin = INT_MAX; qmax = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; } sce->zeroes[(w+w2)*16+g] = 0; nz = 1; for (i = 0; i < sce->ics.swb_sizes[g]; i++) { float t = fabsf(coefs[w2*128+i]); if (t > 0.0f) qmin = FFMIN(qmin, t); qmax = FFMAX(qmax, t); } } if (nz) { int minscale, maxscale; float minrd = INFINITY; float maxval; //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped minscale = coef2minsf(qmin); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero//.........这里部分代码省略.........
Developer ID: Bilibili, Project: FFmpeg, Lines of code: 101
示例11: search_for_pnsstatic void search_for_pns(AACEncContext *s, AVCodecContext *avctx, SingleChannelElement *sce){ FFPsyBand *band; int w, g, w2, i; int wlen = 1024 / sce->ics.num_windows; int bandwidth, cutoff; float *PNS = &s->scoefs[0*128], *PNS34 = &s->scoefs[1*128]; float *NOR34 = &s->scoefs[3*128]; uint8_t nextband[128]; const float lambda = s->lambda; const float freq_mult = avctx->sample_rate*0.5f/wlen; const float thr_mult = NOISE_LAMBDA_REPLACE*(100.0f/lambda); const float spread_threshold = FFMIN(0.75f, NOISE_SPREAD_THRESHOLD*FFMAX(0.5f, lambda/100.f)); const float dist_bias = av_clipf(4.f * 120 / lambda, 0.25f, 4.0f); const float pns_transient_energy_r = FFMIN(0.7f, lambda / 140.f); int refbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / ((avctx->flags & CODEC_FLAG_QSCALE) ? 2.0f : avctx->channels) * (lambda / 120.f); /** Keep this in sync with twoloop's cutoff selection */ float rate_bandwidth_multiplier = 1.5f; int prev = -1000, prev_sf = -1; int frame_bit_rate = (avctx->flags & CODEC_FLAG_QSCALE) ? (refbits * rate_bandwidth_multiplier * avctx->sample_rate / 1024) : (avctx->bit_rate / avctx->channels); frame_bit_rate *= 1.15f; if (avctx->cutoff > 0) { bandwidth = avctx->cutoff; } else { bandwidth = FFMAX(3000, AAC_CUTOFF_FROM_BITRATE(frame_bit_rate, 1, avctx->sample_rate)); } cutoff = bandwidth * 2 * wlen / avctx->sample_rate; memcpy(sce->band_alt, sce->band_type, sizeof(sce->band_type)); ff_init_nextband_map(sce, nextband); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { int wstart = w*128; for (g = 0; g < sce->ics.num_swb; g++) { int noise_sfi; float dist1 = 0.0f, dist2 = 0.0f, noise_amp; float pns_energy = 0.0f, pns_tgt_energy, energy_ratio, dist_thresh; float sfb_energy = 0.0f, threshold = 0.0f, spread = 2.0f; float min_energy = -1.0f, max_energy = 0.0f; const int start = wstart+sce->ics.swb_offset[g]; const float freq = (start-wstart)*freq_mult; const float freq_boost = FFMAX(0.88f*freq/NOISE_LOW_LIMIT, 1.0f); if (freq < NOISE_LOW_LIMIT || (start-wstart) >= cutoff) { if (!sce->zeroes[w*16+g]) prev_sf = sce->sf_idx[w*16+g]; continue; } for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; sfb_energy += band->energy; spread = FFMIN(spread, band->spread); threshold += band->threshold; if (!w2) { min_energy = max_energy = band->energy; } else { min_energy = FFMIN(min_energy, band->energy); max_energy = FFMAX(max_energy, band->energy); } } /* Ramps down at ~8000Hz and loosens the dist threshold */ dist_thresh = av_clipf(2.5f*NOISE_LOW_LIMIT/freq, 0.5f, 2.5f) * dist_bias; /* PNS is acceptable when all of these are true: * 1. high spread energy (noise-like band) * 2. near-threshold energy (high PE means the random nature of PNS content will be noticed) * 3. 
on short window groups, all windows have similar energy (variations in energy would be destroyed by PNS) * * At this stage, point 2 is relaxed for zeroed bands near the noise threshold (hole avoidance is more important) */ if ((!sce->zeroes[w*16+g] && !ff_sfdelta_can_remove_band(sce, nextband, prev_sf, w*16+g)) || ((sce->zeroes[w*16+g] || !sce->band_alt[w*16+g]) && sfb_energy < threshold*sqrtf(1.0f/freq_boost)) || spread < spread_threshold || (!sce->zeroes[w*16+g] && sce->band_alt[w*16+g] && sfb_energy > threshold*thr_mult*freq_boost) || min_energy < pns_transient_energy_r * max_energy ) { sce->pns_ener[w*16+g] = sfb_energy; if (!sce->zeroes[w*16+g]) prev_sf = sce->sf_idx[w*16+g]; continue; } pns_tgt_energy = sfb_energy*FFMIN(1.0f, spread*spread); noise_sfi = av_clip(roundf(log2f(pns_tgt_energy)*2), -100, 155); /* Quantize */ noise_amp = -ff_aac_pow2sf_tab[noise_sfi + POW_SF2_ZERO]; /* Dequantize */ if (prev != -1000) { int noise_sfdiff = noise_sfi - prev + SCALE_DIFF_ZERO; if (noise_sfdiff < 0 || noise_sfdiff > 2*SCALE_MAX_DIFF) { if (!sce->zeroes[w*16+g]) prev_sf = sce->sf_idx[w*16+g]; continue; } } for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {//.........这里部分代码省略.........
Developer ID: Bilibili, Project: FFmpeg, Lines of code: 101
示例12: x8_setup_spatial_compensation/** Collect statistics and prepare the edge pixels required by the other spatial compensation functions. * @param src pointer to the beginning of the processed block * @param dst pointer to emu_edge, edge pixels are stored the way other compensation routines do. * @param linesize byte offset between 2 vertical pixels in the source image * @param range pointer to the variable where the edge pixel range is to be stored (max-min values) * @param psum pointer to the variable where the edge pixel sum is to be stored * @param edges Informs this routine that the block is on an image border, so it has to interpolate the missing edge pixels. and some of the edge pixels should be interpolated, the flag has the following meaning: 1 - mb_x==0 - first block in the row, interpolate area #1,#2,#3; 2 - mb_y==0 - first row, interpolate area #3,#4,#5,#6; note: 1|2 - mb_x==mb_y==0 - first block, use 0x80 value for all areas; 4 - mb_x>= (mb_width-1) last block in the row, interpolate area #5;*/static void x8_setup_spatial_compensation(uint8_t *src, uint8_t *dst, int linesize, int * range, int * psum, int edges){ uint8_t * ptr; int sum; int i; int min_pix,max_pix; uint8_t c; if((edges&3)==3){ *psum=0x80*(8+1+8+2); *range=0; memset(dst,0x80,16+1+16+8); //this triggers flat_dc for sure. //flat_dc avoids all (other) prediction modes, but requires dc_level decoding. return; } min_pix=256; max_pix=-1; sum=0; if(!(edges&1)){//(mb_x!=0)//there is previous block on this row ptr=src-1;//left column, area 2 for(i=7;i>=0;i--){ c=*(ptr-1);//area1, same mb as area2, no need to check dst[area1+i]=c; c=*(ptr); sum+=c; min_pix=FFMIN(min_pix,c); max_pix=FFMAX(max_pix,c); dst[area2+i]=c; ptr+=linesize; } } if(!(edges&2)){ //(mb_y!=0)//there is row above ptr=src-linesize;//top line for(i=0;i<8;i++){ c=*(ptr+i); sum+=c; min_pix=FFMIN(min_pix, c); max_pix=FFMAX(max_pix, c); } if(edges&4){//last block on the row? memset(dst+area5,c,8);//set with last pixel fr memcpy(dst+area4, ptr, 8); }else{ memcpy(dst+area4, ptr, 16);//both area4 and 5 } memcpy(dst+area6, ptr-linesize, 8);//area6 always present in the above block } //now calculate the stuff we need if(edges&3){//mb_x==0 || mb_y==0){ int avg=(sum+4)>>3; if(edges&1){ //(mb_x==0) {//implies mb_y!=0 memset(dst+area1,avg,8+8+1);//areas 1,2 and 3 are averaged }else{//implies y==0 x!=0 memset(dst+area3,avg, 1+16+8);//areas 3, 4,5,6 } sum+=avg*9; }else{
Developer ID: Arcen, Project: libav, Lines of code: 79
示例13: decode_framestatic int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt){ AnsiContext *s = avctx->priv_data; uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; int ret, i, count; if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; if (!avctx->frame_number) { for (i=0; i<avctx->height; i++) memset(s->frame->data[0]+ i*s->frame->linesize[0], 0, avctx->width); memset(s->frame->data[1], 0, AVPALETTE_SIZE); } s->frame->pict_type = AV_PICTURE_TYPE_I; s->frame->palette_has_changed = 1; set_palette((uint32_t *)s->frame->data[1]); if (!s->first_frame) { erase_screen(avctx); s->first_frame = 1; } while(buf < buf_end) { switch(s->state) { case STATE_NORMAL: switch (buf[0]) { case 0x00: //NUL case 0x07: //BEL case 0x1A: //SUB /* ignore */ break; case 0x08: //BS s->x = FFMAX(s->x - 1, 0); break; case 0x09: //HT i = s->x / FONT_WIDTH; count = ((i + 8) & ~7) - i; for (i = 0; i < count; i++) draw_char(avctx, ' '); break; case 0x0A: //LF hscroll(avctx); case 0x0D: //CR s->x = 0; break; case 0x0C: //FF erase_screen(avctx); break; case 0x1B: //ESC s->state = STATE_ESCAPE; break; default: draw_char(avctx, buf[0]); } break; case STATE_ESCAPE: if (buf[0] == '[') { s->state = STATE_CODE; s->nb_args = 0; s->args[0] = -1; } else { s->state = STATE_NORMAL; draw_char(avctx, 0x1B); continue; } break; case STATE_CODE: switch(buf[0]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (s->nb_args < MAX_NB_ARGS) s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0'; break; case ';': s->nb_args++; if (s->nb_args < MAX_NB_ARGS) s->args[s->nb_args] = 0; break; case 'M': s->state = STATE_MUSIC_PREAMBLE; break; case '=': case '?': /* ignore */ break; default: if (s->nb_args > MAX_NB_ARGS) av_log(avctx, AV_LOG_WARNING, "args overflow (%i)/n", s->nb_args); if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] >= 0) s->nb_args++; if ((ret = execute_code(avctx, buf[0])) < 0) return ret; s->state = STATE_NORMAL; } break; case STATE_MUSIC_PREAMBLE: if (buf[0] == 0x0E || buf[0] == 0x1B)//.........这里部分代码省略.........
Developer ID: 0xFFeng, Project: ffmpeg, Lines of code: 101
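Example 13 is full of one-line cursor clamps such as s->x = FFMAX(s->x - 1, 0) for backspace: move, but never past the screen edge. The sketch below shows the matching pair of clamps on a hypothetical 80-column terminal:

#include <stdio.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Toy terminal cursor; Example 13 keeps the same invariant for
     * its ANSI screen, only measured in pixels. */
    int x = 0, width = 80;

    x = FFMAX(x - 1, 0);           /* backspace at column 0: stays put   */
    x = FFMIN(x + 200, width - 1); /* cursor-right clamps to last column */

    printf("x = %d\n", x);         /* prints 79 */
    return 0;
}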
示例14: execute_code/** * Execute ANSI escape code * @return 0 on success, negative on error */static int execute_code(AVCodecContext * avctx, int c){ AnsiContext *s = avctx->priv_data; int ret, i, width, height; switch(c) { case 'A': //Cursor Up s->y = FFMAX(s->y - (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), 0); break; case 'B': //Cursor Down s->y = FFMIN(s->y + (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), avctx->height - s->font_height); break; case 'C': //Cursor Right s->x = FFMIN(s->x + (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), avctx->width - FONT_WIDTH); break; case 'D': //Cursor Left s->x = FFMAX(s->x - (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), 0); break; case 'H': //Cursor Position case 'f': //Horizontal and Vertical Position s->y = s->nb_args > 0 ? av_clip((s->args[0] - 1)*s->font_height, 0, avctx->height - s->font_height) : 0; s->x = s->nb_args > 1 ? av_clip((s->args[1] - 1)*FONT_WIDTH, 0, avctx->width - FONT_WIDTH) : 0; break; case 'h': //set creen mode case 'l': //reset screen mode if (s->nb_args < 2) s->args[0] = DEFAULT_SCREEN_MODE; width = avctx->width; height = avctx->height; switch(s->args[0]) { case 0: case 1: case 4: case 5: case 13: case 19: //320x200 (25 rows) s->font = avpriv_cga_font; s->font_height = 8; width = 40<<3; height = 25<<3; break; case 2: case 3: //640x400 (25 rows) s->font = avpriv_vga16_font; s->font_height = 16; width = 80<<3; height = 25<<4; break; case 6: case 14: //640x200 (25 rows) s->font = avpriv_cga_font; s->font_height = 8; width = 80<<3; height = 25<<3; break; case 7: //set line wrapping break; case 15: case 16: //640x350 (43 rows) s->font = avpriv_cga_font; s->font_height = 8; width = 80<<3; height = 43<<3; break; case 17: case 18: //640x480 (60 rows) s->font = avpriv_cga_font; s->font_height = 8; width = 80<<3; height = 60<<4; break; default: avpriv_request_sample(avctx, "Unsupported screen mode"); } if (width != avctx->width || height != avctx->height) { av_frame_unref(s->frame); avcodec_set_dimensions(avctx, width, height); if ((ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; s->frame->pict_type = AV_PICTURE_TYPE_I; s->frame->palette_has_changed = 1; set_palette((uint32_t *)s->frame->data[1]); erase_screen(avctx); } else if (c == 'l') { erase_screen(avctx); } break; case 'J': //Erase in Page switch (s->args[0]) { case 0: erase_line(avctx, s->x, avctx->width - s->x); if (s->y < avctx->height - s->font_height) memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0], DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]); break; case 1: erase_line(avctx, 0, s->x); if (s->y > 0) memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]); break; case 2: erase_screen(avctx); } break; case 'K': //Erase in Line//.........这里部分代码省略.........
Developer ID: 0xFFeng, Project: ffmpeg, Lines of code: 101
Example 15: config_input

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *boxblur = ctx->priv;
    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
    int w = inlink->w, h = inlink->h;
    int cw, ch;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    if (!(boxblur->temp[0] = av_malloc(FFMAX(w, h))) ||
        !(boxblur->temp[1] = av_malloc(FFMAX(w, h))))
        return AVERROR(ENOMEM);

    boxblur->hsub = desc->log2_chroma_w;
    boxblur->vsub = desc->log2_chroma_h;

    var_values[VAR_W]    = inlink->w;
    var_values[VAR_H]    = inlink->h;
    var_values[VAR_CW]   = cw = w>>boxblur->hsub;
    var_values[VAR_CH]   = ch = h>>boxblur->vsub;
    var_values[VAR_HSUB] = 1<<boxblur->hsub;
    var_values[VAR_VSUB] = 1<<boxblur->vsub;

#define EVAL_RADIUS_EXPR(comp)                                          \
    expr = boxblur->comp##_radius_expr;                                 \
    ret = av_expr_parse_and_eval(&res, expr, var_names, var_values,     \
                                 NULL, NULL, NULL, NULL, NULL, 0, ctx); \
    boxblur->comp##_param.radius = res;                                 \
    if (ret < 0) {                                                      \
        av_log(NULL, AV_LOG_ERROR,                                      \
               "Error when evaluating " #comp " radius expression '%s'\n", expr); \
        return ret;                                                     \
    }
    EVAL_RADIUS_EXPR(luma);
    EVAL_RADIUS_EXPR(chroma);
    EVAL_RADIUS_EXPR(alpha);

    av_log(ctx, AV_LOG_INFO,
           "luma_radius:%d luma_power:%d "
           "chroma_radius:%d chroma_power:%d "
           "alpha_radius:%d alpha_power:%d "
           "w:%d chroma_w:%d h:%d chroma_h:%d\n",
           boxblur->luma_param  .radius, boxblur->luma_param  .power,
           boxblur->chroma_param.radius, boxblur->chroma_param.power,
           boxblur->alpha_param .radius, boxblur->alpha_param .power,
           w, cw, h, ch);

#define CHECK_RADIUS_VAL(w_, h_, comp)                                  \
    if (boxblur->comp##_param.radius < 0 ||                             \
        2*boxblur->comp##_param.radius > FFMIN(w_, h_)) {               \
        av_log(ctx, AV_LOG_ERROR,                                       \
               "Invalid " #comp " radius value %d, must be >= 0 and <= %d\n", \
               boxblur->comp##_param.radius, FFMIN(w_, h_)/2);          \
        return AVERROR(EINVAL);                                         \
    }
    CHECK_RADIUS_VAL(w,  h,  luma);
    CHECK_RADIUS_VAL(cw, ch, chroma);
    CHECK_RADIUS_VAL(w,  h,  alpha);

    boxblur->radius[Y] = boxblur->luma_param.radius;
    boxblur->radius[U] = boxblur->radius[V] = boxblur->chroma_param.radius;
    boxblur->radius[A] = boxblur->alpha_param.radius;

    boxblur->power[Y] = boxblur->luma_param.power;
    boxblur->power[U] = boxblur->power[V] = boxblur->chroma_param.power;
    boxblur->power[A] = boxblur->alpha_param.power;

    return 0;
}
Developer ID: antoinemartin, Project: FFmpeg, Lines of code: 71
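The allocation at the top of Example 15 — av_malloc(FFMAX(w, h)) — sizes one scratch buffer for whichever dimension is larger, because the same buffer is reused for both the horizontal and the vertical blur pass. A sketch of the idea with plain malloc standing in for av_malloc:

#include <stdio.h>
#include <stdlib.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    int w = 1920, h = 1080;

    /* One buffer of max(w, h) bytes covers a full row for the
     * horizontal pass and a full column for the vertical pass;
     * no need for w + h, let alone w * h. */
    unsigned char *temp = malloc(FFMAX(w, h));
    if (!temp)
        return 1;

    printf("allocated %d bytes of scratch\n", FFMAX(w, h)); /* 1920 */
    free(temp);
    return 0;
}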
示例16: flv_write_packet//.........这里部分代码省略......... if (flags == 0) { av_log(s, AV_LOG_ERROR, "video codec %X not compatible with flv/n", enc->codec_id); return -1; } flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER; break; case AVMEDIA_TYPE_AUDIO: flags = get_audio_flags(s, enc); assert(size); avio_w8(pb, FLV_TAG_TYPE_AUDIO); break; case AVMEDIA_TYPE_DATA: avio_w8(pb, FLV_TAG_TYPE_META); break; default: return AVERROR(EINVAL); } if (enc->codec_id == AV_CODEC_ID_H264) /* check if extradata looks like MP4 */ if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1) if (ff_avc_parse_nal_units_buf(pkt->data, &data, &size) < 0) return -1; if (flv->delay == AV_NOPTS_VALUE) flv->delay = -pkt->dts; if (pkt->dts < -flv->delay) { av_log(s, AV_LOG_WARNING, "Packets are not in the proper order with respect to DTS/n"); return AVERROR(EINVAL); } ts = pkt->dts + flv->delay; // add delay to force positive dts /* check Speex packet duration */ if (enc->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160) av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than " "8 frames per packet. Adobe Flash " "Player cannot handle this!/n"); if (sc->last_ts < ts) sc->last_ts = ts; avio_wb24(pb, size + flags_size); avio_wb24(pb, ts); avio_w8(pb, (ts >> 24) & 0x7F); // timestamps are 32 bits _signed_ avio_wb24(pb, flv->reserved); if (enc->codec_type == AVMEDIA_TYPE_DATA) { int data_size; int metadata_size_pos = avio_tell(pb); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "onTextData"); avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY); avio_wb32(pb, 2); put_amf_string(pb, "type"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "Text"); put_amf_string(pb, "text"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, pkt->data); put_amf_string(pb, ""); avio_w8(pb, AMF_END_OF_OBJECT); /* write total size of tag */ data_size = avio_tell(pb) - metadata_size_pos; avio_seek(pb, metadata_size_pos - 10, SEEK_SET); avio_wb24(pb, data_size); avio_seek(pb, data_size + 10 - 3, SEEK_CUR); avio_wb32(pb, data_size + 11); } else { avio_w8(pb,flags); if (enc->codec_id == AV_CODEC_ID_VP6) avio_w8(pb, 0); if (enc->codec_id == AV_CODEC_ID_VP6F) avio_w8(pb, enc->extradata_size ? enc->extradata[0] : 0); else if (enc->codec_id == AV_CODEC_ID_AAC) avio_w8(pb, 1); // AAC raw else if (enc->codec_id == AV_CODEC_ID_H264) { avio_w8(pb, 1); // AVC NALU avio_wb24(pb, pkt->pts - pkt->dts); } avio_write(pb, data ? data : pkt->data, size); avio_wb32(pb, size + flags_size + 11); // previous tag size flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration); } avio_flush(pb); av_free(data); return pb->error;}
Developer ID: JSinglan, Project: libav, Lines of code: 101
Example 17: read_shape_from_file

static int read_shape_from_file(int *cols, int *rows, int **values,
                                const char *filename, void *log_ctx)
{
    uint8_t *buf, *p, *pend;
    size_t size;
    int ret, i, j, w;

    if ((ret = av_file_map(filename, &buf, &size, 0, log_ctx)) < 0)
        return ret;

    /* prescan file to get the number of lines and the maximum width */
    w = 0;
    for (i = 0; i < size; i++) {
        if (buf[i] == '\n') {
            if (*rows == INT_MAX) {
                av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of rows in the file\n");
                return AVERROR_INVALIDDATA;
            }
            ++(*rows);
            *cols = FFMAX(*cols, w);
            w = 0;
        } else {
            if (w == INT_MAX) {
                av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of columns in the file\n");
                return AVERROR_INVALIDDATA;
            }
            w++;
        }
    }

    if (*rows > (SIZE_MAX / sizeof(int) / *cols)) {
        av_log(log_ctx, AV_LOG_ERROR, "File with size %dx%d is too big\n",
               *rows, *cols);
        return AVERROR_INVALIDDATA;
    }
    if (!(*values = av_mallocz_array(sizeof(int) * *rows, *cols)))
        return AVERROR(ENOMEM);

    /* fill *values */
    p    = buf;
    pend = buf + size-1;
    for (i = 0; i < *rows; i++) {
        for (j = 0;; j++) {
            if (p > pend || *p == '\n') {
                p++;
                break;
            } else
                (*values)[*cols*i + j] = !!av_isgraph(*(p++));
        }
    }
    av_file_unmap(buf, size);

#ifdef DEBUG
    {
        char *line;
        if (!(line = av_malloc(*cols + 1)))
            return AVERROR(ENOMEM);
        for (i = 0; i < *rows; i++) {
            for (j = 0; j < *cols; j++)
                line[j] = (*values)[i * *cols + j] ? '@' : ' ';
            line[j] = 0;
            av_log(log_ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line);
        }
        av_free(line);
    }
#endif

    return 0;
}
Developer ID: aroelhaqi, Project: FFmpeg, Lines of code: 66
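The prescan in Example 17 is a compact widest-line scan: every time a newline is hit, *cols = FFMAX(*cols, w) folds the just-finished line's width into the running maximum. The sketch below runs the same scan over an in-memory string instead of a mapped file:

#include <stdio.h>
#include <string.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Hypothetical shape-file contents, as Example 17 would map it. */
    const char *buf = "@@\n@@@@@\n@\n";
    size_t size = strlen(buf);
    int rows = 0, cols = 0, w = 0;

    for (size_t i = 0; i < size; i++) {
        if (buf[i] == '\n') {
            rows++;
            cols = FFMAX(cols, w); /* keep the widest line seen so far */
            w = 0;
        } else {
            w++;
        }
    }

    printf("%d rows, widest line %d\n", rows, cols); /* 3 rows, 5 */
    return 0;
}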
示例18: filter_framestatic int filter_frame(AVFilterLink *inlink, AVFrame *buf){ AVFilterContext *ctx = inlink->dst; FPSContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int64_t delta; int i, ret; s->frames_in++; /* discard frames until we get the first timestamp */ if (s->pts == AV_NOPTS_VALUE) { if (buf->pts != AV_NOPTS_VALUE) { ret = write_to_fifo(s->fifo, buf); if (ret < 0) return ret; if (s->start_time != DBL_MAX) { double first_pts = s->start_time * AV_TIME_BASE; first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX); s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q, inlink->time_base); av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")/n", s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q, outlink->time_base)); } else { s->first_pts = s->pts = buf->pts; } } else { av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no " "timestamp./n"); av_frame_free(&buf); s->drop++; } return 0; } /* now wait for the next timestamp */ if (buf->pts == AV_NOPTS_VALUE) { return write_to_fifo(s->fifo, buf); } /* number of output frames */ delta = av_rescale_q(buf->pts - s->pts, inlink->time_base, outlink->time_base); if (delta < 1) { /* drop the frame and everything buffered except the first */ AVFrame *tmp; int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*); av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s)./n", drop); s->drop += drop; av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL); flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, tmp); av_frame_free(&buf); return ret; } /* can output >= 1 frames */ for (i = 0; i < delta; i++) { AVFrame *buf_out; av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); /* duplicate the frame if needed */ if (!av_fifo_size(s->fifo) && i < delta - 1) { AVFrame *dup = av_frame_clone(buf_out); av_log(ctx, AV_LOG_DEBUG, "Duplicating frame./n"); if (dup) ret = write_to_fifo(s->fifo, dup); else ret = AVERROR(ENOMEM); if (ret < 0) { av_frame_free(&buf_out); av_frame_free(&buf); return ret; } s->dup++; } buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base, outlink->time_base) + s->frames_out; if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { av_frame_free(&buf); return ret; } s->frames_out++; } flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, buf); s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base);//.........这里部分代码省略.........
Developer ID: MarcAntoine-Arnaud, Project: libav, Lines of code: 101
示例19: ff_h264_execute_ref_pic_marking//.........这里部分代码省略......... if (!current_ref_assigned) { /* Second field of complementary field pair; the first field of * which is already referenced. If short referenced, it * should be first entry in short_ref. If not, it must exist * in long_ref; trying to put it on the short list here is an * error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3). */ if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) { /* Just mark the second field valid */ h->cur_pic_ptr->reference |= h->picture_structure; } else if (h->cur_pic_ptr->long_ref) { av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference " "assignment for second field " "in complementary field pair " "(first field is long term)/n"); err = AVERROR_INVALIDDATA; } else { pic = remove_short(h, h->cur_pic_ptr->frame_num, 0); if (pic) { av_log(h->avctx, AV_LOG_ERROR, "illegal short term buffer state detected/n"); err = AVERROR_INVALIDDATA; } if (h->short_ref_count) memmove(&h->short_ref[1], &h->short_ref[0], h->short_ref_count * sizeof(H264Picture*)); h->short_ref[0] = h->cur_pic_ptr; h->short_ref_count++; h->cur_pic_ptr->reference |= h->picture_structure; /* MythTV changes - begin */ // do not add more reference frames than allowed after seeing frame num gap if (!mmco_count && h->short_ref_count > h->ps.sps->ref_frame_count) { pic = h->short_ref[h->short_ref_count - 1]; remove_short(h, pic->frame_num, 0); } /* MythTV changes - end */ } } if (h->long_ref_count + h->short_ref_count > FFMAX(h->ps.sps->ref_frame_count, 1)) { /* We have too many reference frames, probably due to corrupted * stream. Need to discard one frame. Prevents overrun of the * short_ref and long_ref buffers. */ av_log(h->avctx, AV_LOG_ERROR, "number of reference frames (%d+%d) exceeds max (%d; probably " "corrupt input), discarding one/n", h->long_ref_count, h->short_ref_count, h->ps.sps->ref_frame_count); err = AVERROR_INVALIDDATA; if (h->long_ref_count && !h->short_ref_count) { for (i = 0; i < 16; ++i) if (h->long_ref[i]) break; assert(i < 16); remove_long(h, i, 0); } else { pic = h->short_ref[h->short_ref_count - 1]; remove_short(h, pic->frame_num, 0); } } for (i = 0; i<h->short_ref_count; i++) { pic = h->short_ref[i]; if (pic->invalid_gap) { int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->ps.sps->log2_max_frame_num); if (d > h->ps.sps->ref_frame_count) remove_short(h, pic->frame_num, 0); } } print_short_term(h); print_long_term(h); for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) { if (h->ps.pps_list[i]) { const PPS *pps = (const PPS *)h->ps.pps_list[i]->data; pps_ref_count[0] = FFMAX(pps_ref_count[0], pps->ref_count[0]); pps_ref_count[1] = FFMAX(pps_ref_count[1], pps->ref_count[1]); } } if ( err >= 0 && h->long_ref_count==0 && ( h->short_ref_count<=2 || pps_ref_count[0] <= 1 + (h->picture_structure != PICT_FRAME) && pps_ref_count[1] <= 1) && pps_ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) + (2*!h->has_recovery_point) && h->cur_pic_ptr->f->pict_type == AV_PICTURE_TYPE_I){ h->cur_pic_ptr->recovered |= 1; if(!h->avctx->has_b_frames) h->frame_recovered |= FRAME_RECOVERED_SEI; }out: return (h->avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;}
Developer ID: bennettpeter, Project: mythtv, Lines of code: 101
示例20: read_headerstatic int read_header(AVFormatContext *s){ AVIOContext *pb = s->pb; CaffContext *caf = s->priv_data; AVStream *st; uint32_t tag = 0; int found_data, ret; int64_t size, pos; avio_skip(pb, 8); /* magic, version, file flags */ /* audio description chunk */ if (avio_rb32(pb) != MKBETAG('d','e','s','c')) { av_log(s, AV_LOG_ERROR, "desc chunk not present/n"); return AVERROR_INVALIDDATA; } size = avio_rb64(pb); if (size != 32) return AVERROR_INVALIDDATA; ret = read_desc_chunk(s); if (ret) return ret; st = s->streams[0]; /* parse each chunk */ found_data = 0; while (!url_feof(pb)) { /* stop at data chunk if seeking is not supported or data chunk size is unknown */ if (found_data && (caf->data_size < 0 || !pb->seekable)) break; tag = avio_rb32(pb); size = avio_rb64(pb); pos = avio_tell(pb); if (url_feof(pb)) break; switch (tag) { case MKBETAG('d','a','t','a'): avio_skip(pb, 4); /* edit count */ caf->data_start = avio_tell(pb); caf->data_size = size < 0 ? -1 : size - 4; if (caf->data_size > 0 && pb->seekable) avio_skip(pb, caf->data_size); found_data = 1; break; case MKBETAG('c','h','a','n'): if ((ret = ff_mov_read_chan(s, s->pb, st, size)) < 0) return ret; break; /* magic cookie chunk */ case MKBETAG('k','u','k','i'): if (read_kuki_chunk(s, size)) return AVERROR_INVALIDDATA; break; /* packet table chunk */ case MKBETAG('p','a','k','t'): if (read_pakt_chunk(s, size)) return AVERROR_INVALIDDATA; break; case MKBETAG('i','n','f','o'): read_info_chunk(s, size); break; default:#define _(x) ((x) >= ' ' ? (x) : ' ') av_log(s, AV_LOG_WARNING, "skipping CAF chunk: %08X (%c%c%c%c), size %"PRId64"/n", tag, _(tag>>24), _((tag>>16)&0xFF), _((tag>>8)&0xFF), _(tag&0xFF), size);#undef _ case MKBETAG('f','r','e','e'): if (size < 0) return AVERROR_INVALIDDATA; break; } if (size > 0) { if (pos + size < pos) return AVERROR_INVALIDDATA; avio_skip(pb, FFMAX(0, pos + size - avio_tell(pb))); } } if (!found_data) return AVERROR_INVALIDDATA; if (caf->bytes_per_packet > 0 && caf->frames_per_packet > 0) { if (caf->data_size > 0) st->nb_frames = (caf->data_size / caf->bytes_per_packet) * caf->frames_per_packet; } else if (st->nb_index_entries) { st->codec->bit_rate = st->codec->sample_rate * caf->data_size * 8 / st->duration; } else { av_log(s, AV_LOG_ERROR, "Missing packet table. It is required when "//.........这里部分代码省略.........
Developer ID: SmartJog, Project: ffmpeg, Lines of code: 101
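Example 20's chunk walker ends each iteration with avio_skip(pb, FFMAX(0, pos + size - avio_tell(pb))): skip whatever is left of the chunk, but never a negative amount, since that would mean seeking backwards on a possibly non-seekable stream. A sketch with a hypothetical stream position in place of avio_tell():

#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Chunk started at offset 100 with a 64-byte payload, but the
     * parser has already read up to offset 180 (past the chunk end). */
    long long pos = 100, size = 64;
    long long cur = 180;

    /* 100 + 64 - 180 is negative, so FFMAX clamps the skip to 0
     * instead of asking the I/O layer to move backwards. */
    long long skip = FFMAX(0, pos + size - cur);
    printf("skip %lld bytes\n", skip); /* skip 0 bytes */
    return 0;
}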
Example 21: modify_qscale

static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, int frame_num)
{
    RateControlContext *rcc = &s->rc_context;
    int qmin, qmax;
    const int pict_type = rce->new_pict_type;
    const double buffer_size = s->avctx->rc_buffer_size;
    const double fps = 1/av_q2d(s->avctx->time_base);
    const double min_rate = s->avctx->rc_min_rate / fps;
    const double max_rate = s->avctx->rc_max_rate / fps;

    get_qminmax(&qmin, &qmax, s, pict_type, frame_num);

    /* modulation */
    if (s->avctx->rc_qmod_freq &&
        frame_num % s->avctx->rc_qmod_freq == 0 &&
        pict_type == FF_P_TYPE)
        q *= s->avctx->rc_qmod_amp;
    //printf("q:%f\n", q);

    /* buffer overflow/underflow protection */
    if (buffer_size) {
        double expected_size = rcc->buffer_index;
        double q_limit;

        if (min_rate) {
            double d = 2*(buffer_size - expected_size)/buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q *= pow(d, 1.0/s->avctx->rc_buffer_aggressivity);

            q_limit = bits2qp(rce, FFMAX((min_rate - buffer_size + rcc->buffer_index) *
                                         s->avctx->rc_min_vbv_overflow_use, 1));
            if (q > q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }

        if (max_rate) {
            double d = 2*expected_size/buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q /= pow(d, 1.0/s->avctx->rc_buffer_aggressivity);

            q_limit = bits2qp(rce, FFMAX(rcc->buffer_index *
                                         s->avctx->rc_max_available_vbv_use, 1));
            if (q < q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }
    }
    //printf("q:%f max:%f min:%f size:%f index:%d bits:%f agr:%f\n", q, max_rate, min_rate, buffer_size, rcc->buffer_index, bits, s->avctx->rc_buffer_aggressivity);

    if (s->avctx->rc_qsquish == 0.0 || qmin == qmax) {
        if (q < qmin)
            q = qmin;
        else if (q > qmax)
            q = qmax;
    } else {
        double min2 = log(qmin);
        double max2 = log(qmax);

        q = log(q);
        q = (q - min2)/(max2 - min2) - 0.5;
        q *= -4.0;
        q = 1.0/(1.0 + exp(q));
        q = q*(max2 - min2) + min2;
        q = exp(q);
    }

    return q;
}
Developer ID: Fluffiest, Project: splayer, Lines of code: 70
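Both VBV branches in Example 21 wrap their bit budget in FFMAX(..., 1) before handing it to bits2qp(), so a drained buffer (a zero or negative budget) cannot feed nonsense into the conversion. The sketch below uses a hypothetical bits_to_qp() stand-in — the real bits2qp() is FFmpeg-internal and its exact formula is not shown in this article — to make the failure mode visible:

#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for bits2qp() in Example 21: any mapping
 * from a bit budget to a quantizer that misbehaves at 0 or below. */
static double bits_to_qp(double bits)
{
    return 1000.0 / bits;   /* would divide by zero without the floor */
}

int main(void)
{
    double budget = -250.0; /* can go negative when the VBV buffer runs dry */

    /* Flooring the budget at 1 keeps the limit a huge-but-finite QP
     * instead of a division by zero or a negative quantizer. */
    printf("q_limit = %.2f\n", bits_to_qp(FFMAX(budget, 1))); /* 1000.00 */
    return 0;
}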
示例22: read_avi_header//.........这里部分代码省略......... *(uint32_t *)s->dwChunkId = stream_read_dword_le(demuxer->stream); stream_read(demuxer->stream, (char *)s->dwReserved, 3*4); memset(s->dwReserved, 0, 3*4); print_avisuperindex_chunk(s,MSGL_V); // Check and fix this useless crap if(s->wLongsPerEntry != sizeof (avisuperindex_entry)/4) { mp_msg (MSGT_HEADER, MSGL_WARN, "Broken super index chunk size: %u/n",s->wLongsPerEntry); s->wLongsPerEntry = sizeof(avisuperindex_entry)/4; } if( ((chunksize/4)/s->wLongsPerEntry) < s->nEntriesInUse){ mp_msg (MSGT_HEADER, MSGL_WARN, "Broken super index chunk/n"); s->nEntriesInUse = (chunksize/4)/s->wLongsPerEntry; } s->aIndex = calloc(s->nEntriesInUse, sizeof (avisuperindex_entry)); s->stdidx = calloc(s->nEntriesInUse, sizeof (avistdindex_chunk)); // now the real index of indices for (i=0; i<s->nEntriesInUse; i++) { chunksize-=16; s->aIndex[i].qwOffset = stream_read_qword_le(demuxer->stream); s->aIndex[i].dwSize = stream_read_dword_le(demuxer->stream); s->aIndex[i].dwDuration = stream_read_dword_le(demuxer->stream); mp_msg (MSGT_HEADER, MSGL_V, "ODML (%.4s): [%d] 0x%016"PRIx64" 0x%04x %u/n", (s->dwChunkId), i, (uint64_t)s->aIndex[i].qwOffset, s->aIndex[i].dwSize, s->aIndex[i].dwDuration); } break; } case ckidSTREAMFORMAT: { // read 'strf' if(last_fccType==streamtypeVIDEO){ sh_video->bih=calloc(FFMAX(chunksize, sizeof(BITMAPINFOHEADER)), 1);// sh_video->bih=malloc(chunksize); memset(sh_video->bih,0,chunksize); mp_msg(MSGT_HEADER,MSGL_V,MSGTR_MPDEMUX_AVIHDR_FoundBitmapInfoHeader,chunksize,sizeof(BITMAPINFOHEADER)); stream_read(demuxer->stream,(char*) sh_video->bih,chunksize); le2me_BITMAPINFOHEADER(sh_video->bih); // swap to machine endian if (sh_video->bih->biSize > chunksize && sh_video->bih->biSize > sizeof(BITMAPINFOHEADER)) sh_video->bih->biSize = chunksize; // fixup MS-RLE header (seems to be broken for <256 color files) if(sh_video->bih->biCompression<=1 && sh_video->bih->biSize==40) sh_video->bih->biSize=chunksize; if( mp_msg_test(MSGT_HEADER,MSGL_V) ) print_video_header(sh_video->bih,MSGL_V); chunksize=0; sh_video->fps=(float)sh_video->video.dwRate/(float)sh_video->video.dwScale; sh_video->frametime=(float)sh_video->video.dwScale/(float)sh_video->video.dwRate;// if(demuxer->video->id==-1) demuxer->video->id=stream_id; // IdxFix: idxfix_videostream=stream_id; switch(sh_video->bih->biCompression){ case mmioFOURCC('M', 'P', 'G', '4'): case mmioFOURCC('m', 'p', 'g', '4'): case mmioFOURCC('D', 'I', 'V', '1'): idxfix_divx=3; // set index recovery mpeg4 flavour: msmpeg4v1 mp_msg(MSGT_HEADER,MSGL_V,MSGTR_MPDEMUX_AVIHDR_RegeneratingKeyfTableForMPG4V1); break; case mmioFOURCC('D', 'I', 'V', '3'): case mmioFOURCC('d', 'i', 'v', '3'): case mmioFOURCC('D', 'I', 'V', '4'): case mmioFOURCC('d', 'i', 'v', '4'): case mmioFOURCC('D', 'I', 'V', '5'): case mmioFOURCC('d', 'i', 'v', '5'): case mmioFOURCC('D', 'I', 'V', '6'): case mmioFOURCC('d', 'i', 'v', '6'): case mmioFOURCC('M', 'P', '4', '3'):
Developer: zrafa, Project: jlime_embedded_applications, Lines: 67
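Note the defensive allocation in the 'strf' branch: calloc(FFMAX(chunksize, sizeof(BITMAPINFOHEADER)), 1) guarantees the buffer covers every header field even if the chunk on disk is truncated. A minimal sketch of the same pattern, with a hypothetical hdr_t standing in for BITMAPINFOHEADER:

#include <stdlib.h>

#define FFMAX(a,b) ((a) > (b) ? (a) : (b))

/* hypothetical stand-in for BITMAPINFOHEADER */
typedef struct {
    unsigned int biSize;
    int          biWidth;
    int          biHeight;
} hdr_t;

/* allocate at least a full hdr_t, zero-filled, even when the on-disk
 * chunk is shorter, so later field accesses stay inside the allocation */
static hdr_t *alloc_header(size_t chunksize)
{
    return calloc(FFMAX(chunksize, sizeof(hdr_t)), 1);
}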
Example 23: avresample_build_matrix

//......... part of the code omitted here .........
                matrix[FRONT_LEFT ][BACK_RIGHT] -= surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level * M_SQRT1_2;
            } else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
                matrix[FRONT_LEFT ][BACK_LEFT ] -= surround_mix_level * SQRT3_2;
                matrix[FRONT_LEFT ][BACK_RIGHT] -= surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level * SQRT3_2;
            } else {
                matrix[FRONT_LEFT ][BACK_LEFT ] += surround_mix_level;
                matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level;
            }
        } else if (out_layout & AV_CH_FRONT_CENTER) {
            matrix[FRONT_CENTER][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
            matrix[FRONT_CENTER][BACK_RIGHT] += surround_mix_level * M_SQRT1_2;
        } else
            return AVERROR_PATCHWELCOME;
    }

    /* mix side left/right into back or front */
    if (unaccounted & AV_CH_SIDE_LEFT) {
        if (out_layout & AV_CH_BACK_LEFT) {
            /* if back channels do not exist in the input, just copy side
               channels to back channels, otherwise mix side into back */
            if (in_layout & AV_CH_BACK_LEFT) {
                matrix[BACK_LEFT ][SIDE_LEFT ] += M_SQRT1_2;
                matrix[BACK_RIGHT][SIDE_RIGHT] += M_SQRT1_2;
            } else {
                matrix[BACK_LEFT ][SIDE_LEFT ] += 1.0;
                matrix[BACK_RIGHT][SIDE_RIGHT] += 1.0;
            }
        } else if (out_layout & AV_CH_BACK_CENTER) {
            matrix[BACK_CENTER][SIDE_LEFT ] += M_SQRT1_2;
            matrix[BACK_CENTER][SIDE_RIGHT] += M_SQRT1_2;
        } else if (out_layout & AV_CH_FRONT_LEFT) {
            if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) {
                matrix[FRONT_LEFT ][SIDE_LEFT ] -= surround_mix_level * M_SQRT1_2;
                matrix[FRONT_LEFT ][SIDE_RIGHT] -= surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level * M_SQRT1_2;
            } else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
                matrix[FRONT_LEFT ][SIDE_LEFT ] -= surround_mix_level * SQRT3_2;
                matrix[FRONT_LEFT ][SIDE_RIGHT] -= surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level * SQRT3_2;
            } else {
                matrix[FRONT_LEFT ][SIDE_LEFT ] += surround_mix_level;
                matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level;
            }
        } else if (out_layout & AV_CH_FRONT_CENTER) {
            matrix[FRONT_CENTER][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
            matrix[FRONT_CENTER][SIDE_RIGHT] += surround_mix_level * M_SQRT1_2;
        } else
            return AVERROR_PATCHWELCOME;
    }

    /* mix left-of-center/right-of-center into front left/right or center */
    if (unaccounted & AV_CH_FRONT_LEFT_OF_CENTER) {
        if (out_layout & AV_CH_FRONT_LEFT) {
            matrix[FRONT_LEFT ][FRONT_LEFT_OF_CENTER ] += 1.0;
            matrix[FRONT_RIGHT][FRONT_RIGHT_OF_CENTER] += 1.0;
        } else if (out_layout & AV_CH_FRONT_CENTER) {
            matrix[FRONT_CENTER][FRONT_LEFT_OF_CENTER ] += M_SQRT1_2;
            matrix[FRONT_CENTER][FRONT_RIGHT_OF_CENTER] += M_SQRT1_2;
        } else
            return AVERROR_PATCHWELCOME;
    }
    /* mix LFE into front left/right or center */
    if (unaccounted & AV_CH_LOW_FREQUENCY) {
        if (out_layout & AV_CH_FRONT_CENTER) {
            matrix[FRONT_CENTER][LOW_FREQUENCY] += lfe_mix_level;
        } else if (out_layout & AV_CH_FRONT_LEFT) {
            matrix[FRONT_LEFT ][LOW_FREQUENCY] += lfe_mix_level * M_SQRT1_2;
            matrix[FRONT_RIGHT][LOW_FREQUENCY] += lfe_mix_level * M_SQRT1_2;
        } else
            return AVERROR_PATCHWELCOME;
    }

    /* transfer internal matrix to output matrix and calculate maximum
       per-channel coefficient sum */
    for (out_i = i = 0; out_i < out_channels && i < 64; i++) {
        double sum = 0;
        for (out_j = j = 0; out_j < in_channels && j < 64; j++) {
            matrix_out[out_i * stride + out_j] = matrix[i][j];
            sum += fabs(matrix[i][j]);
            if (in_layout & (1ULL << j))
                out_j++;
        }
        maxcoef = FFMAX(maxcoef, sum);
        if (out_layout & (1ULL << i))
            out_i++;
    }

    /* normalize */
    if (normalize && maxcoef > 1.0) {
        for (i = 0; i < out_channels; i++)
            for (j = 0; j < in_channels; j++)
                matrix_out[i * stride + j] /= maxcoef;
    }

    return 0;
}
Developer: TaoheGit, Project: hmi_sdl_android, Lines: 101
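Besides clamping, FFMAX works as a running-maximum accumulator: each output row's absolute coefficient sum is folded into maxcoef, which then decides whether the whole matrix needs rescaling. A self-contained sketch of that idiom under the same macro definition (this is not the library function itself):

#include <math.h>

#define FFMAX(a,b) ((a) > (b) ? (a) : (b))

/* find the largest per-row sum of absolute coefficients, then rescale
 * so no output channel can exceed unity gain */
static void normalize_rows(double *m, int rows, int cols)
{
    double maxcoef = 0.0;

    for (int i = 0; i < rows; i++) {
        double sum = 0.0;
        for (int j = 0; j < cols; j++)
            sum += fabs(m[i * cols + j]);
        maxcoef = FFMAX(maxcoef, sum);   /* running maximum */
    }
    if (maxcoef > 1.0)
        for (int i = 0; i < rows * cols; i++)
            m[i] /= maxcoef;
}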
Example 24: init

// open & setup audio device
static int init(struct ao *ao)
{
    struct priv *ac = talloc_zero(ao, struct priv);
    AVCodec *codec;

    ao->priv = ac;

    if (!encode_lavc_available(ao->encode_lavc_ctx)) {
        MP_ERR(ao, "the option --o (output file) must be specified\n");
        return -1;
    }

    pthread_mutex_lock(&ao->encode_lavc_ctx->lock);

    if (encode_lavc_alloc_stream(ao->encode_lavc_ctx,
                                 AVMEDIA_TYPE_AUDIO,
                                 &ac->stream, &ac->codec) < 0) {
        MP_ERR(ao, "could not get a new audio stream\n");
        goto fail;
    }

    codec = ao->encode_lavc_ctx->ac;

    int samplerate = af_select_best_samplerate(ao->samplerate,
                                               codec->supported_samplerates);
    if (samplerate > 0)
        ao->samplerate = samplerate;

    // TODO: Remove this redundancy with encode_lavc_alloc_stream also
    // setting the time base.
    // Using codec->time_base is deprecated, but needed for older lavf.
    ac->stream->time_base.num = 1;
    ac->stream->time_base.den = ao->samplerate;
    ac->codec->time_base.num = 1;
    ac->codec->time_base.den = ao->samplerate;

    ac->codec->sample_rate = ao->samplerate;

    struct mp_chmap_sel sel = {0};
    mp_chmap_sel_add_any(&sel);
    if (!ao_chmap_sel_adjust(ao, &sel, &ao->channels))
        goto fail;
    mp_chmap_reorder_to_lavc(&ao->channels);
    ac->codec->channels = ao->channels.num;
    ac->codec->channel_layout = mp_chmap_to_lavc(&ao->channels);

    ac->codec->sample_fmt = AV_SAMPLE_FMT_NONE;

    select_format(ao, codec);

    ac->sample_size = af_fmt_to_bytes(ao->format);
    ac->codec->sample_fmt = af_to_avformat(ao->format);
    ac->codec->bits_per_raw_sample = ac->sample_size * 8;

    if (encode_lavc_open_codec(ao->encode_lavc_ctx, ac->codec) < 0)
        goto fail;

    ac->pcmhack = 0;
    if (ac->codec->frame_size <= 1)
        ac->pcmhack = av_get_bits_per_sample(ac->codec->codec_id) / 8;

    if (ac->pcmhack)
        ac->aframesize = 16384;             // "enough"
    else
        ac->aframesize = ac->codec->frame_size;

    // enough frames for at least 0.25 seconds
    ac->framecount = ceil(ao->samplerate * 0.25 / ac->aframesize);
    // but at least one!
    ac->framecount = FFMAX(ac->framecount, 1);

    ac->savepts = AV_NOPTS_VALUE;
    ac->lastpts = AV_NOPTS_VALUE;

    ao->untimed = true;

    if (ao->channels.num > AV_NUM_DATA_POINTERS)
        goto fail;

    pthread_mutex_unlock(&ao->encode_lavc_ctx->lock);
    return 0;

fail:
    pthread_mutex_unlock(&ao->encode_lavc_ctx->lock);
    ac->shutdown = true;
    return -1;
}
Developer: Floens, Project: mpv, Lines: 88
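The frame-count computation is worth isolating: ceil() sizes the queue for a quarter second of audio, and FFMAX(framecount, 1) guarantees at least one frame even when the frame size is very large. Roughly, as a standalone helper (parameter names are illustrative and both values are assumed positive):

#include <math.h>

#define FFMAX(a,b) ((a) > (b) ? (a) : (b))

/* frames needed to buffer about 0.25 s of audio, but never zero */
static int buffer_framecount(int samplerate, int aframesize)
{
    int framecount = (int)ceil(samplerate * 0.25 / aframesize);
    return FFMAX(framecount, 1);
}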
Example 25: ff_h264_execute_ref_pic_marking

//......... part of the code omitted here .........
            h->long_ref[mmco[i].long_arg] = s->current_picture_ptr;
            h->long_ref[mmco[i].long_arg]->long_ref = 1;
            h->long_ref_count++;
        }
        s->current_picture_ptr->f.reference |= s->picture_structure;
        current_ref_assigned = 1;
        break;
    case MMCO_SET_MAX_LONG:
        assert(mmco[i].long_arg <= 16);
        // just remove the long term which index is greater than new max
        for (j = mmco[i].long_arg; j < 16; j++) {
            remove_long(h, j, 0);
        }
        break;
    case MMCO_RESET:
        while (h->short_ref_count) {
            remove_short(h, h->short_ref[0]->frame_num, 0);
        }
        for (j = 0; j < 16; j++) {
            remove_long(h, j, 0);
        }
        h->frame_num = s->current_picture_ptr->frame_num = 0;
        h->mmco_reset = 1;
        s->current_picture_ptr->mmco_reset = 1;
        break;
    default:
        assert(0);
    }
}

if (!current_ref_assigned) {
    /* Second field of complementary field pair; the first field of
     * which is already referenced. If short referenced, it
     * should be first entry in short_ref. If not, it must exist
     * in long_ref; trying to put it on the short list here is an
     * error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3). */
    if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
        /* Just mark the second field valid */
        s->current_picture_ptr->f.reference = PICT_FRAME;
    } else if (s->current_picture_ptr->long_ref) {
        av_log(h->s.avctx, AV_LOG_ERROR,
               "illegal short term reference assignment for second field "
               "in complementary field pair (first field is long term)\n");
        err = AVERROR_INVALIDDATA;
    } else {
        pic = remove_short(h, s->current_picture_ptr->frame_num, 0);
        if (pic) {
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "illegal short term buffer state detected\n");
            err = AVERROR_INVALIDDATA;
        }

        if (h->short_ref_count)
            memmove(&h->short_ref[1], &h->short_ref[0],
                    h->short_ref_count * sizeof(Picture*));

        h->short_ref[0] = s->current_picture_ptr;
        h->short_ref_count++;
        s->current_picture_ptr->f.reference |= s->picture_structure;
    }
}

if (h->long_ref_count + h->short_ref_count >
    FFMAX(h->sps.ref_frame_count, 1)) {
    /* We have too many reference frames, probably due to corrupted
     * stream. Need to discard one frame. Prevents overrun of the
     * short_ref and long_ref buffers. */
    av_log(h->s.avctx, AV_LOG_ERROR,
           "number of reference frames (%d+%d) exceeds max (%d; probably "
           "corrupt input), discarding one\n",
           h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count);
    err = AVERROR_INVALIDDATA;

    if (h->long_ref_count && !h->short_ref_count) {
        for (i = 0; i < 16; ++i)
            if (h->long_ref[i])
                break;
        assert(i < 16);
        remove_long(h, i, 0);
    } else {
        pic = h->short_ref[h->short_ref_count - 1];
        remove_short(h, pic->frame_num, 0);
    }
}

print_short_term(h);
print_long_term(h);

if (err >= 0 && h->long_ref_count == 0 && h->short_ref_count <= 2 &&
    h->pps.ref_count[0] <= 1 &&
    s->current_picture_ptr->f.pict_type == AV_PICTURE_TYPE_I) {
    h->sync |= 1;
    s->current_picture_ptr->sync |= h->sync;
}

return (h->s.avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;
}
Developer: rahulr92, Project: FFmpeg, Lines: 101
Example 26: ffm_read_data

/* first is true if we read the frame header */
static int ffm_read_data(AVFormatContext *s, uint8_t *buf, int size, int header)
{
    FFMContext *ffm = s->priv_data;
    AVIOContext *pb = s->pb;
    int len, fill_size, size1, frame_offset, id;
    int64_t last_pos = -1;

    size1 = size;
    while (size > 0) {
    redo:
        len = ffm->packet_end - ffm->packet_ptr;
        if (len < 0)
            return -1;
        if (len > size)
            len = size;
        if (len == 0) {
            if (avio_tell(pb) == ffm->file_size)
                avio_seek(pb, ffm->packet_size, SEEK_SET);
    retry_read:
            if (pb->buffer_size != ffm->packet_size) {
                int64_t tell = avio_tell(pb);
                ffio_set_buf_size(pb, ffm->packet_size);
                avio_seek(pb, tell, SEEK_SET);
            }
            id = avio_rb16(pb); /* PACKET_ID */
            if (id != PACKET_ID) {
                if (ffm_resync(s, id) < 0)
                    return -1;
                last_pos = avio_tell(pb);
            }
            fill_size = avio_rb16(pb);
            ffm->dts = avio_rb64(pb);
            frame_offset = avio_rb16(pb);
            avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
            ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
            if (ffm->packet_end < ffm->packet || frame_offset < 0)
                return -1;
            /* if first packet or resynchronization packet, we must
               handle it specifically */
            if (ffm->first_packet || (frame_offset & 0x8000)) {
                if (!frame_offset) {
                    /* This packet has no frame headers in it */
                    if (avio_tell(pb) >= ffm->packet_size * 3LL) {
                        int64_t seekback = FFMIN(ffm->packet_size * 2LL,
                                                 avio_tell(pb) - last_pos);
                        seekback = FFMAX(seekback, 0);
                        avio_seek(pb, -seekback, SEEK_CUR);
                        goto retry_read;
                    }
                    /* This is bad, we cannot find a valid frame header */
                    return 0;
                }
                ffm->first_packet = 0;
                if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE)
                    return -1;
                ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
                if (!header)
                    break;
            } else {
                ffm->packet_ptr = ffm->packet;
            }
            goto redo;
        }
        memcpy(buf, ffm->packet_ptr, len);
        buf += len;
        ffm->packet_ptr += len;
        size -= len;
        header = 0;
    }
    return size1 - size;
}
Developer: 63n, Project: FFmpeg, Lines: 72
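Here FFMIN and FFMAX combine into a range clamp: the seek-back distance is capped at two packets by FFMIN and kept non-negative by FFMAX, which matters because last_pos starts at -1 before any resync position is recorded. A sketch of the clamp in isolation (FFMIN matches the companion macro in libavutil/common.h; the helper itself is hypothetical):

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
#define FFMAX(a,b) ((a) > (b) ? (a) : (b))

/* clamp a seek-back distance into [0, limit] */
static long long clamp_seekback(long long wanted, long long limit)
{
    long long seekback = FFMIN(wanted, limit);
    return FFMAX(seekback, 0);
}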
Example 27: auto_matrix

//......... part of the code omitted here .........
        } else if (out_ch_layout & AV_CH_FRONT_LEFT) {
            if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) {
                matrix[FRONT_LEFT ][SIDE_LEFT ] -= s->slev * M_SQRT1_2;
                matrix[FRONT_LEFT ][SIDE_RIGHT] -= s->slev * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_LEFT ] += s->slev * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_RIGHT] += s->slev * M_SQRT1_2;
            } else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
                matrix[FRONT_LEFT ][SIDE_LEFT ] -= s->slev * SQRT3_2;
                matrix[FRONT_LEFT ][SIDE_RIGHT] -= s->slev * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_LEFT ] += s->slev * M_SQRT1_2;
                matrix[FRONT_RIGHT][SIDE_RIGHT] += s->slev * SQRT3_2;
            } else {
                matrix[FRONT_LEFT ][SIDE_LEFT ] += s->slev;
                matrix[FRONT_RIGHT][SIDE_RIGHT] += s->slev;
            }
        } else if (out_ch_layout & AV_CH_FRONT_CENTER) {
            matrix[FRONT_CENTER][SIDE_LEFT ] += s->slev * M_SQRT1_2;
            matrix[FRONT_CENTER][SIDE_RIGHT] += s->slev * M_SQRT1_2;
        } else
            av_assert0(0);
    }

    if (unaccounted & AV_CH_FRONT_LEFT_OF_CENTER) {
        if (out_ch_layout & AV_CH_FRONT_LEFT) {
            matrix[FRONT_LEFT ][FRONT_LEFT_OF_CENTER ] += 1.0;
            matrix[FRONT_RIGHT][FRONT_RIGHT_OF_CENTER] += 1.0;
        } else if (out_ch_layout & AV_CH_FRONT_CENTER) {
            matrix[FRONT_CENTER][FRONT_LEFT_OF_CENTER ] += M_SQRT1_2;
            matrix[FRONT_CENTER][FRONT_RIGHT_OF_CENTER] += M_SQRT1_2;
        } else
            av_assert0(0);
    }
    /* mix LFE into front left/right or center */
    if (unaccounted & AV_CH_LOW_FREQUENCY) {
        if (out_ch_layout & AV_CH_FRONT_CENTER) {
            matrix[FRONT_CENTER][LOW_FREQUENCY] += s->lfe_mix_level;
        } else if (out_ch_layout & AV_CH_FRONT_LEFT) {
            matrix[FRONT_LEFT ][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2;
            matrix[FRONT_RIGHT][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2;
        } else
            av_assert0(0);
    }

    for (out_i = i = 0; i < 64; i++) {
        double sum = 0;
        int in_i = 0;
        if ((out_ch_layout & (1ULL << i)) == 0)
            continue;
        for (j = 0; j < 64; j++) {
            if ((in_ch_layout & (1ULL << j)) == 0)
                continue;
            if (i < FF_ARRAY_ELEMS(matrix) && j < FF_ARRAY_ELEMS(matrix[0]))
                s->matrix[out_i][in_i] = matrix[i][j];
            else
                s->matrix[out_i][in_i] = i == j &&
                    (in_ch_layout & out_ch_layout & (1ULL << i));
            sum += fabs(s->matrix[out_i][in_i]);
            in_i++;
        }
        maxcoef = FFMAX(maxcoef, sum);
        out_i++;
    }
    if (s->rematrix_volume < 0)
        maxcoef = -s->rematrix_volume;

    if (s->rematrix_maxval > 0) {
        maxval = s->rematrix_maxval;
    } else if (av_get_packed_sample_fmt(s->out_sample_fmt) < AV_SAMPLE_FMT_FLT ||
               av_get_packed_sample_fmt(s->int_sample_fmt) < AV_SAMPLE_FMT_FLT) {
        maxval = 1.0;
    } else
        maxval = INT_MAX;

    if (maxcoef > maxval || s->rematrix_volume < 0) {
        maxcoef /= maxval;
        for (i = 0; i < SWR_CH_MAX; i++)
            for (j = 0; j < SWR_CH_MAX; j++) {
                s->matrix[i][j] /= maxcoef;
            }
    }

    if (s->rematrix_volume > 0) {
        for (i = 0; i < SWR_CH_MAX; i++)
            for (j = 0; j < SWR_CH_MAX; j++) {
                s->matrix[i][j] *= s->rematrix_volume;
            }
    }

    av_log(s, AV_LOG_DEBUG, "Matrix coefficients:\n");
    for (i = 0; i < av_get_channel_layout_nb_channels(out_ch_layout); i++) {
        const char *c =
            av_get_channel_name(av_channel_layout_extract_channel(out_ch_layout, i));
        av_log(s, AV_LOG_DEBUG, "%s: ", c ? c : "?");
        for (j = 0; j < av_get_channel_layout_nb_channels(in_ch_layout); j++) {
            c = av_get_channel_name(av_channel_layout_extract_channel(in_ch_layout, j));
            av_log(s, AV_LOG_DEBUG, "%s:%f ", c ? c : "?", s->matrix[i][j]);
        }
        av_log(s, AV_LOG_DEBUG, "\n");
    }
    return 0;
}
Developer: 0day-ci, Project: FFmpeg, Lines: 101
Example 28: scc_read_header

static int scc_read_header(AVFormatContext *s)
{
    SCCContext *scc = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    char line[4096], line2[4096];
    int count = 0, ret = 0;
    ptrdiff_t len2, len;
    uint8_t out[4096];
    FFTextReader tr;

    ff_text_init_avio(s, &tr, s->pb);

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000);
    st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codecpar->codec_id   = AV_CODEC_ID_EIA_608;

    while (!ff_text_eof(&tr)) {
        const int64_t pos = ff_text_pos(&tr);
        char *saveptr = NULL, *lline;
        int hh1, mm1, ss1, fs1, i;
        int hh2, mm2, ss2, fs2;
        int64_t ts_start, ts_end;
        AVPacket *sub;

        if (count == 0) {
            while (!ff_text_eof(&tr)) {
                len = ff_subtitles_read_line(&tr, line, sizeof(line));
                if (len > 13)
                    break;
            }
        }

        if (!strncmp(line, "Scenarist_SCC V1.0", 18))
            continue;
        if (sscanf(line, "%d:%d:%d%*[:;]%d", &hh1, &mm1, &ss1, &fs1) != 4)
            continue;

        ts_start = (hh1 * 3600LL + mm1 * 60LL + ss1) * 1000LL + fs1 * 33;

        while (!ff_text_eof(&tr)) {
            len2 = ff_subtitles_read_line(&tr, line2, sizeof(line2));
            if (len2 > 13)
                break;
        }

        if (sscanf(line2, "%d:%d:%d%*[:;]%d", &hh2, &mm2, &ss2, &fs2) != 4)
            continue;

        ts_end = (hh2 * 3600LL + mm2 * 60LL + ss2) * 1000LL + fs2 * 33;
        count++;

        lline  = (char *)&line;
        lline += 12;

        for (i = 0; i < 4095; i += 3) {
            char *ptr = av_strtok(lline, " ", &saveptr);
            char c1, c2, c3, c4;

            if (!ptr)
                break;

            if (sscanf(ptr, "%c%c%c%c", &c1, &c2, &c3, &c4) != 4)
                break;
            lline = NULL;

            out[i+0] = 0xfc;
            out[i+1] = convert(c2) | (convert(c1) << 4);
            out[i+2] = convert(c4) | (convert(c3) << 4);
        }

        out[i] = 0;

        sub = ff_subtitles_queue_insert(&scc->q, out, i, 0);
        if (!sub)
            return AVERROR(ENOMEM);

        sub->pos = pos;
        sub->pts = ts_start;
        sub->duration = FFMAX(1200, ts_end - ts_start);
        memmove(line, line2, sizeof(line));
        FFSWAP(ptrdiff_t, len, len2);
    }

    ff_subtitles_queue_finalize(s, &scc->q);

    return ret;
}
Developer: DeHackEd, Project: FFmpeg, Lines: 87
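Two details in the duration line are easy to miss: the timecode math converts hh:mm:ss plus a frame count to milliseconds (the factor 33 approximates one frame at about 30 fps), and FFMAX(1200, ...) acts as a floor so a caption stays visible for at least 1.2 seconds even when its end timestamp equals or precedes its start. A hypothetical standalone equivalent:

#define FFMAX(a,b) ((a) > (b) ? (a) : (b))

/* floor a caption's duration at 1200 ms, as scc_read_header() does */
static long long caption_duration_ms(long long ts_start, long long ts_end)
{
    return FFMAX(1200, ts_end - ts_start);
}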
Example 29: aac_encode_frame

static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    AACEncContext *s = avctx->priv_data;
    float **samples = s->planar_samples, *samples2, *la, *overlap;
    ChannelElement *cpe;
    SingleChannelElement *sce;
    IndividualChannelStream *ics;
    int i, its, ch, w, chans, tag, start_ch, ret, frame_bits;
    int target_bits, rate_bits, too_many_bits, too_few_bits;
    int ms_mode = 0, is_mode = 0, tns_mode = 0, pred_mode = 0;
    int chan_el_counter[4];
    FFPsyWindowInfo windows[AAC_MAX_CHANNELS];

    if (s->last_frame == 2)
        return 0;

    /* add current frame to queue */
    if (frame) {
        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
            return ret;
    }

    copy_input_samples(s, frame);
    if (s->psypp)
        ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);

    if (!avctx->frame_number)
        return 0;

    start_ch = 0;
    for (i = 0; i < s->chan_map[0]; i++) {
        FFPsyWindowInfo* wi = windows + start_ch;
        tag   = s->chan_map[i+1];
        chans = tag == TYPE_CPE ? 2 : 1;
        cpe   = &s->cpe[i];
        for (ch = 0; ch < chans; ch++) {
            int k;
            float clip_avoidance_factor;
            sce = &cpe->ch[ch];
            ics = &sce->ics;
            s->cur_channel = start_ch + ch;
            overlap  = &samples[s->cur_channel][0];
            samples2 = overlap + 1024;
            la       = samples2 + (448+64);
            if (!frame)
                la = NULL;
            if (tag == TYPE_LFE) {
                wi[ch].window_type[0] = wi[ch].window_type[1] = ONLY_LONG_SEQUENCE;
                wi[ch].window_shape   = 0;
                wi[ch].num_windows    = 1;
                wi[ch].grouping[0]    = 1;
                wi[ch].clipping[0]    = 0;

                /* Only the lowest 12 coefficients are used in a LFE channel.
                 * The expression below results in only the bottom 8 coefficients
                 * being used for 11.025kHz to 16kHz sample rates.
                 */
                ics->num_swb = s->samplerate_index >= 8 ? 1 : 3;
            } else {
                wi[ch] = s->psy.model->window(&s->psy, samples2, la, s->cur_channel,
                                              ics->window_sequence[0]);
            }
            ics->window_sequence[1] = ics->window_sequence[0];
            ics->window_sequence[0] = wi[ch].window_type[0];
            ics->use_kb_window[1]   = ics->use_kb_window[0];
            ics->use_kb_window[0]   = wi[ch].window_shape;
            ics->num_windows        = wi[ch].num_windows;
            ics->swb_sizes          = s->psy.bands    [ics->num_windows == 8];
            ics->num_swb            = tag == TYPE_LFE ? ics->num_swb
                                                      : s->psy.num_bands[ics->num_windows == 8];
            ics->max_sfb            = FFMIN(ics->max_sfb, ics->num_swb);
            ics->swb_offset         = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
                                          ff_swb_offset_128 [s->samplerate_index] :
                                          ff_swb_offset_1024[s->samplerate_index];
            ics->tns_max_bands      = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
                                          ff_tns_max_bands_128 [s->samplerate_index] :
                                          ff_tns_max_bands_1024[s->samplerate_index];

            for (w = 0; w < ics->num_windows; w++)
                ics->group_len[w] = wi[ch].grouping[w];

            /* Calculate input sample maximums and evaluate clipping risk */
            clip_avoidance_factor = 0.0f;
            for (w = 0; w < ics->num_windows; w++) {
                const float *wbuf = overlap + w * 128;
                const int wlen = 2048 / ics->num_windows;
                float max = 0;
                int j;
                /* mdct input is 2 * output */
                for (j = 0; j < wlen; j++)
                    max = FFMAX(max, fabsf(wbuf[j]));
                wi[ch].clipping[w] = max;
            }
            for (w = 0; w < ics->num_windows; w++) {
                if (wi[ch].clipping[w] > CLIP_AVOIDANCE_FACTOR) {
                    ics->window_clipping[w] = 1;
                    clip_avoidance_factor = FFMAX(clip_avoidance_factor,
                                                  wi[ch].clipping[w]);
                } else {
                    ics->window_clipping[w] = 0;
                }
//......... remainder of the code omitted .........
Developer: chris-magic, Project: xplayer, Lines: 101
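The clipping-risk loop is a classic peak detector: a running FFMAX over fabsf() yields each window's largest input magnitude. Sketched as a free-standing helper (an assumed illustration, not part of the encoder):

#include <math.h>

#define FFMAX(a,b) ((a) > (b) ? (a) : (b))

/* peak magnitude of one analysis window */
static float window_peak(const float *buf, int len)
{
    float max = 0.0f;

    for (int j = 0; j < len; j++)
        max = FFMAX(max, fabsf(buf[j]));
    return max;
}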
Example 30: addwidget

//......... part of the code omitted here .........
                mywidget->msg = evNames[i].msg;
                break;
            }
        }

        if (*desc == 'r') {
            mywidget->zeropoint = appRadian(mywidget, x0, y0);
            mywidget->arclength = appRadian(mywidget, x1, y1) - mywidget->zeropoint;

            if (mywidget->arclength < 0.0)
                mywidget->arclength += 2 * M_PI;
            // else check if radians of (x0,y0) and (x1,y1) only differ below threshold
            else if (mywidget->arclength < 0.05)
                mywidget->arclength = 2 * M_PI;
        }

        mp_msg(MSGT_GPLAYER, MSGL_DBG2, "[SKIN] [ITEM] %s %s %i %i %s %i ",
               (mywidget->type == tyHpotmeter) ? "[HPOTMETER]" :
               (mywidget->type == tyVpotmeter) ? "[VPOTMETER]" : "[RPOTMETER]",
               (mywidget->bitmap[0]) ? mywidget->bitmap[0]->name : NULL,
               mywidget->width, mywidget->height,
               (mywidget->bitmap[1]) ? mywidget->bitmap[1]->name : NULL,
               mywidget->phases);
        if (*desc == 'r')
            mp_msg(MSGT_GPLAYER, MSGL_DBG2, "%i,%i %i,%i ", x0, y0, x1, y1);
        mp_msg(MSGT_GPLAYER, MSGL_DBG2, "%f %i %i %i %i msg %i\n",
               mywidget->value,
               mywidget->wx, mywidget->wy, mywidget->wwidth, mywidget->wwidth,
               mywidget->msg);

        if (mywidget->bitmap[0] == NULL || mywidget->width == 0 || mywidget->height == 0) {
            mywidget->bitmap[0] = mywidget->bitmap[1];
            mywidget->width     = mywidget->wwidth;
            mywidget->height    = mywidget->wheight;
        }

        if (*desc == 'r') {
            mywidget->maxwh = FFMAX(mywidget->width, mywidget->height);
            // clickedinsidewidget() checks with width/height, so set it
            mywidget->width  = mywidget->wwidth;
            mywidget->height = mywidget->wheight;
        }
    } else if (!strncmp(desc, "pimage", 6)) {
        int base = counttonextchar(desc, '=') + 1;
        int i;

        /* pimage = phases, numphases, default, X, Y, width, height, message */

        mywidget->type = tyPimage;
        mywidget->bitmap[0] = pngRead(skin, findnextstring(temp, desc, &base));
        mywidget->phases  = atoi(findnextstring(temp, desc, &base));
        mywidget->value   = atof(findnextstring(temp, desc, &base));
        mywidget->wx      = mywidget->x = atoi(findnextstring(temp, desc, &base));
        mywidget->wy      = mywidget->y = atoi(findnextstring(temp, desc, &base));
        mywidget->wwidth  = mywidget->width  = atoi(findnextstring(temp, desc, &base));
        mywidget->wheight = mywidget->height = atoi(findnextstring(temp, desc, &base));
        findnextstring(temp, desc, &base);
        mywidget->msg = evNone;

        for (i = 0; i < evBoxs; i++) {
            if (!strcmp(temp, evNames[i].name)) {
                // legacy
                skin_legacy("evSetURL", temp);
                mywidget->msg = evNames[i].msg;
                break;
            }
        }
Developer: AungWinnHtut, Project: mplayer, Lines: 67
Note: The FFMAX examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms, and the snippets were selected from open-source projects contributed by many developers. Copyright of the source code belongs to the original authors; consult the corresponding project's License before distributing or reusing it. Please do not republish without permission.