This article collects typical usage examples of the FFMIN function in C++. If you have been wondering what exactly C++ FFMIN does, how to use it, or where to find real examples of it, the hand-picked code examples below may help. A total of 28 FFMIN code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.

Example 1: CheckPointer

STDMETHODIMP CDecAvcodec::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME rtStartIn, REFERENCE_TIME rtStopIn, BOOL bSyncPoint, BOOL bDiscontinuity)
{
  CheckPointer(m_pAVCtx, E_UNEXPECTED);

  int got_picture = 0;
  int used_bytes  = 0;
  BOOL bFlush = (buffer == NULL);
  BOOL bEndOfSequence = FALSE;

  AVPacket avpkt;
  av_init_packet(&avpkt);

  if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
    if (!m_bFFReordering) {
      m_tcThreadBuffer[m_CurrentThread].rtStart = rtStartIn;
      m_tcThreadBuffer[m_CurrentThread].rtStop  = rtStopIn;
    }
    m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
  } else if (m_bBFrameDelay) {
    m_tcBFrameDelay[m_nBFramePos].rtStart = rtStartIn;
    m_tcBFrameDelay[m_nBFramePos].rtStop  = rtStopIn;
    m_nBFramePos = !m_nBFramePos;
  }

  uint8_t *pDataBuffer = NULL;
  if (!bFlush && buflen > 0) {
    if (!m_bInputPadded && (!(m_pAVCtx->active_thread_type & FF_THREAD_FRAME) || m_pParser)) {
      // Copy bitstream into temporary buffer to ensure overread protection
      // Verify buffer size
      if (buflen > m_nFFBufferSize) {
        m_nFFBufferSize = buflen;
        m_pFFBuffer = (BYTE *)av_realloc_f(m_pFFBuffer, m_nFFBufferSize + FF_INPUT_BUFFER_PADDING_SIZE, 1);
        if (!m_pFFBuffer) {
          m_nFFBufferSize = 0;
          return E_OUTOFMEMORY;
        }
      }

      memcpy(m_pFFBuffer, buffer, buflen);
      memset(m_pFFBuffer+buflen, 0, FF_INPUT_BUFFER_PADDING_SIZE);
      pDataBuffer = m_pFFBuffer;
    } else {
      pDataBuffer = (uint8_t *)buffer;
    }

    if (m_nCodecId == AV_CODEC_ID_VP8 && m_bWaitingForKeyFrame) {
      if (!(pDataBuffer[0] & 1)) {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found VP8 key-frame, resuming decoding"));
        m_bWaitingForKeyFrame = FALSE;
      } else {
        return S_OK;
      }
    }
  }

  while (buflen > 0 || bFlush) {
    REFERENCE_TIME rtStart = rtStartIn, rtStop = rtStopIn;

    if (!bFlush) {
      avpkt.data = pDataBuffer;
      avpkt.size = buflen;
      avpkt.pts  = rtStartIn;
      if (rtStartIn != AV_NOPTS_VALUE && rtStopIn != AV_NOPTS_VALUE)
        avpkt.duration = (int)(rtStopIn - rtStartIn);
      else
        avpkt.duration = 0;
      avpkt.flags = AV_PKT_FLAG_KEY;

      if (m_bHasPalette) {
        m_bHasPalette = FALSE;
        uint32_t *pal = (uint32_t *)av_packet_new_side_data(&avpkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
        int pal_size = FFMIN((1 << m_pAVCtx->bits_per_coded_sample) << 2, m_pAVCtx->extradata_size);
        uint8_t *pal_src = m_pAVCtx->extradata + m_pAVCtx->extradata_size - pal_size;

        for (int i = 0; i < pal_size/4; i++)
          pal[i] = 0xFF<<24 | AV_RL32(pal_src+4*i);
      }
    } else {
      avpkt.data = NULL;
      avpkt.size = 0;
    }

    // Parse the data if a parser is present
    // This is mandatory for MPEG-1/2
    if (m_pParser) {
      BYTE *pOut = NULL;
      int pOut_size = 0;

      used_bytes = av_parser_parse2(m_pParser, m_pAVCtx, &pOut, &pOut_size, avpkt.data, avpkt.size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);

      if (used_bytes == 0 && pOut_size == 0 && !bFlush) {
        DbgLog((LOG_TRACE, 50, L"::Decode() - could not process buffer, starving?"));
        break;
      } else if (used_bytes > 0) {
        buflen      -= used_bytes;
        pDataBuffer += used_bytes;
      }

      // Update start time cache
//......... part of the code omitted .........
Developer ID: JERUKA9, Project: LAVFilters, Lines of code: 101
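For reference, FFMIN itself is a plain ternary macro from FFmpeg's libavutil/macros.h, with FFMAX as its counterpart. A minimal standalone sketch of the palette-size clamp from Example 1 follows; the input values (8-bit samples, 768 bytes of extradata) are made up for illustration:

#include <stdio.h>

/* Definitions as they appear in FFmpeg's libavutil/macros.h */
#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int bits_per_coded_sample = 8;   /* hypothetical input */
    int extradata_size        = 768; /* hypothetical input */
    /* never read more palette bytes than the extradata actually holds */
    int pal_size = FFMIN((1 << bits_per_coded_sample) << 2, extradata_size);
    printf("pal_size = %d\n", pal_size); /* prints 768, not 1024 */
    return 0;
}

Note that, being macros, FFMIN and FFMAX evaluate both arguments twice, so expressions with side effects such as FFMIN(i++, n) must be avoided.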
Example 2: ff_amf_tag_contents

static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *data_end)
{
    int size;
    char buf[1024];

    if (data >= data_end)
        return;
    switch (*data++) {
    case AMF_DATA_TYPE_NUMBER:
        av_log(ctx, AV_LOG_DEBUG, " number %g\n", av_int2double(AV_RB64(data)));
        return;
    case AMF_DATA_TYPE_BOOL:
        av_log(ctx, AV_LOG_DEBUG, " bool %d\n", *data);
        return;
    case AMF_DATA_TYPE_STRING:
    case AMF_DATA_TYPE_LONG_STRING:
        if (data[-1] == AMF_DATA_TYPE_STRING) {
            size = bytestream_get_be16(&data);
        } else {
            size = bytestream_get_be32(&data);
        }
        size = FFMIN(size, 1023);
        memcpy(buf, data, size);
        buf[size] = 0;
        av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf);
        return;
    case AMF_DATA_TYPE_NULL:
        av_log(ctx, AV_LOG_DEBUG, " NULL\n");
        return;
    case AMF_DATA_TYPE_ARRAY:
        data += 4;
    case AMF_DATA_TYPE_OBJECT:
        av_log(ctx, AV_LOG_DEBUG, " {\n");
        for (;;) {
            int size = bytestream_get_be16(&data);
            int t;
            memcpy(buf, data, size);
            buf[size] = 0;
            if (!size) {
                av_log(ctx, AV_LOG_DEBUG, " }\n");
                data++;
                break;
            }
            if (data + size >= data_end || data + size < data)
                return;
            data += size;
            av_log(ctx, AV_LOG_DEBUG, " %s: ", buf);
            ff_amf_tag_contents(ctx, data, data_end);
            t = ff_amf_tag_size(data, data_end);
            if (t < 0 || data + t >= data_end)
                return;
            data += t;
        }
        return;
    case AMF_DATA_TYPE_OBJECT_END:
        av_log(ctx, AV_LOG_DEBUG, " }\n");
        return;
    default:
        return;
    }
}
Developer ID: Fatbag, Project: libav, Lines of code: 61
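The size = FFMIN(size, 1023) line above is the classic guard against overflowing a fixed local buffer before NUL-terminating it. A minimal sketch of the same pattern, with a buffer size chosen purely for illustration:

#include <stdio.h>
#include <string.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

static void dump_string(const char *data, int declared_size)
{
    char buf[1024];
    /* never copy more than the buffer can hold (minus the NUL) */
    int size = FFMIN(declared_size, (int)sizeof(buf) - 1);
    memcpy(buf, data, size);
    buf[size] = 0;
    printf("string '%s'\n", buf);
}

int main(void)
{
    dump_string("onMetaData", 10);
    return 0;
}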
Example 3: frame_thread_init

static int frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

    if (!thread_count) {
        int nb_cpus = get_logical_cpus(avctx);
        if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
            nb_cpus = 1;
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->thread_opaque = fctx = av_mallocz(sizeof(FrameThreadContext));

    fctx->threads = av_mallocz(sizeof(PerThreadContext) * thread_count);
    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    fctx->delaying = 1;

    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;
        copy->thread_opaque = p;
        copy->pkt = &p->avpkt;

        if (!i) {
            src = copy;

            if (codec->init)
                err = codec->init(copy);

            update_context_from_thread(avctx, copy, 1);
        } else {
            copy->priv_data = av_malloc(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }
            memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
            copy->internal = av_malloc(sizeof(AVCodecInternal));
            if (!copy->internal) {
                err = AVERROR(ENOMEM);
                goto error;
            }
            *copy->internal = *src->internal;
            copy->internal->is_copy = 1;

            if (codec->init_thread_copy)
                err = codec->init_thread_copy(copy);
        }

        if (err) goto error;

        err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
        p->thread_init= !err;
        if(!p->thread_init)
            goto error;
    }

    return 0;

error:
    frame_thread_free(avctx, i+1);

    return err;
}
Developer ID: Samangan, Project: mpc-hc, Lines of code: 93
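Example 3 uses FFMIN to cap an auto-detected thread count at a compile-time ceiling. A standalone sketch of that idiom; the MAX_AUTO_THREADS value and detection result below are made up for illustration:

#include <stdio.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX_AUTO_THREADS 16   /* hypothetical ceiling */

int main(void)
{
    int nb_cpus = 32;   /* pretend this came from CPU detection */
    int thread_count = nb_cpus > 1 ? FFMIN(nb_cpus + 1, MAX_AUTO_THREADS) : 1;
    printf("thread_count = %d\n", thread_count); /* 16 */
    return 0;
}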
Example 4: multiple_resample

static int multiple_resample(ResampleContext *c, AudioData *dst, int dst_size, AudioData *src, int src_size, int *consumed)
{
    int i;
    int av_unused mm_flags = av_get_cpu_flags();
    int need_emms = c->format == AV_SAMPLE_FMT_S16P && ARCH_X86_32 &&
                    (mm_flags & (AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE2)) == AV_CPU_FLAG_MMX2;
    int64_t max_src_size = (INT64_MAX/2 / c->phase_count) / c->src_incr;

    if (c->compensation_distance)
        dst_size = FFMIN(dst_size, c->compensation_distance);
    src_size = FFMIN(src_size, max_src_size);

    *consumed = 0;

    if (c->filter_length == 1 && c->phase_count == 1) {
        int64_t index2= (1LL<<32)*c->frac/c->src_incr + (1LL<<32)*c->index;
        int64_t incr= (1LL<<32) * c->dst_incr / c->src_incr;
        int new_size = (src_size * (int64_t)c->src_incr - c->frac + c->dst_incr - 1) / c->dst_incr;

        dst_size = FFMAX(FFMIN(dst_size, new_size), 0);
        if (dst_size > 0) {
            for (i = 0; i < dst->ch_count; i++) {
                c->dsp.resample_one(dst->ch[i], src->ch[i], dst_size, index2, incr);
                if (i+1 == dst->ch_count) {
                    c->index += dst_size * c->dst_incr_div;
                    c->index += (c->frac + dst_size * (int64_t)c->dst_incr_mod) / c->src_incr;
                    av_assert2(c->index >= 0);
                    *consumed = c->index;
                    c->frac   = (c->frac + dst_size * (int64_t)c->dst_incr_mod) % c->src_incr;
                    c->index = 0;
                }
            }
        }
    } else {
        int64_t end_index = (1LL + src_size - c->filter_length) * c->phase_count;
        int64_t delta_frac = (end_index - c->index) * c->src_incr - c->frac;
        int delta_n = (delta_frac + c->dst_incr - 1) / c->dst_incr;
        int (*resample_func)(struct ResampleContext *c, void *dst,
                             const void *src, int n, int update_ctx);

        dst_size = FFMAX(FFMIN(dst_size, delta_n), 0);
        if (dst_size > 0) {
            /* resample_linear and resample_common should have same behavior
             * when frac and dst_incr_mod are zero */
            resample_func = (c->linear && (c->frac || c->dst_incr_mod)) ?
                            c->dsp.resample_linear : c->dsp.resample_common;
            for (i = 0; i < dst->ch_count; i++)
                *consumed = resample_func(c, dst->ch[i], src->ch[i], dst_size, i+1 == dst->ch_count);
        }
    }

    if(need_emms)
        emms_c();

    if (c->compensation_distance) {
        c->compensation_distance -= dst_size;
        if (!c->compensation_distance) {
            c->dst_incr     = c->ideal_dst_incr;
            c->dst_incr_div = c->dst_incr / c->src_incr;
            c->dst_incr_mod = c->dst_incr % c->src_incr;
        }
    }

    return dst_size;
}
Developer ID: jpcottin, Project: FFmpeg, Lines of code: 64
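Example 4 composes the two macros as FFMAX(FFMIN(dst_size, new_size), 0), i.e. it clamps a value into the range [0, new_size]. That composition is, in spirit, what FFmpeg's av_clip() helpers provide. A tiny sketch:

#include <stdio.h>

#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

/* clamp x into [lo, hi] */
static int clamp_range(int x, int lo, int hi)
{
    return FFMAX(FFMIN(x, hi), lo);
}

int main(void)
{
    printf("%d %d %d\n", clamp_range(-5, 0, 100),
                         clamp_range(42, 0, 100),
                         clamp_range(500, 0, 100)); /* 0 42 100 */
    return 0;
}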
Example 5: decode_frame

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AVSubtitle *sub = data;
    const uint8_t *buf_end = buf + buf_size;
    uint8_t *bitmap;
    int w, h, x, y, rlelen, i;
    int64_t packet_time = 0;
    GetBitContext gb;

    memset(sub, 0, sizeof(*sub));

    // check that at least header fits
    if (buf_size < 27 + 7 * 2 + 4 * 3) {
        av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
        return -1;
    }

    // read start and end time
    if (buf[0] != '[' || buf[13] != '-' || buf[26] != ']') {
        av_log(avctx, AV_LOG_ERROR, "invalid time code\n");
        return -1;
    }
    if (avpkt->pts != AV_NOPTS_VALUE)
        packet_time = av_rescale_q(avpkt->pts, AV_TIME_BASE_Q, (AVRational){1, 1000});
    sub->start_display_time = parse_timecode(buf +  1, packet_time);
    sub->end_display_time   = parse_timecode(buf + 14, packet_time);
    buf += 27;

    // read header
    w = bytestream_get_le16(&buf);
    h = bytestream_get_le16(&buf);
    if (avcodec_check_dimensions(avctx, w, h) < 0)
        return -1;
    x = bytestream_get_le16(&buf);
    y = bytestream_get_le16(&buf);
#ifdef SUPPORT_DIVX_DRM
    if((video_height - (y+h)) > 30)
    {
        y = video_height-30-h-1;
    }
#endif /* end of SUPPORT_DIVX_DRM */
    // skip bottom right position, it gives no new information
    bytestream_get_le16(&buf);
    bytestream_get_le16(&buf);
    rlelen = bytestream_get_le16(&buf);

    // allocate sub and set values
    sub->rects = av_mallocz(sizeof(*sub->rects));
    sub->rects[0] = av_mallocz(sizeof(*sub->rects[0]));
    sub->num_rects = 1;
    sub->rects[0]->x = x;
    sub->rects[0]->y = y;
    sub->rects[0]->w = w;
    sub->rects[0]->h = h;
    sub->rects[0]->type = SUBTITLE_BITMAP;
    sub->rects[0]->pict.linesize[0] = w;
    sub->rects[0]->pict.data[0] = av_malloc(w * h);
    sub->rects[0]->nb_colors = 4;
    sub->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);

    // read palette
    for (i = 0; i < sub->rects[0]->nb_colors; i++)
        ((uint32_t*)sub->rects[0]->pict.data[1])[i] = bytestream_get_be24(&buf);
    // make all except background (first entry) non-transparent
#if 1
    if(sub_type == 2) //DXSA
    {
        for (i = 0; i < sub->rects[0]->nb_colors; i++)
        {
            if(buf[i])
                ((uint32_t*)sub->rects[0]->pict.data[1])[i] |= 0xff000000;
        }
        if(buf[0] == buf[1] && buf[0] == buf[2] && buf[0] == buf[3] &&
           buf[1] == buf[2] && buf[1] == buf[3] && buf[2] == buf[3] && buf[0] < 0xff)
        {
            transport_float = (float)buf[0] / 256.0;
        }
        buf += 4;
    }
    else
    {
        for (i = 1; i < sub->rects[0]->nb_colors; i++)
            ((uint32_t*)sub->rects[0]->pict.data[1])[i] |= 0xff000000;
    }
#else
    for (i = 1; i < sub->rects[0]->nb_colors; i++)
        ((uint32_t*)sub->rects[0]->pict.data[1])[i] |= 0xff000000;
#endif

    // process RLE-compressed data
    rlelen = FFMIN(rlelen, buf_end - buf);
    init_get_bits(&gb, buf, rlelen * 8);
    bitmap = sub->rects[0]->pict.data[0];
    for (y = 0; y < h; y++) {
        // interlaced: do odd lines
        if (y == (h + 1) / 2)
            bitmap = sub->rects[0]->pict.data[0] + w;
        for (x = 0; x < w; ) {
            int log2 = ff_log2_tab[show_bits(&gb, 8)];
//......... part of the code omitted .........
Developer ID: dr4g0nsr, Project: mplayer-skyviia-8860, Lines of code: 101
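The rlelen = FFMIN(rlelen, buf_end - buf) line is worth isolating: a length field read from the bitstream must never be trusted past the real end of the packet. A sketch of the guard, with made-up buffer sizes:

#include <stdio.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

int main(void)
{
    unsigned char packet[64];
    unsigned char *buf     = packet + 40;   /* cursor after the headers */
    unsigned char *buf_end = packet + sizeof(packet);
    int rlelen = 1000;                      /* hostile value from the header */

    rlelen = FFMIN(rlelen, (int)(buf_end - buf)); /* never read past the packet */
    printf("rlelen clamped to %d\n", rlelen);     /* 24 */
    return 0;
}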
Example 6: qtrle_encode_line

/**
 * Computes the best RLE sequence for a line
 */
static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t **buf)
{
    int width=s->avctx->width;
    int i;
    signed char rlecode;

    /* We will use it to compute the best bulk copy sequence */
    unsigned int bulkcount;
    /* This will be the number of pixels equal to the previous frame one's
     * starting from the ith pixel */
    unsigned int skipcount;
    /* This will be the number of consecutive equal pixels in the current
     * frame, starting from the ith one also */
    unsigned int repeatcount;

    /* The cost of the three different possibilities */
    int total_bulk_cost;
    int total_skip_cost;
    int total_repeat_cost;

    int temp_cost;
    int j;

    uint8_t *this_line = p->               data[0] + line*p->linesize[0] + (width - 1)*s->pixel_size;
    uint8_t *prev_line = s->previous_frame.data[0] + line*p->linesize[0] + (width - 1)*s->pixel_size;

    s->length_table[width] = 0;
    skipcount = 0;

    for (i = width - 1; i >= 0; i--) {

        if (!s->frame.key_frame && !memcmp(this_line, prev_line, s->pixel_size))
            skipcount = FFMIN(skipcount + 1, MAX_RLE_SKIP);
        else
            skipcount = 0;

        total_skip_cost  = s->length_table[i + skipcount] + 2;
        s->skip_table[i] = skipcount;

        if (i < width - 1 && !memcmp(this_line, this_line + s->pixel_size, s->pixel_size))
            repeatcount = FFMIN(repeatcount + 1, MAX_RLE_REPEAT);
        else
            repeatcount = 1;

        total_repeat_cost = s->length_table[i + repeatcount] + 1 + s->pixel_size;

        /* skip code is free for the first pixel, it costs one byte for repeat and bulk copy
         * so let's make it aware */
        if (i == 0) {
            total_skip_cost--;
            total_repeat_cost++;
        }

        if (repeatcount > 1 && (skipcount == 0 || total_repeat_cost < total_skip_cost)) {
            /* repeat is the best */
            s->length_table[i]  = total_repeat_cost;
            s->rlecode_table[i] = -repeatcount;
        }
        else if (skipcount > 0) {
            /* skip is the best choice here */
            s->length_table[i]  = total_skip_cost;
            s->rlecode_table[i] = 0;
        }
        else {
            /* We cannot do neither skip nor repeat
             * thus we search for the best bulk copy to do */

            int limit = FFMIN(width - i, MAX_RLE_BULK);

            temp_cost = 1 + s->pixel_size + !i;
            total_bulk_cost = INT_MAX;

            for (j = 1; j <= limit; j++) {
                if (s->length_table[i + j] + temp_cost < total_bulk_cost) {
                    /* We have found a better bulk copy ... */
                    total_bulk_cost = s->length_table[i + j] + temp_cost;
                    bulkcount = j;
                }
                temp_cost += s->pixel_size;
            }

            s->length_table[i]  = total_bulk_cost;
            s->rlecode_table[i] = bulkcount;
        }

        this_line -= s->pixel_size;
        prev_line -= s->pixel_size;
    }

    /* Good ! Now we have the best sequence for this line, let's output it */

    /* We do a special case for the first pixel so that we avoid testing it in
     * the whole loop */
    i=0;
    this_line = p->               data[0] + line*p->linesize[0];
//......... part of the code omitted .........
Developer ID: OESF-DLNA, Project: upnp-extension, Lines of code: 101
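FFMIN(skipcount + 1, MAX_RLE_SKIP) is a saturating increment: the counter grows but can never exceed the largest run the RLE opcode can encode. A sketch; the MAX_RLE_SKIP value here is illustrative, not taken from the encoder:

#include <stdio.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX_RLE_SKIP 254u   /* illustrative opcode limit */

int main(void)
{
    unsigned int skipcount = 0;
    for (int i = 0; i < 300; i++)
        skipcount = FFMIN(skipcount + 1, MAX_RLE_SKIP); /* saturate, don't wrap */
    printf("skipcount = %u\n", skipcount);              /* 254 */
    return 0;
}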
Example 7: build_filter

//......... part of the code omitted .........
        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            for(i=0;i<tap_count;i++)
                ((int16_t*)filter)[ph * alloc + i] = av_clip_int16(lrintf(tab[i] * scale / norm));
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int16_t*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clip_int16(lrintf(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
        case AV_SAMPLE_FMT_S32P:
            for(i=0;i<tap_count;i++)
                ((int32_t*)filter)[ph * alloc + i] = av_clipl_int32(llrint(tab[i] * scale / norm));
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int32_t*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clipl_int32(llrint(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
        case AV_SAMPLE_FMT_FLTP:
            for(i=0;i<tap_count;i++)
                ((float*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((float*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((float*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((float*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
            }
            break;
        case AV_SAMPLE_FMT_DBLP:
            for(i=0;i<tap_count;i++)
                ((double*)filter)[ph * alloc + i] = tab[i] * scale / norm;
            if (phase_count % 2) break;
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((double*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((double*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((double*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
            }
            break;
        }
    }

#if 0
    {
#define LEN 1024
        int j,k;
        double sine[LEN + tap_count];
        double filtered[LEN];
        double maxff=-2, minff=2, maxsf=-2, minsf=2;
        for(i=0; i<LEN; i++){
            double ss=0, sf=0, ff=0;
            for(j=0; j<LEN+tap_count; j++)
                sine[j]= cos(i*j*M_PI/LEN);
            for(j=0; j<LEN; j++){
                double sum=0;
                ph=0;
                for(k=0; k<tap_count; k++)
                    sum += filter[ph * tap_count + k] * sine[k+j];
                filtered[j]= sum / (1<<FILTER_SHIFT);
                ss+= sine[j + center] * sine[j + center];
                ff+= filtered[j] * filtered[j];
                sf+= sine[j + center] * filtered[j];
            }
            ss= sqrt(2*ss/LEN);
            ff= sqrt(2*ff/LEN);
            sf= 2*sf/LEN;
            maxff= FFMAX(maxff, ff);
            minff= FFMIN(minff, ff);
            maxsf= FFMAX(maxsf, sf);
            minsf= FFMIN(minsf, sf);
            if(i%11==0){
                av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf);
                minff=minsf= 2;
                maxff=maxsf= -2;
            }
        }
    }
#endif

    ret = 0;
fail:
    av_free(tab);
    av_free(sin_lut);
    return ret;
}
Developer ID: jpcottin, Project: FFmpeg, Lines of code: 101
Example 8: uiEvent

//......... part of the code omitted .........
        break;

    case evForward10sec:
        uiRelSeek(10);
        break;

    case evBackward10sec:
        uiRelSeek(-10);
        break;

    case evSetMoviePosition:
        guiInfo.Position = param;
        uiPctSeek(guiInfo.Position);
        break;

    case evIncVolume:
        mplayer_put_key(KEY_VOLUME_UP);
        break;

    case evDecVolume:
        mplayer_put_key(KEY_VOLUME_DOWN);
        break;

    case evMute:
        mixer_mute(mixer);
        break;

    case evSetVolume:
    case ivSetVolume:
        guiInfo.Volume = param;
        {
            float l = guiInfo.Volume * (100.0 - guiInfo.Balance) / 50.0;
            float r = guiInfo.Volume * guiInfo.Balance / 50.0;
            mixer_setvolume(mixer, FFMIN(l, guiInfo.Volume), FFMIN(r, guiInfo.Volume));
        }

        if (ev == ivSetVolume)
            break;

        if (osd_level) {
            osd_visible = (GetTimerMS() + 1000) | 1;
            vo_osd_progbar_type  = OSD_VOLUME;
            vo_osd_progbar_value = guiInfo.Volume * 256.0 / 100.0;
            vo_osd_changed(OSDTYPE_PROGBAR);
        }
        break;

    case evSetBalance:
    case ivSetBalance:
        guiInfo.Balance = param;
        mixer_setbalance(mixer, (guiInfo.Balance - 50.0) / 50.0); // transform 0..100 to -1..1
        uiEvent(ivSetVolume, guiInfo.Volume);

        if (ev == ivSetBalance)
            break;

        if (osd_level) {
            osd_visible = (GetTimerMS() + 1000) | 1;
            vo_osd_progbar_type  = OSD_BALANCE;
            vo_osd_progbar_value = guiInfo.Balance * 256.0 / 100.0;
            vo_osd_changed(OSDTYPE_PROGBAR);
        }
        break;
Developer ID: 0p1pp1, Project: mplayer, Lines of code: 66
Example 9: while

///returns the number of samples filled in from start.
//also updates data and len to reflect NEW unfilled area - start is unmodified.
int ODFFmpegDecoder::FillDataFromCache(samplePtr & data, sampleFormat outFormat, sampleCount &start, sampleCount& len, unsigned int channel)
{
   if(mDecodeCache.size() <= 0)
      return 0;
   int samplesFilled=0;

   //do a search for the best position to start at.
   //Guess that the array is evenly spaced from end to end - (dictionary sort)
   //assumes the array is sorted.
   //all we need for this to work is a location in the cache array
   //that has a start time of less than our start sample, but try to get closer with binary search
   int searchStart = 0;
   int searchEnd = mDecodeCache.size();
   int guess;
   if(searchEnd>kODFFmpegSearchThreshold)
   {
      //first just guess that the cache is contiguous and we can just use math to figure it out like a dictionary
      //by guessing where our hit will be.
      while(searchStart+1<searchEnd)
      {
         guess = (searchStart+searchEnd)/2;//find a midpoint.
         //searchStart+ (searchEnd-searchStart)* ((float)start - mDecodeCache[searchStart]->start )/mDecodeCache[searchEnd]->start;

         //we want guess to point at the first index that hits even if there are duplicate start times (which can happen)
         if(mDecodeCache[guess]->start+mDecodeCache[guess]->len >= start)
            searchEnd = --guess;
         else
            searchStart = guess;
      }
   }

   //this is a sorted array
   for(int i=searchStart; i < (int)mDecodeCache.size(); i++)
   {
      //check for a cache hit - be careful to include the first/last sample and nothing more.
      //we only accept cache hits that touch either end - no piecing out of the middle.
      //this way the amount to be decoded remains set.
      if(start < mDecodeCache[i]->start+mDecodeCache[i]->len &&
         start + len > mDecodeCache[i]->start)
      {
         uint8_t* outBuf;
         outBuf = (uint8_t*)data;
         //reject buffers that would split us into two pieces because we don't have
         //a method of dealing with this yet, and it won't happen very often.
         if(start<mDecodeCache[i]->start && start+len > mDecodeCache[i]->start+mDecodeCache[i]->len)
            continue;

         int samplesHit;
         int hitStartInCache;
         int hitStartInRequest;
         int nChannels = mDecodeCache[i]->numChannels;
         samplesHit = FFMIN(start+len,mDecodeCache[i]->start+mDecodeCache[i]->len)
                        - FFMAX(mDecodeCache[i]->start,start);
         //find the start of the hit relative to the cache buffer start.
         hitStartInCache   = FFMAX(0,start-mDecodeCache[i]->start);
         //we also need to find out which end was hit - if it is the tail only we need to update from a later index.
         hitStartInRequest = start <mDecodeCache[i]->start?len - samplesHit: 0;
         sampleCount outIndex,inIndex;
         for(int j=0;j<samplesHit;j++)
         {
            outIndex = hitStartInRequest + j;
            inIndex = (hitStartInCache + j) * nChannels + channel;
            switch (mDecodeCache[i]->samplefmt)
            {
               case AV_SAMPLE_FMT_U8:
                  //printf("u8 in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                  ((int16_t *)outBuf)[outIndex] = (int16_t) (((uint8_t*)mDecodeCache[i]->samplePtr)[inIndex] - 0x80) << 8;
                  break;

               case AV_SAMPLE_FMT_S16:
                  //printf("u16 in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                  ((int16_t *)outBuf)[outIndex] = ((int16_t*)mDecodeCache[i]->samplePtr)[inIndex];
                  break;

               case AV_SAMPLE_FMT_S32:
                  //printf("s32 in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                  ((float *)outBuf)[outIndex] = (float) ((int32_t*)mDecodeCache[i]->samplePtr)[inIndex] * (1.0 / (1 << 31));
                  break;

               case AV_SAMPLE_FMT_FLT:
                  //printf("f in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                  ((float *)outBuf)[outIndex] = (float) ((float*)mDecodeCache[i]->samplePtr)[inIndex];
                  break;

               case AV_SAMPLE_FMT_DBL:
                  //printf("dbl in %llu out %llu cachelen %llu outLen %llu\n", inIndex, outIndex, mDecodeCache[i]->len, len);
                  ((float *)outBuf)[outIndex] = (float) ((double*)mDecodeCache[i]->samplePtr)[inIndex];
                  break;

               default:
                  printf("ODDecodeFFMPEG TASK unrecognized sample format\n");
                  return 1;
                  break;
            }
         }
         //update the cursor
         samplesFilled += samplesHit;

         //update the input start/len params - if the end was hit we can take off just len.
//......... part of the code omitted .........
Developer ID: zhenggzw, Project: audacity, Lines of code: 101
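The samplesHit computation above is the standard closed form for the length of the intersection of two half-open ranges: FFMIN of the ends minus FFMAX of the starts. A sketch with made-up ranges:

#include <stdio.h>

#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

/* length of the overlap of [a0,a1) and [b0,b1); <= 0 means disjoint */
static long overlap(long a0, long a1, long b0, long b1)
{
    return FFMIN(a1, b1) - FFMAX(a0, b0);
}

int main(void)
{
    printf("%ld\n", overlap(100, 200, 150, 400)); /* 50 */
    printf("%ld\n", overlap(0, 10, 20, 30));      /* -10: no overlap */
    return 0;
}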
Example 10: read_header

static int read_header(AVFormatContext *s)
{
    JVDemuxContext *jv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vst, *ast;
    int64_t audio_pts = 0;
    int64_t offset;
    int i;

    avio_skip(pb, 80);

    ast = avformat_new_stream(s, NULL);
    vst = avformat_new_stream(s, NULL);
    if (!ast || !vst)
        return AVERROR(ENOMEM);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = CODEC_ID_JV;
    vst->codec->codec_tag  = 0; /* no fourcc */
    vst->codec->width      = avio_rl16(pb);
    vst->codec->height     = avio_rl16(pb);
    vst->duration = vst->nb_frames = ast->nb_index_entries = avio_rl16(pb);
    avpriv_set_pts_info(vst, 64, avio_rl16(pb), 1000);

    avio_skip(pb, 4);

    ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id    = CODEC_ID_PCM_U8;
    ast->codec->codec_tag   = 0; /* no fourcc */
    ast->codec->sample_rate = avio_rl16(pb);
    ast->codec->channels    = 1;
    avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);

    avio_skip(pb, 10);

    ast->index_entries = av_malloc(ast->nb_index_entries * sizeof(*ast->index_entries));
    if (!ast->index_entries)
        return AVERROR(ENOMEM);

    jv->frames = av_malloc(ast->nb_index_entries * sizeof(JVFrame));
    if (!jv->frames)
        return AVERROR(ENOMEM);

    offset = 0x68 + ast->nb_index_entries * 16;
    for(i = 0; i < ast->nb_index_entries; i++) {
        AVIndexEntry *e   = ast->index_entries + i;
        JVFrame      *jvf = jv->frames + i;

        /* total frame size including audio, video, palette data and padding */
        e->size      = avio_rl32(pb);
        e->timestamp = i;
        e->pos       = offset;
        offset      += e->size;

        jvf->audio_size   = avio_rl32(pb);
        jvf->video_size   = avio_rl32(pb);
        jvf->palette_size = avio_r8(pb) ? 768 : 0;
        jvf->video_size   = FFMIN(FFMAX(jvf->video_size, 0),
                                  INT_MAX - JV_PREAMBLE_SIZE - jvf->palette_size);
        if (avio_r8(pb))
            av_log(s, AV_LOG_WARNING, "unsupported audio codec\n");
        jvf->video_type = avio_r8(pb);
        avio_skip(pb, 1);

        e->timestamp = jvf->audio_size ? audio_pts : AV_NOPTS_VALUE;
        audio_pts   += jvf->audio_size;

        e->flags = jvf->video_type != 1 ? AVINDEX_KEYFRAME : 0;
    }

    jv->state = JV_AUDIO;
    return 0;
}
Developer ID: KindDragon, Project: FFmpeg, Lines of code: 75
Example 11: main

int main(int argc, char *argv[])
{
    uint64_t i, j;
    uint64_t sse = 0;
    double sse_d = 0.0;
    FILE *f[2];
    uint8_t buf[2][SIZE];
    int len = 1;
    int64_t max;
    int shift      = argc < 5 ? 0 : atoi(argv[4]);
    int skip_bytes = argc < 6 ? 0 : atoi(argv[5]);
    uint64_t size0 = 0;
    uint64_t size1 = 0;
    uint64_t maxdist = 0;
    double maxdist_d = 0.0;

    if (argc < 3) {
        printf("tiny_psnr <file1> <file2> [<elem size> [<shift> [<skip bytes>]]]\n");
        printf("WAV headers are skipped automatically.\n");
        return 1;
    }

    if (argc > 3) {
        if (!strcmp(argv[3], "u8")) {
            len = 1;
        } else if (!strcmp(argv[3], "s16")) {
            len = 2;
        } else if (!strcmp(argv[3], "f32")) {
            len = 4;
        } else if (!strcmp(argv[3], "f64")) {
            len = 8;
        } else {
            char *end;
            len = strtol(argv[3], &end, 0);
            if (*end || len < 1 || len > 2) {
                fprintf(stderr, "Unsupported sample format: %s\n", argv[3]);
                return 1;
            }
        }
    }

    max = (1LL << (8 * len)) - 1;

    f[0] = fopen(argv[1], "rb");
    f[1] = fopen(argv[2], "rb");
    if (!f[0] || !f[1]) {
        fprintf(stderr, "Could not open input files.\n");
        return 1;
    }

    for (i = 0; i < 2; i++) {
        uint8_t *p = buf[i];
        if (fread(p, 1, 12, f[i]) != 12)
            return 1;
        if (!memcmp(p, "RIFF", 4) &&
            !memcmp(p + 8, "WAVE", 4)) {
            if (fread(p, 1, 8, f[i]) != 8)
                return 1;
            while (memcmp(p, "data", 4)) {
                int s = p[4] | p[5] << 8 | p[6] << 16 | p[7] << 24;
                fseek(f[i], s, SEEK_CUR);

                if (fread(p, 1, 8, f[i]) != 8)
                    return 1;
            }
        } else {
            fseek(f[i], -12, SEEK_CUR);
        }
    }

    fseek(f[shift < 0], abs(shift), SEEK_CUR);

    fseek(f[0], skip_bytes, SEEK_CUR);
    fseek(f[1], skip_bytes, SEEK_CUR);

    for (;;) {
        int s0 = fread(buf[0], 1, SIZE, f[0]);
        int s1 = fread(buf[1], 1, SIZE, f[1]);

        for (j = 0; j < FFMIN(s0, s1); j += len) {
            switch (len) {
            case 1:
            case 2: {
                int64_t a = buf[0][j];
                int64_t b = buf[1][j];
                int dist;
                if (len == 2) {
                    a = get_s16l(buf[0] + j);
                    b = get_s16l(buf[1] + j);
                } else {
                    a = buf[0][j];
                    b = buf[1][j];
                }
                sse += (a - b) * (a - b);
                dist = llabs(a - b);
                if (dist > maxdist)
                    maxdist = dist;
                break;
            }
            case 4:
            case 8: {
//......... part of the code omitted .........
Developer ID: Brainiarc7, Project: libav, Lines of code: 101
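FFMIN(s0, s1) in the comparison loop makes the tool robust to files of unequal length: only bytes present in both reads are compared. A sketch of the same idea over two strings:

#include <stdio.h>
#include <string.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

int main(void)
{
    const char a[] = "abcdefgh", b[] = "abcxyz";
    int s0 = (int)strlen(a), s1 = (int)strlen(b);
    int diff = 0;
    for (int j = 0; j < FFMIN(s0, s1); j++)   /* compare only the common prefix */
        diff += a[j] != b[j];
    printf("%d differing bytes in the overlap\n", diff); /* 3 */
    return 0;
}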
Example 12: rtp_write_header

static int rtp_write_header(AVFormatContext *s1)
{
    RTPMuxContext *s = s1->priv_data;
    int n, ret = AVERROR(EINVAL);
    AVStream *st;

    if (s1->nb_streams != 1) {
        av_log(s1, AV_LOG_ERROR, "Only one stream supported in the RTP muxer\n");
        return AVERROR(EINVAL);
    }
    st = s1->streams[0];
    if (!is_supported(st->codecpar->codec_id)) {
        av_log(s1, AV_LOG_ERROR, "Unsupported codec %s\n", avcodec_get_name(st->codecpar->codec_id));
        return -1;
    }

    if (s->payload_type < 0) {
        /* Re-validate non-dynamic payload types */
        if (st->id < RTP_PT_PRIVATE)
            st->id = ff_rtp_get_payload_type(s1, st->codecpar, -1);
        s->payload_type = st->id;
    } else {
        /* private option takes priority */
        st->id = s->payload_type;
    }

    s->base_timestamp = av_get_random_seed();
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    if (!s->ssrc)
        s->ssrc = av_get_random_seed();
    s->first_packet = 1;
    s->first_rtcp_ntp_time = ff_ntp_time();
    if (s1->start_time_realtime != 0 && s1->start_time_realtime != AV_NOPTS_VALUE)
        /* Round the NTP time to whole milliseconds. */
        s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 + NTP_OFFSET_US;
    // Pick a random sequence start number, but in the lower end of the
    // available range, so that any wraparound doesn't happen immediately.
    // (Immediate wraparound would be an issue for SRTP.)
    if (s->seq < 0) {
        if (s1->flags & AVFMT_FLAG_BITEXACT) {
            s->seq = 0;
        } else
            s->seq = av_get_random_seed() & 0x0fff;
    } else
        s->seq &= 0xffff; // Use the given parameter, wrapped to the right interval

    if (s1->packet_size) {
        if (s1->pb->max_packet_size)
            s1->packet_size = FFMIN(s1->packet_size, s1->pb->max_packet_size);
    } else
        s1->packet_size = s1->pb->max_packet_size;
    if (s1->packet_size <= 12) {
        av_log(s1, AV_LOG_ERROR, "Max packet size %d too low\n", s1->packet_size);
        return AVERROR(EIO);
    }
    s->buf = av_malloc(s1->packet_size);
    if (!s->buf) {
        return AVERROR(ENOMEM);
    }
    s->max_payload_size = s1->packet_size - 12;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        avpriv_set_pts_info(st, 32, 1, st->codecpar->sample_rate);
    } else {
        avpriv_set_pts_info(st, 32, 1, 90000);
    }
    s->buf_ptr = s->buf;
    switch(st->codecpar->codec_id) {
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        s->buf_ptr = s->buf + 4;
        avpriv_set_pts_info(st, 32, 1, 90000);
        break;
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        break;
    case AV_CODEC_ID_MPEG2TS:
        n = s->max_payload_size / TS_PACKET_SIZE;
        if (n < 1)
            n = 1;
        s->max_payload_size = n * TS_PACKET_SIZE;
        break;
    case AV_CODEC_ID_H261:
        if (s1->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
            av_log(s, AV_LOG_ERROR,
                   "Packetizing H261 is experimental and produces incorrect "
                   "packetization for cases where GOBs don't fit into packets "
                   "(even though most receivers may handle it just fine). "
                   "Please set -f_strict experimental in order to enable it.\n");
            ret = AVERROR_EXPERIMENTAL;
            goto fail;
        }
        break;
    case AV_CODEC_ID_H264:
        /* check for H.264 MP4 syntax */
//......... part of the code omitted .........
Developer ID: 0day-ci, Project: FFmpeg, Lines of code: 101
Example 13: select_frame

static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_N  ] = inlink->frame_count;
    select->var_values[VAR_PTS] = TS2D(frame->pts);
    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    select->var_values[VAR_KEY] = frame->key_frame;
    select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));

    switch (inlink->type) {
    case AVMEDIA_TYPE_AUDIO:
        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
        break;

    case AVMEDIA_TYPE_VIDEO:
        select->var_values[VAR_INTERLACE_TYPE] =
            !frame->interlaced_frame ? INTERLACE_TYPE_P :
             frame->top_field_first  ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
        if (select->do_scene_detect) {
            char buf[32];
            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
            // TODO: document metadata
            snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
            av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
        }
        break;
    }

    select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%f pts:%f t:%f key:%d",
           select->var_values[VAR_N],
           select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           frame->key_frame);

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
               (!frame->interlaced_frame) ? 'P' :
               frame->top_field_first ? 'T' : 'B',
               av_get_picture_type_char(frame->pict_type),
               select->var_values[VAR_SCENE]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
               frame->nb_samples,
               select->var_values[VAR_CONSUMED_SAMPLES_N]);
        break;
    }

    if (res == 0) {
        select->select_out = -1; /* drop */
    } else if (isnan(res) || res < 0) {
        select->select_out = 0; /* first output */
    } else {
        select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
    }

    av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
        if (inlink->type == AVMEDIA_TYPE_AUDIO)
            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
    }

    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
}
Developer ID: 0day-ci, Project: FFmpeg, Lines of code: 83
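FFMIN(ceilf(res)-1, nb_outputs-1) maps an arbitrary user expression result onto a valid output index, with the last output absorbing anything too large. A sketch (values invented):

#include <stdio.h>
#include <math.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int nb_outputs = 3;
    double res = 7.2;                                    /* expression result */
    int out = FFMIN((int)ceil(res) - 1, nb_outputs - 1); /* clamp to last output */
    printf("output %d\n", out);                          /* 2 */
    return 0;
}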
Example 14: flac_parse

static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
                      const uint8_t **poutbuf, int *poutbuf_size,
                      const uint8_t *buf, int buf_size)
{
    FLACParseContext *fpc = s->priv_data;
    FLACHeaderMarker *curr;
    int nb_headers;
    const uint8_t *read_end   = buf;
    const uint8_t *read_start = buf;

    if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        FLACFrameInfo fi;
        if (frame_header_is_valid(avctx, buf, &fi)) {
            s->duration = fi.blocksize;
            if (!avctx->sample_rate)
                avctx->sample_rate = fi.samplerate;
            if (fpc->pc->flags & PARSER_FLAG_USE_CODEC_TS){
                fpc->pc->pts = fi.frame_or_sample_num;
                if (!fi.is_var_size)
                    fpc->pc->pts *= fi.blocksize;
            }
        }
        *poutbuf      = buf;
        *poutbuf_size = buf_size;
        return buf_size;
    }

    fpc->avctx = avctx;
    if (fpc->best_header_valid)
        return get_best_header(fpc, poutbuf, poutbuf_size);

    /* If a best_header was found last call remove it with the buffer data. */
    if (fpc->best_header && fpc->best_header->best_child) {
        FLACHeaderMarker *temp;
        FLACHeaderMarker *best_child = fpc->best_header->best_child;

        /* Remove headers in list until the end of the best_header. */
        for (curr = fpc->headers; curr != best_child; curr = temp) {
            if (curr != fpc->best_header) {
                av_log(avctx, AV_LOG_DEBUG,
                       "dropping low score %i frame header from offset %i to %i\n",
                       curr->max_score, curr->offset, curr->next->offset);
            }
            temp = curr->next;
            av_freep(&curr->link_penalty);
            av_free(curr);
            fpc->nb_headers_buffered--;
        }
        /* Release returned data from ring buffer. */
        av_fifo_drain(fpc->fifo_buf, best_child->offset);

        /* Fix the offset for the headers remaining to match the new buffer. */
        for (curr = best_child->next; curr; curr = curr->next)
            curr->offset -= best_child->offset;

        fpc->nb_headers_buffered--;
        best_child->offset = 0;
        fpc->headers       = best_child;
        if (fpc->nb_headers_buffered >= FLAC_MIN_HEADERS) {
            fpc->best_header = best_child;
            return get_best_header(fpc, poutbuf, poutbuf_size);
        }
        fpc->best_header = NULL;
    } else if (fpc->best_header) {
        /* No end frame no need to delete the buffer; probably eof */
        FLACHeaderMarker *temp;

        for (curr = fpc->headers; curr != fpc->best_header; curr = temp) {
            temp = curr->next;
            av_freep(&curr->link_penalty);
            av_free(curr);
        }
        fpc->headers = fpc->best_header->next;
        av_freep(&fpc->best_header->link_penalty);
        av_freep(&fpc->best_header);
    }

    /* Find and score new headers.                                     */
    /* buf_size is to zero when padding, so check for this since we do */
    /* not want to try to read more input once we have found the end.  */
    /* Note that as (non-modified) parameters, buf can be non-NULL,    */
    /* while buf_size is 0.                                            */
    while ((buf && buf_size && read_end < buf + buf_size &&
            fpc->nb_headers_buffered < FLAC_MIN_HEADERS)
           || ((!buf || !buf_size) && !fpc->end_padded)) {
        int start_offset;

        /* Pad the end once if EOF, to check the final region for headers. */
        if (!buf || !buf_size) {
            fpc->end_padded = 1;
            buf_size = MAX_FRAME_HEADER_SIZE;
            read_end = read_start + MAX_FRAME_HEADER_SIZE;
        } else {
            /* The maximum read size is the upper-bound of what the parser
               needs to have the required number of frames buffered */
            int nb_desired = FLAC_MIN_HEADERS - fpc->nb_headers_buffered + 1;
            read_end       = read_end + FFMIN(buf + buf_size - read_end,
                                              nb_desired * FLAC_AVG_FRAME_SIZE);
        }
//......... part of the code omitted .........
Developer ID: chloette, Project: ffmpeg4android, Lines of code: 101
Example 15: lumRangeToJpeg_c

static void lumRangeToJpeg_c(int16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (FFMIN(dst[i], 30189) * 19077 - 39057361) >> 14;
}
Developer ID: LittleKey, Project: FFmpeg, Lines of code: 6
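Example 15 saturates the luma sample with FFMIN before the fixed-point multiply, so the limited-to-full-range expansion cannot overflow; the constants 30189, 19077 and 39057361 are swscale's precomputed fixed-point scale and offset. A sketch of the same shape, fed an out-of-range input:

#include <stdio.h>
#include <stdint.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int16_t sample = 32000;   /* out-of-range input sample */
    /* clamp first so the scaled result stays at the full-range peak */
    int32_t scaled = (FFMIN(sample, 30189) * 19077 - 39057361) >> 14;
    printf("%d\n", (int)scaled); /* 32767 */
    return 0;
}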
Example 16: find_frame_end

/**
 * Find the end of the current frame in the bitstream.
 * @return the position of the first byte of the next frame, or -1
 */
static int find_frame_end(MJPEGParserContext *m, const uint8_t *buf, int buf_size)
{
    ParseContext *pc= &m->pc;
    int vop_found, i;
    uint32_t state;

    vop_found= pc->frame_start_found;
    state= pc->state;

    i=0;
    if(!vop_found){
        for(i=0; i<buf_size;){
            state= (state<<8) | buf[i];
            if(state>=0xFFC00000 && state<=0xFFFEFFFF){
                if(state>=0xFFD80000 && state<=0xFFD8FFFF){
                    i++;
                    vop_found=1;
                    break;
                }else if(state<0xFFD00000 || state>0xFFD9FFFF){
                    m->size= (state&0xFFFF)-1;
                }
            }
            if(m->size>0){
                int size= FFMIN(buf_size-i, m->size);
                i+=size;
                m->size-=size;
                state=0;
                continue;
            }else
                i++;
        }
    }

    if(vop_found){
        /* EOF considered as end of frame */
        if (buf_size == 0)
            return 0;
        for(; i<buf_size;){
            state= (state<<8) | buf[i];
            if(state>=0xFFC00000 && state<=0xFFFEFFFF){
                if(state>=0xFFD80000 && state<=0xFFD8FFFF){
                    pc->frame_start_found=0;
                    pc->state=0;
                    return i-3;
                } else if(state<0xFFD00000 || state>0xFFD9FFFF){
                    m->size= (state&0xFFFF)-1;
                }
            }
            if(m->size>0){
                int size= FFMIN(buf_size-i, m->size);
                i+=size;
                m->size-=size;
                state=0;
                continue;
            }else
                i++;
        }
    }
    pc->frame_start_found= vop_found;
    pc->state= state;
    return END_NOT_FOUND;
}
Developer ID: OpenEmu, Project: VICE-Core, Lines of code: 65
Example 17: swscale

//......... part of the code omitted .........
                             srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);

    ff_init_slice_from_src(vout_slice, (uint8_t**)dst, dstStride, c->dstW,
                           dstY, dstH, dstY >> c->chrDstVSubSample,
                           FF_CEIL_RSHIFT(dstH, c->chrDstVSubSample), 0);
    if (srcSliceY == 0) {
        hout_slice->plane[0].sliceY = lastInLumBuf + 1;
        hout_slice->plane[1].sliceY = lastInChrBuf + 1;
        hout_slice->plane[2].sliceY = lastInChrBuf + 1;
        hout_slice->plane[3].sliceY = lastInLumBuf + 1;
        hout_slice->plane[0].sliceH =
        hout_slice->plane[1].sliceH =
        hout_slice->plane[2].sliceH =
        hout_slice->plane[3].sliceH = 0;
        hout_slice->width = dstW;
    }
#endif

    for (; dstY < dstH; dstY++) {
        const int chrDstY = dstY >> c->chrDstVSubSample;
#ifndef NEW_FILTER
        uint8_t *dest[4] = {
            dst[0] + dstStride[0] * dstY,
            dst[1] + dstStride[1] * chrDstY,
            dst[2] + dstStride[2] * chrDstY,
            (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3] + dstStride[3] * dstY : NULL,
        };
#endif
        int use_mmx_vfilter= c->use_mmx_vfilter;

        // First line needed as input
        const int firstLumSrcY  = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
        const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize, vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1), dstH - 1)]);
        // First line needed as input
        const int firstChrSrcY  = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);

        // Last line needed as input
        int lastLumSrcY  = FFMIN(c->srcH,    firstLumSrcY  + vLumFilterSize) - 1;
        int lastLumSrcY2 = FFMIN(c->srcH,    firstLumSrcY2 + vLumFilterSize) - 1;
        int lastChrSrcY  = FFMIN(c->chrSrcH, firstChrSrcY  + vChrFilterSize) - 1;
        int enough_lines;
#ifdef NEW_FILTER
        int i;
        int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
#endif

        // handle holes (FAST_BILINEAR & weird filters)
        if (firstLumSrcY > lastInLumBuf) {
#ifdef NEW_FILTER
            hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
            if (hasLumHoles) {
                hout_slice->plane[0].sliceY = firstLumSrcY;
                hout_slice->plane[3].sliceY = firstLumSrcY;
                hout_slice->plane[0].sliceH =
                hout_slice->plane[3].sliceH = 0;
            }
#endif
            lastInLumBuf = firstLumSrcY - 1;
        }
        if (firstChrSrcY > lastInChrBuf) {
#ifdef NEW_FILTER
            hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
            if (hasChrHoles) {
                hout_slice->plane[1].sliceY = firstChrSrcY;
                hout_slice->plane[2].sliceY = firstChrSrcY;
Developer ID: LittleKey, Project: FFmpeg, Lines of code: 67
Example 18: vp5_parse_coeff

static void vp5_parse_coeff(VP56Context *s)
{
    VP56RangeCoder *c = &s->c;
    VP56Model *model = s->modelp;
    uint8_t *permute = s->scantable.permutated;
    uint8_t *model1, *model2;
    int coeff, sign, coeff_idx;
    int b, i, cg, idx, ctx, ctx_last;
    int pt = 0;    /* plane type (0 for Y, 1 for U or V) */

    for (b=0; b<6; b++) {
        int ct = 1;    /* code type */

        if (b > 3) pt = 1;

        ctx = 6*s->coeff_ctx[vp56_b6to4[b]][0]
              + s->above_blocks[s->above_block_idx[b]].not_null_dc;
        model1 = model->coeff_dccv[pt];
        model2 = model->coeff_dcct[pt][ctx];

        for (coeff_idx=0; coeff_idx<64; ) {
            if (vp56_rac_get_prob(c, model2[0])) {
                if (vp56_rac_get_prob(c, model2[2])) {
                    if (vp56_rac_get_prob(c, model2[3])) {
                        s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 4;
                        idx = vp56_rac_get_tree(c, vp56_pc_tree, model1);
                        sign = vp56_rac_get(c);
                        coeff = vp56_coeff_bias[idx+5];
                        for (i=vp56_coeff_bit_length[idx]; i>=0; i--)
                            coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i;
                    } else {
                        if (vp56_rac_get_prob(c, model2[4])) {
                            coeff = 3 + vp56_rac_get_prob(c, model1[5]);
                            s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 3;
                        } else {
                            coeff = 2;
                            s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 2;
                        }
                        sign = vp56_rac_get(c);
                    }
                    ct = 2;
                } else {
                    ct = 1;
                    s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 1;
                    sign = vp56_rac_get(c);
                    coeff = 1;
                }
                coeff = (coeff ^ -sign) + sign;
                if (coeff_idx)
                    coeff *= s->dequant_ac;
                s->block_coeff[b][permute[coeff_idx]] = coeff;
            } else {
                if (ct && !vp56_rac_get_prob(c, model2[1]))
                    break;
                ct = 0;
                s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 0;
            }

            cg = vp5_coeff_groups[++coeff_idx];
            ctx = s->coeff_ctx[vp56_b6to4[b]][coeff_idx];
            model1 = model->coeff_ract[pt][ct][cg];
            model2 = cg > 2 ? model1 : model->coeff_acct[pt][ct][cg][ctx];
        }

        ctx_last = FFMIN(s->coeff_ctx_last[vp56_b6to4[b]], 24);
        s->coeff_ctx_last[vp56_b6to4[b]] = coeff_idx;
        if (coeff_idx < ctx_last)
            for (i=coeff_idx; i<=ctx_last; i++)
                s->coeff_ctx[vp56_b6to4[b]][i] = 5;
        s->above_blocks[s->above_block_idx[b]].not_null_dc = s->coeff_ctx[vp56_b6to4[b]][0];
    }
}
Developer ID: AndyA, Project: ffmbc, Lines of code: 72
Example 19: ff_updateMMXDitherTables

void ff_updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
                              int lastInLumBuf, int lastInChrBuf)
{
    const int dstH= c->dstH;
    const int flags= c->flags;
    int16_t **lumPixBuf= c->lumPixBuf;
    int16_t **chrUPixBuf= c->chrUPixBuf;
    int16_t **alpPixBuf= c->alpPixBuf;
    const int vLumBufSize= c->vLumBufSize;
    const int vChrBufSize= c->vChrBufSize;
    int32_t *vLumFilterPos= c->vLumFilterPos;
    int32_t *vChrFilterPos= c->vChrFilterPos;
    int16_t *vLumFilter= c->vLumFilter;
    int16_t *vChrFilter= c->vChrFilter;
    int32_t *lumMmxFilter= c->lumMmxFilter;
    int32_t *chrMmxFilter= c->chrMmxFilter;
    int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
    const int vLumFilterSize= c->vLumFilterSize;
    const int vChrFilterSize= c->vChrFilterSize;
    const int chrDstY= dstY>>c->chrDstVSubSample;
    const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
    const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input

    c->blueDither= ff_dither8[dstY&1];
    if (c->dstFormat == AV_PIX_FMT_RGB555 || c->dstFormat == AV_PIX_FMT_BGR555)
        c->greenDither= ff_dither8[dstY&1];
    else
        c->greenDither= ff_dither4[dstY&1];
    c->redDither= ff_dither8[(dstY+1)&1];
    if (dstY < dstH - 2) {
        const int16_t **lumSrcPtr= (const int16_t **)(void*) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
        const int16_t **chrUSrcPtr= (const int16_t **)(void*) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
        const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)(void*) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
        int i;

        if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->srcH) {
            const int16_t **tmpY = (const int16_t **) lumPixBuf + 2 * vLumBufSize;
            int neg = -firstLumSrcY, i, end = FFMIN(c->srcH - firstLumSrcY, vLumFilterSize);
            for (i = 0; i < neg;            i++)
                tmpY[i] = lumSrcPtr[neg];
            for (     ; i < end;            i++)
                tmpY[i] = lumSrcPtr[i];
            for (     ; i < vLumFilterSize; i++)
                tmpY[i] = tmpY[i-1];
            lumSrcPtr = tmpY;

            if (alpSrcPtr) {
                const int16_t **tmpA = (const int16_t **) alpPixBuf + 2 * vLumBufSize;
                for (i = 0; i < neg;            i++)
                    tmpA[i] = alpSrcPtr[neg];
                for (     ; i < end;            i++)
                    tmpA[i] = alpSrcPtr[i];
                for (     ; i < vLumFilterSize; i++)
                    tmpA[i] = tmpA[i - 1];
                alpSrcPtr = tmpA;
            }
        }
        if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
            const int16_t **tmpU = (const int16_t **) chrUPixBuf + 2 * vChrBufSize;
            int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
            for (i = 0; i < neg;            i++) {
                tmpU[i] = chrUSrcPtr[neg];
            }
            for (     ; i < end;            i++) {
                tmpU[i] = chrUSrcPtr[i];
            }
            for (     ; i < vChrFilterSize; i++) {
                tmpU[i] = tmpU[i - 1];
            }
            chrUSrcPtr = tmpU;
        }

        if (flags & SWS_ACCURATE_RND) {
            int s= APCK_SIZE / 8;
            for (i=0; i<vLumFilterSize; i+=2) {
                *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
                *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
                lumMmxFilter[s*i+APCK_COEF/4  ]=
                lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
                    + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
                if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                    *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
                    *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
                    alpMmxFilter[s*i+APCK_COEF/4  ]=
                    alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4  ];
                }
            }
            for (i=0; i<vChrFilterSize; i+=2) {
                *(const void**)&chrMmxFilter[s*i              ]= chrUSrcPtr[i  ];
                *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrUSrcPtr[i+(vChrFilterSize>1)];
                chrMmxFilter[s*i+APCK_COEF/4  ]=
                chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
                    + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
            }
        } else {
            for (i=0; i<vLumFilterSize; i++) {
                *(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
                lumMmxFilter[4*i+2]=
                lumMmxFilter[4*i+3]= ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001U;
//......... part of the code omitted .........
Developer ID: 309746069, Project: FFmpeg, Lines of code: 101
Example 20: mov_text_decode_frame

static int mov_text_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_sub_ptr, AVPacket *avpkt)
{
    AVSubtitle *sub = data;
    MovTextContext *m = avctx->priv_data;
    int ret, ts_start, ts_end;
    AVBPrint buf;
    char *ptr = avpkt->data;
    char *end;
    int text_length, tsmb_type, ret_tsmb;
    uint64_t tsmb_size;
    const uint8_t *tsmb;

    if (!ptr || avpkt->size < 2)
        return AVERROR_INVALIDDATA;

    /*
     * A packet of size two with value zero is an empty subtitle
     * used to mark the end of the previous non-empty subtitle.
     * We can just drop them here as we have duration information
     * already. If the value is non-zero, then it's technically a
     * bad packet.
     */
    if (avpkt->size == 2)
        return AV_RB16(ptr) == 0 ? 0 : AVERROR_INVALIDDATA;

    /*
     * The first two bytes of the packet are the length of the text string
     * In complex cases, there are style descriptors appended to the string
     * so we can't just assume the packet size is the string size.
     */
    text_length = AV_RB16(ptr);
    end = ptr + FFMIN(2 + text_length, avpkt->size);
    ptr += 2;

    ts_start = av_rescale_q(avpkt->pts,
                            avctx->time_base,
                            (AVRational){1,100});
    ts_end   = av_rescale_q(avpkt->pts + avpkt->duration,
                            avctx->time_base,
                            (AVRational){1,100});

    tsmb_size = 0;
    m->tracksize = 2 + text_length;
    m->style_entries = 0;
    m->box_flags = 0;
    m->count_s = 0;
    // Note that the spec recommends lines be no longer than 2048 characters.
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
    if (text_length + 2 != avpkt->size) {
        while (m->tracksize + 8 <= avpkt->size) {
            // A box is a minimum of 8 bytes.
            tsmb = ptr + m->tracksize - 2;
            tsmb_size = AV_RB32(tsmb);
            tsmb += 4;
            tsmb_type = AV_RB32(tsmb);
            tsmb += 4;

            if (tsmb_size == 1) {
                if (m->tracksize + 16 > avpkt->size)
                    break;
                tsmb_size = AV_RB64(tsmb);
                tsmb += 8;
                m->size_var = 16;
            } else
                m->size_var = 8;
            //size_var is equal to 8 or 16 depending on the size of box

            if (m->tracksize + tsmb_size > avpkt->size)
                break;

            for (size_t i = 0; i < box_count; i++) {
                if (tsmb_type == box_types[i].type) {
                    if (m->tracksize + m->size_var + box_types[i].base_size > avpkt->size)
                        break;
                    ret_tsmb = box_types[i].decode(tsmb, m, avpkt);
                    if (ret_tsmb == -1)
                        break;
                }
            }
            m->tracksize = m->tracksize + tsmb_size;
        }
        text_to_ass(&buf, ptr, end, m);
        mov_text_cleanup(m);
    } else
        text_to_ass(&buf, ptr, end, m);

    ret = ff_ass_add_rect_bprint(sub, &buf, ts_start, ts_end - ts_start);
    av_bprint_finalize(&buf, NULL);
    if (ret < 0)
        return ret;
    *got_sub_ptr = sub->num_rects > 0;
    return avpkt->size;
}
Developer ID: zzilla, Project: CodeCollection, Lines of code: 98
Example 21: FFMIN

static ResampleContext *resample_init(ResampleContext *c, int out_rate, int in_rate,
                                      int filter_size, int phase_shift, int linear,
                                      double cutoff0, enum AVSampleFormat format,
                                      enum SwrFilterType filter_type, double kaiser_beta,
                                      double precision, int cheby, int exact_rational)
{
    double cutoff = cutoff0? cutoff0 : 0.97;
    double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
    int phase_count= 1<<phase_shift;
    int phase_count_compensation = phase_count;

    if (exact_rational) {
        int phase_count_exact, phase_count_exact_den;

        av_reduce(&phase_count_exact, &phase_count_exact_den, out_rate, in_rate, INT_MAX);
        if (phase_count_exact <= phase_count) {
            phase_count_compensation = phase_count_exact * (phase_count / phase_count_exact);
            phase_count = phase_count_exact;
        }
    }

    if (!c || c->phase_count != phase_count || c->linear!=linear || c->factor != factor
           || c->filter_length != FFMAX((int)ceil(filter_size/factor), 1) || c->format != format
           || c->filter_type != filter_type || c->kaiser_beta != kaiser_beta) {
        c = av_mallocz(sizeof(*c));
        if (!c)
            return NULL;

        c->format= format;

        c->felem_size= av_get_bytes_per_sample(c->format);

        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            c->filter_shift = 15;
            break;
        case AV_SAMPLE_FMT_S32P:
            c->filter_shift = 30;
            break;
        case AV_SAMPLE_FMT_FLTP:
        case AV_SAMPLE_FMT_DBLP:
            c->filter_shift = 0;
            break;
        default:
            av_log(NULL, AV_LOG_ERROR, "Unsupported sample format\n");
            av_assert0(0);
        }

        if (filter_size/factor > INT32_MAX/256) {
            av_log(NULL, AV_LOG_ERROR, "Filter length too large\n");
            goto error;
        }

        c->phase_count   = phase_count;
        c->linear        = linear;
        c->factor        = factor;
        c->filter_length = FFMAX((int)ceil(filter_size/factor), 1);
        c->filter_alloc  = FFALIGN(c->filter_length, 8);
        c->filter_bank   = av_calloc(c->filter_alloc, (phase_count+1)*c->felem_size);
        c->filter_type   = filter_type;
        c->kaiser_beta   = kaiser_beta;
        c->phase_count_compensation = phase_count_compensation;
        if (!c->filter_bank)
            goto error;
        if (build_filter(c, (void*)c->filter_bank, factor, c->filter_length, c->filter_alloc, phase_count, 1<<c->filter_shift, filter_type, kaiser_beta))
            goto error;
        memcpy(c->filter_bank + (c->filter_alloc*phase_count+1)*c->felem_size, c->filter_bank, (c->filter_alloc-1)*c->felem_size);
        memcpy(c->filter_bank + (c->filter_alloc*phase_count  )*c->felem_size, c->filter_bank + (c->filter_alloc - 1)*c->felem_size, c->felem_size);
    }

    c->compensation_distance= 0;
    if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2))
        goto error;
    while (c->dst_incr < (1<<20) && c->src_incr < (1<<20)) {
        c->dst_incr *= 2;
        c->src_incr *= 2;
    }
    c->ideal_dst_incr = c->dst_incr;
    c->dst_incr_div   = c->dst_incr / c->src_incr;
    c->dst_incr_mod   = c->dst_incr % c->src_incr;

    c->index= -phase_count*((c->filter_length-1)/2);
    c->frac= 0;

    swri_resample_dsp_init(c);

    return c;
error:
    av_freep(&c->filter_bank);
    av_free(c);
    return NULL;
}
Developer ID: jpcottin, Project: FFmpeg, Lines of code: 90
Example 22: filter_frame

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FadeContext *s       = ctx->priv;
    double frame_timestamp = frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base);

    // Calculate Fade assuming this is a Fade In
    if (s->fade_state == VF_FADE_WAITING) {
        s->factor=0;
        if (frame_timestamp >= s->start_time/(double)AV_TIME_BASE
            && inlink->frame_count >= s->start_frame) {
            // Time to start fading
            s->fade_state = VF_FADE_FADING;

            // Save start time in case we are starting based on frames and fading based on time
            if (s->start_time == 0 && s->start_frame != 0) {
                s->start_time = frame_timestamp*(double)AV_TIME_BASE;
            }

            // Save start frame in case we are starting based on time and fading based on frames
            if (s->start_time != 0 && s->start_frame == 0) {
                s->start_frame = inlink->frame_count;
            }
        }
    }
    if (s->fade_state == VF_FADE_FADING) {
        if (s->duration == 0) {
            // Fading based on frame count
            s->factor = (inlink->frame_count - s->start_frame) * s->fade_per_frame;
            if (inlink->frame_count > s->start_frame + s->nb_frames) {
                s->fade_state = VF_FADE_DONE;
            }

        } else {
            // Fading based on duration
            s->factor = (frame_timestamp - s->start_time/(double)AV_TIME_BASE)
                            * (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
            if (frame_timestamp > s->start_time/(double)AV_TIME_BASE
                                  + s->duration/(double)AV_TIME_BASE) {
                s->fade_state = VF_FADE_DONE;
            }
        }
    }
    if (s->fade_state == VF_FADE_DONE) {
        s->factor=UINT16_MAX;
    }

    s->factor = av_clip_uint16(s->factor);

    // Invert fade_factor if Fading Out
    if (s->type == FADE_OUT) {
        s->factor=UINT16_MAX-s->factor;
    }

    if (s->factor < UINT16_MAX) {
        if (s->alpha) {
            ctx->internal->execute(ctx, filter_slice_alpha, frame, NULL,
                                   FFMIN(frame->height, ctx->graph->nb_threads));
        } else if (s->is_packed_rgb && !s->black_fade) {
            ctx->internal->execute(ctx, filter_slice_rgb, frame, NULL,
                                   FFMIN(frame->height, ctx->graph->nb_threads));
        } else {
            /* luma, or rgb plane in case of black */
            ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
                                   FFMIN(frame->height, ctx->graph->nb_threads));

            if (frame->data[1] && frame->data[2]) {
                /* chroma planes */
                ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
                                       FFMIN(frame->height, ctx->graph->nb_threads));
            }
        }
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Developer ID: 309746069, Project: FFmpeg, Lines of code: 76
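The repeated FFMIN(frame->height, ctx->graph->nb_threads) keeps a slice-threaded filter from dispatching more jobs than there are rows to process. A sketch of the same decision with invented numbers:

#include <stdio.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int height = 4, nb_threads = 16;
    int nb_jobs = FFMIN(height, nb_threads); /* at most one slice per row */
    printf("dispatching %d jobs\n", nb_jobs); /* 4 */
    return 0;
}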
Example 23: ff_rtmp_packet_write

int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t pkt_hdr[16], *p = pkt_hdr;
    int mode = RTMP_PS_TWELVEBYTES;
    int off = 0;
    int size = 0;

    pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;

    //if channel_id = 0, this is first presentation of prev_pkt, send full hdr.
    if (prev_pkt[pkt->channel_id].channel_id &&
        pkt->extra == prev_pkt[pkt->channel_id].extra) {
        if (pkt->type == prev_pkt[pkt->channel_id].type &&
            pkt->data_size == prev_pkt[pkt->channel_id].data_size) {
            mode = RTMP_PS_FOURBYTES;
            if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
                mode = RTMP_PS_ONEBYTE;
        } else {
            mode = RTMP_PS_EIGHTBYTES;
        }
    }

    if (pkt->channel_id < 64) {
        bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
    } else if (pkt->channel_id < 64 + 256) {
        bytestream_put_byte(&p, 0 | (mode << 6));
        bytestream_put_byte(&p, pkt->channel_id - 64);
    } else {
        bytestream_put_byte(&p, 1 | (mode << 6));
        bytestream_put_le16(&p, pkt->channel_id - 64);
    }
    if (mode != RTMP_PS_ONEBYTE) {
        uint32_t timestamp = pkt->timestamp;
        if (mode != RTMP_PS_TWELVEBYTES)
            timestamp = pkt->ts_delta;
        bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
        if (mode != RTMP_PS_FOURBYTES) {
            bytestream_put_be24(&p, pkt->data_size);
            bytestream_put_byte(&p, pkt->type);
            if (mode == RTMP_PS_TWELVEBYTES)
                bytestream_put_le32(&p, pkt->extra);
        }
        if (timestamp >= 0xFFFFFF)
            bytestream_put_be32(&p, timestamp);
    }
    // save history
    prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
    prev_pkt[pkt->channel_id].type       = pkt->type;
    prev_pkt[pkt->channel_id].data_size  = pkt->data_size;
    prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
    if (mode != RTMP_PS_TWELVEBYTES) {
        prev_pkt[pkt->channel_id].ts_delta = pkt->ts_delta;
    } else {
        prev_pkt[pkt->channel_id].ts_delta = pkt->timestamp;
    }
    prev_pkt[pkt->channel_id].extra      = pkt->extra;

    ffurl_write(h, pkt_hdr, p-pkt_hdr);
    size = p - pkt_hdr + pkt->data_size;

    while (off < pkt->data_size) {
        int towrite = FFMIN(chunk_size, pkt->data_size - off);
        ffurl_write(h, pkt->data + off, towrite);
        off += towrite;

        if (off < pkt->data_size) {
            uint8_t marker = 0xC0 | pkt->channel_id;
            ffurl_write(h, &marker, 1);
            size++;
        }
    }
    return size;
}
Developer ID: Fatbag, Project: libav, Lines of code: 72
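The write loop above is the canonical chunking pattern: each iteration sends FFMIN(chunk_size, remaining) bytes, so the last, possibly short, chunk needs no special case. A sketch of the pattern in isolation:

#include <stdio.h>

#define FFMIN(a,b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int data_size = 10, chunk_size = 4, off = 0;
    while (off < data_size) {
        int towrite = FFMIN(chunk_size, data_size - off); /* full or final short chunk */
        printf("chunk of %d bytes at offset %d\n", towrite, off);
        off += towrite;
    }
    return 0; /* emits chunks of 4, 4, 2 bytes */
}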
Example 24: x8_setup_spatial_compensation

/** Collect statistics and prepare the edge pixels required by the other spatial compensation functions.
 * @param src pointer to the beginning of the processed block
 * @param dst pointer to emu_edge, edge pixels are stored the way other compensation routines do.
 * @param linesize byte offset between 2 vertical pixels in the source image
 * @param range pointer to the variable where the edge pixel range is to be stored (max-min values)
 * @param psum pointer to the variable where the edge pixel sum is to be stored
 * @param edges Informs this routine that the block is on an image border, so it has to interpolate the missing edge pixels.
                and some of the edge pixels should be interpolated, the flag has the following meaning:
                1   - mb_x==0 - first block in the row, interpolate area #1,#2,#3;
                2   - mb_y==0 - first row, interpolate area #3,#4,#5,#6;
        note:   1|2 - mb_x==mb_y==0 - first block, use 0x80 value for all areas;
                4   - mb_x>= (mb_width-1) last block in the row, interpolate area #5;
*/
static void x8_setup_spatial_compensation(uint8_t *src, uint8_t *dst, int linesize,
           int * range, int * psum, int edges)
{
    uint8_t * ptr;
    int sum;
    int i;
    int min_pix,max_pix;
    uint8_t c;

    if((edges&3)==3){
        *psum=0x80*(8+1+8+2);
        *range=0;
        memset(dst,0x80,16+1+16+8);
        //this triggers flat_dc for sure.
        //flat_dc avoids all (other) prediction modes, but requires dc_level decoding.
        return;
    }

    min_pix=256;
    max_pix=-1;

    sum=0;

    if(!(edges&1)){//(mb_x!=0)//there is previous block on this row
        ptr=src-1;//left column, area 2
        for(i=7;i>=0;i--){
            c=*(ptr-1);//area1, same mb as area2, no need to check
            dst[area1+i]=c;
            c=*(ptr);

            sum+=c;
            min_pix=FFMIN(min_pix,c);
            max_pix=FFMAX(max_pix,c);
            dst[area2+i]=c;

            ptr+=linesize;
        }
    }

    if(!(edges&2)){ //(mb_y!=0)//there is row above
        ptr=src-linesize;//top line
        for(i=0;i<8;i++){
            c=*(ptr+i);
            sum+=c;
            min_pix=FFMIN(min_pix, c);
            max_pix=FFMAX(max_pix, c);
        }
        if(edges&4){//last block on the row?
            memset(dst+area5,c,8);//set with last pixel fr
            memcpy(dst+area4, ptr, 8);
        }else{
            memcpy(dst+area4, ptr, 16);//both area4 and 5
        }
        memcpy(dst+area6, ptr-linesize, 8);//area6 always present in the above block
    }
    //now calculate the stuff we need
    if(edges&3){//mb_x==0 || mb_y==0){
        int avg=(sum+4)>>3;

        if(edges&1){ //(mb_x==0) {//implies mb_y!=0
            memset(dst+area1,avg,8+8+1);//areas 1,2 and 3 are averaged
        }else{//implies y==0 x!=0
            memset(dst+area3,avg, 1+16+8);//areas 3, 4,5,6
        }
        sum+=avg*9;
    }else{
Developer: 248668342 | Project: ffmpeg-windows | Lines: 79
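Example 24 shows the canonical running min/max idiom: min_pix starts at 256 and max_pix at -1, sentinels just outside the 0..255 pixel range, so the first FFMIN/FFMAX comparison always adopts the first sample. A compact sketch of just that statistics pass, assuming 8-bit pixels (the function name is ours, not from the decoder):

#include <stdint.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* Return max - min (the "range") of n 8-bit pixels; also store their sum. */
static int pixel_range(const uint8_t *pix, int n, int *psum)
{
    int min_pix = 256, max_pix = -1, sum = 0; /* sentinels outside 0..255 */
    for (int i = 0; i < n; i++) {
        sum += pix[i];
        min_pix = FFMIN(min_pix, pix[i]);
        max_pix = FFMAX(max_pix, pix[i]);
    }
    *psum = sum;
    return max_pix - min_pix;
}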
Example 25: ff_rtmp_packet_read
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
                        int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t hdr, t, buf[16];
    int channel_id, timestamp, data_size, offset = 0;
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int size = 0;

    if (ffurl_read(h, &hdr, 1) != 1)
        return AVERROR(EIO);
    size++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { // special case for channel number >= 64
        buf[1] = 0;
        if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        size += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    data_size = prev_pkt[channel_id].data_size;
    type      = prev_pkt[channel_id].type;
    extra     = prev_pkt[channel_id].extra;

    hdr >>= 6;
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].ts_delta;
    } else {
        if (ffurl_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        size += 3;
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (ffurl_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            size += 3;
            data_size = AV_RB24(buf);
            if (ffurl_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            size++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (ffurl_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                size += 4;
                extra = AV_RL32(buf);
            }
        }
        if (timestamp == 0xFFFFFF) {
            if (ffurl_read_complete(h, buf, 4) != 4)
                return AVERROR(EIO);
            timestamp = AV_RB32(buf);
        }
    }
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if (ff_rtmp_packet_create(p, channel_id, type, timestamp, data_size))
        return -1;
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].data_size  = data_size;
    prev_pkt[channel_id].ts_delta   = timestamp - prev_pkt[channel_id].timestamp;
    prev_pkt[channel_id].timestamp  = timestamp;
    prev_pkt[channel_id].extra      = extra;
    while (data_size > 0) {
        int toread = FFMIN(data_size, chunk_size);
        if (ffurl_read_complete(h, p->data + offset, toread) != toread) {
            ff_rtmp_packet_destroy(p);
            return AVERROR(EIO);
        }
        data_size -= chunk_size;
        offset    += chunk_size;
        size      += chunk_size;
        if (data_size > 0) {
            ffurl_read_complete(h, &t, 1); // marker
            size++;
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
    return size;
}
Developer: Fatbag | Project: libav | Lines: 86
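Example 25 mirrors example 23 on the read side: FFMIN(data_size, chunk_size) bounds each read so the final short chunk never overruns the packet buffer, and the continuation marker is verified between chunks. A reduced sketch of the loop shape follows; recv is a hypothetical transport callback, and note that we advance the counters by toread, whereas the quoted library code advances by chunk_size, which also terminates correctly because data_size simply goes non-positive after the final short chunk:

#include <stdint.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

/* Reassemble a `total`-byte payload from chunk_size-sized slices.
 * `recv` stands in for the transport read; returns 0 on success. */
static int read_chunked(uint8_t *dst, int total, int chunk_size,
                        uint8_t channel_id, int (*recv)(uint8_t *, int))
{
    int remaining = total, offset = 0;
    while (remaining > 0) {
        int toread = FFMIN(remaining, chunk_size); /* never read past the end */
        if (recv(dst + offset, toread))
            return -1;
        remaining -= toread;
        offset    += toread;
        if (remaining > 0) {                       /* expect a continuation byte */
            uint8_t marker;
            if (recv(&marker, 1) || marker != (uint8_t)(0xC0 | channel_id))
                return -1;
        }
    }
    return 0;
}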
Example 26: av_build_filter
/**
 * Builds a polyphase filterbank.
 * @param factor resampling factor
 * @param scale  wanted sum of coefficients for each filter
 * @param type   0 -> cubic, 1 -> Blackman-Nuttall windowed sinc,
 *               2..16 -> Kaiser windowed sinc with beta = 2..16
 */
void av_build_filter(FELEM *filter, double factor, int tap_count,
                     int phase_count, int scale, int type)
{
    int ph, i;
    double x, y, w, tab[tap_count];
    const int center = (tap_count - 1) / 2;

    /* if upsampling, only need to interpolate, no filter */
    if (factor > 1.0)
        factor = 1.0;

    for (ph = 0; ph < phase_count; ph++) {
        double norm = 0;
        for (i = 0; i < tap_count; i++) {
            x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
            if (x == 0)
                y = 1.0;
            else
                y = sin(x) / x;
            switch (type) {
            case 0: {
                const float d = -0.5; // first order derivative = -0.5
                x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                if (x < 1.0)
                    y = 1 - 3*x*x + 2*x*x*x + d*(-x*x + x*x*x);
                else
                    y = d*(-4 + 8*x - 5*x*x + x*x*x);
                break;
            }
            case 1:
                w = 2.0*x / (factor*tap_count) + M_PI;
                y *= 0.3635819 - 0.4891775 * cos(w) + 0.1365995 * cos(2*w)
                     - 0.0106411 * cos(3*w);
                break;
            default:
                w = 2.0*x / (factor*tap_count*M_PI);
                y *= bessel(type*sqrt(FFMAX(1 - w*w, 0)));
                break;
            }

            tab[i] = y;
            norm += y;
        }

        /* normalize so that a uniform color remains the same */
        for (i = 0; i < tap_count; i++) {
#ifdef CONFIG_RESAMPLE_AUDIOPHILE_KIDDY_MODE
            filter[ph * tap_count + i] = tab[i] / norm;
#else
            filter[ph * tap_count + i] = av_clip(lrintf(tab[i] * scale / norm),
                                                 FELEM_MIN, FELEM_MAX);
#endif
        }
    }

#if 0
    {
#define LEN 1024
        int j, k;
        double sine[LEN + tap_count];
        double filtered[LEN];
        double maxff = -2, minff = 2, maxsf = -2, minsf = 2;
        for (i = 0; i < LEN; i++) {
            double ss = 0, sf = 0, ff = 0;
            for (j = 0; j < LEN + tap_count; j++)
                sine[j] = cos(i*j*M_PI/LEN);
            for (j = 0; j < LEN; j++) {
                double sum = 0;
                ph = 0;
                for (k = 0; k < tap_count; k++)
                    sum += filter[ph * tap_count + k] * sine[k + j];
                filtered[j] = sum / (1 << FILTER_SHIFT);
                ss += sine[j + center] * sine[j + center];
                ff += filtered[j] * filtered[j];
                sf += sine[j + center] * filtered[j];
            }
            ss = sqrt(2*ss/LEN);
            ff = sqrt(2*ff/LEN);
            sf = 2*sf/LEN;
            maxff = FFMAX(maxff, ff);
            minff = FFMIN(minff, ff);
            maxsf = FFMAX(maxsf, sf);
            minsf = FFMIN(minsf, sf);
            if (i % 11 == 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n",
                       i, ss, maxff, minff, maxsf, minsf);
                minff = minsf = 2;
                maxff = maxsf = -2;
            }
        }
    }
#endif
}
Developer: Acidburn0zzz | Project: ffmpeg-concat | Lines: 88
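Two FFMIN/FFMAX details in example 26 deserve isolation: FFMAX(1 - w*w, 0) clamps the Kaiser window's sqrt argument, which can dip a hair below zero from floating-point roundoff at the filter edge, and av_clip saturates the fixed-point coefficients to [FELEM_MIN, FELEM_MAX] (av_clip is functionally the same as a nested FFMIN/FFMAX). A minimal sketch of both idioms, with names of our choosing:

#include <math.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* Saturating clamp: equivalent in effect to FFmpeg's av_clip. */
static inline int clip_int(int v, int lo, int hi)
{
    return FFMAX(lo, FFMIN(v, hi));
}

/* Clamp before sqrt: w*w may exceed 1.0 by a few ulps at the filter edge,
 * and sqrt of a negative argument would produce NaN coefficients. */
static double kaiser_arg(double w)
{
    return sqrt(FFMAX(1.0 - w * w, 0.0));
}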
Example 27: ff_ac3_bit_alloc_calc_mask
int ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd,
                               int start, int end, int fast_gain, int is_lfe,
                               int dba_mode, int dba_nsegs, uint8_t *dba_offsets,
                               uint8_t *dba_lengths, uint8_t *dba_values,
                               int16_t *mask)
{
    int16_t excite[AC3_CRITICAL_BANDS]; /* excitation */
    int band;
    int band_start, band_end, begin, end1;
    int lowcomp, fastleak, slowleak;

    /* excitation function */
    band_start = bin_to_band_tab[start];
    band_end   = bin_to_band_tab[end-1] + 1;

    if (band_start == 0) {
        lowcomp = 0;
        lowcomp = calc_lowcomp1(lowcomp, band_psd[0], band_psd[1], 384);
        excite[0] = band_psd[0] - fast_gain - lowcomp;
        lowcomp = calc_lowcomp1(lowcomp, band_psd[1], band_psd[2], 384);
        excite[1] = band_psd[1] - fast_gain - lowcomp;
        begin = 7;
        for (band = 2; band < 7; band++) {
            if (!(is_lfe && band == 6))
                lowcomp = calc_lowcomp1(lowcomp, band_psd[band], band_psd[band+1], 384);
            fastleak = band_psd[band] - fast_gain;
            slowleak = band_psd[band] - s->slow_gain;
            excite[band] = fastleak - lowcomp;
            if (!(is_lfe && band == 6)) {
                if (band_psd[band] <= band_psd[band+1]) {
                    begin = band + 1;
                    break;
                }
            }
        }

        end1 = FFMIN(band_end, 22);
        for (band = begin; band < end1; band++) {
            if (!(is_lfe && band == 6))
                lowcomp = calc_lowcomp(lowcomp, band_psd[band], band_psd[band+1], band);
            fastleak = FFMAX(fastleak - s->fast_decay, band_psd[band] - fast_gain);
            slowleak = FFMAX(slowleak - s->slow_decay, band_psd[band] - s->slow_gain);
            excite[band] = FFMAX(fastleak - lowcomp, slowleak);
        }
        begin = 22;
    } else {
        /* coupling channel */
        begin = band_start;
        fastleak = (s->cpl_fast_leak << 8) + 768;
        slowleak = (s->cpl_slow_leak << 8) + 768;
    }

    for (band = begin; band < band_end; band++) {
        fastleak = FFMAX(fastleak - s->fast_decay, band_psd[band] - fast_gain);
        slowleak = FFMAX(slowleak - s->slow_decay, band_psd[band] - s->slow_gain);
        excite[band] = FFMAX(fastleak, slowleak);
    }

    /* compute masking curve */
    for (band = band_start; band < band_end; band++) {
        int tmp = s->db_per_bit - band_psd[band];
        if (tmp > 0) {
            excite[band] += tmp >> 2;
        }
        mask[band] = FFMAX(ff_ac3_hearing_threshold_tab[band >> s->sr_shift][s->sr_code],
                           excite[band]);
    }
//......... some code omitted here .........
Developer: MaTriXy | Project: GLWallpaperVideoDemo | Lines: 67
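The repeated FFMAX updates in example 27 implement a "leaky peak": each band's leak value decays by a fixed amount per band but is floored at the current band's PSD minus the gain, while FFMIN(band_end, 22) caps the region where the low-complexity correction is applied. The recurrence in isolation, under assumed names:

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* One step of the AC-3 leaky-max excitation update: the leak decays by
 * `decay` per band, but never falls below the current band's psd minus
 * the gain. */
static int leak_step(int leak, int decay, int band_psd, int gain)
{
    return FFMAX(leak - decay, band_psd - gain);
}

ff_ac3_bit_alloc_calc_mask applies this step twice per band, once with the fast decay/gain pair and once with the slow pair, and takes the larger of the two leaks as the excitation.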
Example 28: update_subtitles
//......... some code omitted here .........
            if (!vo_spudec)
                vo_spudec = spudec_new(NULL);
            if (vo_vobsub || timestamp >= 0)
                spudec_assemble(vo_spudec, packet, len, timestamp);
        }
    } else if (is_text_sub(type) || is_av_sub(type) || type == 'd' || type == 'c') {
        int orig_type = type;
        double endpts;
        if (type == 'd' && !d_dvdsub->demuxer->teletext) {
            tt_stream_props tsp = {0};
            void *ptr = &tsp;
            if (teletext_control(NULL, TV_VBI_CONTROL_START, &ptr) == VBI_CONTROL_TRUE)
                d_dvdsub->demuxer->teletext = ptr;
        }
        if (d_dvdsub->non_interleaved)
            ds_get_next_pts(d_dvdsub);
        while (1) {
            double subpts = curpts;
            type = orig_type;
            len = ds_get_packet_sub(d_dvdsub, &packet, &subpts, &endpts);
            if (len < 0)
                break;
            if (is_av_sub(type)) {
#ifdef CONFIG_FFMPEG
                type = decode_avsub(d_dvdsub->sh, &packet, &len, &subpts, &endpts);
                if (type < 0)
                    mp_msg(MSGT_SPUDEC, MSGL_WARN, "lavc failed decoding subtitle\n");
                if (type <= 0)
#endif
                    continue;
            }
            if (type == 'm') {
                if (len < 2)
                    continue;
                len = FFMIN(len - 2, AV_RB16(packet));
                packet += 2;
            }
            if (type == 'd') {
                if (d_dvdsub->demuxer->teletext) {
                    uint8_t *p = packet;
                    if (len == 3124) { // wtv subtitle-only format
                        while (len >= 42) {
                            teletext_control(d_dvdsub->demuxer->teletext,
                                             TV_VBI_CONTROL_DECODE_LINE, p);
                            p += 42;
                            len -= 42;
                        }
                        return;
                    }
                    p++;
                    len--;
                    while (len >= 46) {
                        int sublen = p[1];
                        if (p[0] == 2 || p[0] == 3)
                            teletext_control(d_dvdsub->demuxer->teletext,
                                             TV_VBI_CONTROL_DECODE_DVB, p + 2);
                        p += sublen + 2;
                        len -= sublen + 2;
                    }
                }
                continue;
            }
            if (type == 'c') {
                subcc_process_data(packet, len);
                continue;
            }
#ifdef CONFIG_ASS
//......... some code omitted here .........
Developer: pder | Project: mplayer-svn | Lines: 67
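The FFMIN in example 28's type == 'm' branch is a defensive-parsing idiom: the packet declares its own 16-bit payload length (AV_RB16(packet)), but a corrupt stream could claim more bytes than were actually delivered, so the declared value is clamped to the len - 2 bytes that really follow the prefix. A generic sketch of the guard (the helper name is ours):

#include <stdint.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

/* Read a big-endian 16-bit length prefix and clamp it to the remaining
 * buffer so a lying header can never cause an overread. */
static int read_prefixed_len(const uint8_t *buf, int buflen)
{
    if (buflen < 2)
        return -1;                          /* not even a length field */
    int declared = (buf[0] << 8) | buf[1];  /* AV_RB16 equivalent */
    return FFMIN(declared, buflen - 2);     /* trust the smaller value */
}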
Note: the FFMIN examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs, and the snippets were selected from open-source projects contributed by various developers. Copyright in the source code remains with the original authors; consult each project's License before redistributing or using it, and do not republish without permission.