This tutorial of C++ sws_freeContext function code examples is quite practical; we hope it helps you.
This article collects typical usage examples of the C++ sws_freeContext function. If you are struggling with questions such as: What exactly does sws_freeContext do? How is sws_freeContext called? What does real code that uses sws_freeContext look like? then the hand-picked examples below may help. 29 code examples of sws_freeContext are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.

Example 1: run

//......... part of the code omitted .........
            break;
        }
        break;
    }

    // check the keyboard state for movement keys
    Uint8 *state = SDL_GetKeyboardState(NULL);
    if (state[SDL_SCANCODE_I] || state[SDL_SCANCODE_J] || state[SDL_SCANCODE_K] || state[SDL_SCANCODE_L]
        || state[SDL_SCANCODE_W] || state[SDL_SCANCODE_S] || state[SDL_SCANCODE_A] || state[SDL_SCANCODE_D]
        || state[SDL_SCANCODE_LSHIFT] || state[SDL_SCANCODE_RSHIFT]) {
        float mult;
        float base = 0.015;
        if (state[SDL_SCANCODE_LSHIFT] || state[SDL_SCANCODE_RSHIFT]) {
            mult = 6;
        } else {
            mult = 1;
        }
        // W for ascend
        if (state[SDL_SCANCODE_W]) {
            if (media->command->arg3 < 0)
                media->command->arg3 = 0;
            media->command->arg3 += 2 * mult * base;
            if (media->command->arg3 > 1)
                media->command->arg3 = 1;
        }
        // S for descend
        if (state[SDL_SCANCODE_S]) {
            if (media->command->arg3 > 0)
                media->command->arg3 = 0;
            media->command->arg3 -= 2 * mult * base;
            if (media->command->arg3 < -1)
                media->command->arg3 = -1;
        }
        // D for move right
        if (state[SDL_SCANCODE_D]) {
            if (media->command->arg1 < 0)
                media->command->arg1 = 0;
            media->command->arg1 += mult * base;
            if (media->command->arg1 > 1)
                media->command->arg1 = 1;
        }
        // A for move left
        if (state[SDL_SCANCODE_A]) {
            if (media->command->arg1 > 0)
                media->command->arg1 = 0;
            media->command->arg1 -= mult * base;
            if (media->command->arg1 < -1)
                media->command->arg1 = -1;
        }
        // K for move backward
        if (state[SDL_SCANCODE_K]) {
            if (media->command->arg2 < 0)
                media->command->arg2 = 0;
            media->command->arg2 += mult * base;
            if (media->command->arg2 > 1)
                media->command->arg2 = 1;
        }
        // I for move forward
        if (state[SDL_SCANCODE_I]) {
            if (media->command->arg2 > 0)
                media->command->arg2 = 0;
            media->command->arg2 -= mult * base;
            if (media->command->arg2 < -1)
                media->command->arg2 = -1;
        }
        // L for turn right
        if (state[SDL_SCANCODE_L]) {
            if (media->command->arg4 < 0)
                media->command->arg4 = 0;
            media->command->arg4 += mult * base;
            if (media->command->arg4 > 1)
                media->command->arg4 = 1;
        }
        // J for turn left
        if (state[SDL_SCANCODE_J]) {
            if (media->command->arg4 > 0)
                media->command->arg4 = 0;
            media->command->arg4 -= mult * base;
            if (media->command->arg4 < -1)
                media->command->arg4 = -1;
        }
        media->command->flag_PCMD = true;
    }

    boost::this_thread::sleep(boost::posix_time::microseconds(300));
  }

  // release
  // *note: SDL objects have to be freed before closing avcodec,
  // otherwise it causes a segmentation fault for some reason.
  SDL_DestroyTexture(bmpTex1);
  SDL_DestroyTexture(bmpTex2);
  SDL_DestroyRenderer(pRenderer1);
  SDL_DestroyRenderer(pRenderer2);
  SDL_DestroyWindow(pWindow1);
  SDL_DestroyWindow(pWindow2);
  free(pFrame_YUV420P);
  free(buffer_YUV420P);
  sws_freeContext(pConvertCtx_YUV420P);
}
Developer ID: bakaknv, Project: Drone, Lines: 101
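Before diving into the remaining examples, here is a minimal, self-contained sketch of the lifecycle that nearly every example below follows: create a context with sws_getContext, convert pixels with sws_scale, then release the context with sws_freeContext. All names, formats, and sizes in this sketch are illustrative assumptions, not code from any of the projects above.

extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/frame.h>
}

// Convert one decoded frame to RGBA at the same size; formats are illustrative.
static AVFrame *convert_to_rgba(const AVFrame *src)
{
    SwsContext *ctx = sws_getContext(src->width, src->height,
                                     (AVPixelFormat)src->format,
                                     src->width, src->height, AV_PIX_FMT_RGBA,
                                     SWS_BILINEAR, NULL, NULL, NULL);
    if (!ctx)
        return NULL;

    AVFrame *dst = av_frame_alloc();
    if (!dst) {
        sws_freeContext(ctx);
        return NULL;
    }
    dst->width  = src->width;
    dst->height = src->height;
    dst->format = AV_PIX_FMT_RGBA;
    if (av_frame_get_buffer(dst, 0) < 0) {   // allocates dst->data/linesize
        av_frame_free(&dst);
        sws_freeContext(ctx);
        return NULL;
    }

    sws_scale(ctx, (const uint8_t * const *)src->data, src->linesize,
              0, src->height, dst->data, dst->linesize);

    sws_freeContext(ctx);   // the context is not needed once scaling is done
    return dst;
}

In a long-running player you would normally keep one context alive and reuse it for every frame instead of rebuilding it per call; sws_getCachedContext (sketched after Example 2) automates exactly that.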
Example 2: sws_freeContext

// use ffmpeg sws_scale, for cygwin win32 and android common
// not for IOS
status_t FFRender::render_sws(AVFrame* frame)
{
#ifdef OS_IOS
    return ERROR;
#else
    if (mConvertCtx == NULL || mSurfaceFrame == NULL) {
        if (mConvertCtx != NULL) {
            sws_freeContext(mConvertCtx);
            mConvertCtx = NULL;
        }
        if (mSurfaceFrame != NULL) {
            av_frame_free(&mSurfaceFrame);
        }

        // just do color format conversion
        // avoid doing scaling as it costs lots of cpu
        AVPixelFormat out_fmt;
#if defined(__CYGWIN__) || defined(_MSC_VER)
        out_fmt = AV_PIX_FMT_RGB32;
#else
#ifdef RENDER_RGB565
        out_fmt = PIX_FMT_RGB565;
#else
        out_fmt = AV_PIX_FMT_RGB0;
#endif
#endif
        mConvertCtx = sws_getContext(
            frame->width, frame->height, (AVPixelFormat)frame->format,
            mFrameWidth, mFrameHeight, out_fmt,
            s_swsFlag, NULL, NULL, NULL);
        if (mConvertCtx == NULL) {
            LOGE("create convert ctx failed, width:%d, height:%d, pix:%d",
                 mFrameWidth, mFrameHeight, mFrameFormat);
            return ERROR;
        }
        LOGI("sws context created %dx%d %d->%d",
             mFrameWidth, mFrameHeight, mFrameFormat, AV_PIX_FMT_BGR24);

        mSurfaceFrame = av_frame_alloc();
        if (mSurfaceFrame == NULL) {
            LOGE("alloc frame failed");
            return ERROR;
        }
    }

    void* surfacePixels = NULL;
#ifdef __ANDROID__
    if (Surface_getPixels(mNativeWindow, &mSurfaceWidth, &mSurfaceHeight, &mSurfaceStride, &surfacePixels) != OK)
        return ERROR;
#else
    if (Surface_getPixels(&mSurfaceWidth, &mSurfaceHeight, &mSurfaceStride, &surfacePixels) != OK)
        return ERROR;
#endif

    // Convert the image
    int64_t begin_scale = getNowMs();
    if (mSurfaceStride >= mFrameWidth) {
        mSurfaceFrame->data[0] = (uint8_t*)surfacePixels;
#ifdef RENDER_RGB565
        mSurfaceFrame->linesize[0] = mSurfaceStride * 2;
#else
        mSurfaceFrame->linesize[0] = mSurfaceStride * 4;
#endif
        sws_scale(mConvertCtx, frame->data, frame->linesize, 0, frame->height,
                  mSurfaceFrame->data, mSurfaceFrame->linesize);
        LOGD("sws_scale frame width:%d", mFrameWidth);
        LOGD("sws_scale frame height:%d", mFrameHeight);
        LOGD("sws_scale surface width:%d", mSurfaceWidth);
        LOGD("sws_scale surface height:%d", mSurfaceHeight);
        LOGD("sws_scale surface stride:%d", mSurfaceStride);
    } else {
        LOGE("Surface memory is too small");
    }

    LOGD("before rendering frame");
#ifdef __ANDROID__
    if (Surface_updateSurface(mNativeWindow) != OK) {
#else
    if (Surface_updateSurface() != OK) {
#endif
        LOGE("Failed to render picture");
        return ERROR;
    }
    LOGD("after rendering frame");

    int64_t end_scale = getNowMs();
    int64_t costTime = end_scale - begin_scale;
    if (mAveScaleTimeMs == 0)
        mAveScaleTimeMs = costTime;
//......... part of the code omitted .........
Developer ID: xeon2007, Project: MeetSDK, Lines: 101
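Example 2 frees and rebuilds its context by hand whenever the setup changes. libswscale also provides sws_getCachedContext, which returns the existing context untouched when all parameters still match and only reallocates it otherwise. The sketch below is an illustrative assumption of how that looks; the static variable and function name are made up, and in real code the context would usually live in a member variable:

extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/frame.h>
}

static SwsContext *g_ctx = nullptr;   // illustrative global; a member in real code

static bool scale_frame(const AVFrame *src, AVFrame *dst)
{
    // Returns the old context unchanged when parameters match; otherwise
    // frees it and allocates a new one, so per-frame calls stay cheap.
    g_ctx = sws_getCachedContext(g_ctx,
                                 src->width, src->height, (AVPixelFormat)src->format,
                                 dst->width, dst->height, (AVPixelFormat)dst->format,
                                 SWS_BILINEAR, nullptr, nullptr, nullptr);
    if (!g_ctx)
        return false;
    sws_scale(g_ctx, (const uint8_t * const *)src->data, src->linesize,
              0, src->height, dst->data, dst->linesize);
    return true;
}

// At shutdown, release the cached context exactly once:
// sws_freeContext(g_ctx); g_ctx = nullptr;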
Example 3: config

//......... part of the code omitted .........
        // downscale!
        if (vo_flags & VFCAP_HWSCALE_DOWN) x = 0;
    } else {
        // upscale:
        if (vo_flags & VFCAP_HWSCALE_UP) x = 0;
    }
    if (x) {
        // user wants sw scaling! (-zoom)
        vf->priv->w = d_width;
        vf->priv->h = d_height;
    }
}

if (vf->priv->noup) {
    if ((vf->priv->w > width) + (vf->priv->h > height) >= vf->priv->noup) {
        vf->priv->w = width;
        vf->priv->h = height;
    }
}

if (vf->priv->w <= -8) {
    vf->priv->w += 8;
    round_w = 1;
}
if (vf->priv->h <= -8) {
    vf->priv->h += 8;
    round_h = 1;
}

if (vf->priv->w < -3 || vf->priv->h < -3 ||
    (vf->priv->w < -1 && vf->priv->h < -1)) {
    // TODO: establish a direct connection to the user's brain
    // and find out what the heck he thinks MPlayer should do
    // with this nonsense.
    mp_msg(MSGT_VFILTER, MSGL_ERR, "SwScale: EUSERBROKEN Check your parameters, they make no sense!\n");
    return 0;
}

if (vf->priv->w == -1) vf->priv->w = width;
if (vf->priv->w ==  0) vf->priv->w = d_width;
if (vf->priv->h == -1) vf->priv->h = height;
if (vf->priv->h ==  0) vf->priv->h = d_height;

if (vf->priv->w == -3) vf->priv->w = vf->priv->h * width / height;
if (vf->priv->w == -2) vf->priv->w = vf->priv->h * d_width / d_height;
if (vf->priv->h == -3) vf->priv->h = vf->priv->w * height / width;
if (vf->priv->h == -2) vf->priv->h = vf->priv->w * d_height / d_width;

if (round_w) vf->priv->w = ((vf->priv->w + 8) / 16) * 16;
if (round_h) vf->priv->h = ((vf->priv->h + 8) / 16) * 16;

// calculate the missing parameters:
switch (best) {
case IMGFMT_YV12:   /* YV12 needs w & h rounded to 2 */
case IMGFMT_I420:
case IMGFMT_IYUV:
case IMGFMT_NV12:
case IMGFMT_NV21:
    vf->priv->h = (vf->priv->h + 1) & ~1;
case IMGFMT_YUY2:   /* YUY2 needs w rounded to 2 */
case IMGFMT_UYVY:
    vf->priv->w = (vf->priv->w + 1) & ~1;
}

mp_msg(MSGT_VFILTER, MSGL_DBG2, "SwScale: scaling %dx%d %s to %dx%d %s \n",
       width, height, vo_format_name(outfmt),
       vf->priv->w, vf->priv->h, vo_format_name(best));

// free old ctx:
if (vf->priv->ctx)  sws_freeContext(vf->priv->ctx);
if (vf->priv->ctx2) sws_freeContext(vf->priv->ctx2);

// new swscaler:
sws_getFlagsAndFilterFromCmdLine(&int_sws_flags, &srcFilter, &dstFilter);
int_sws_flags |= vf->priv->v_chr_drop << SWS_SRC_V_CHR_DROP_SHIFT;
int_sws_flags |= vf->priv->accurate_rnd * SWS_ACCURATE_RND;
vf->priv->ctx = sws_getContext(width, height >> vf->priv->interlaced, sfmt,
                               vf->priv->w, vf->priv->h >> vf->priv->interlaced, dfmt,
                               int_sws_flags | get_sws_cpuflags(),
                               srcFilter, dstFilter, vf->priv->param);
if (vf->priv->interlaced) {
    vf->priv->ctx2 = sws_getContext(width, height >> 1, sfmt,
                                    vf->priv->w, vf->priv->h >> 1, dfmt,
                                    int_sws_flags | get_sws_cpuflags(),
                                    srcFilter, dstFilter, vf->priv->param);
}
Developer ID: dr4g0nsr, Project: mplayer-skyviia-8860, Lines: 101
Example 4: main

//......... part of the code omitted .........
while (1) {
    cur_size = fread(in_buffer, 1, in_buffer_size, fp_in);
    if (cur_size == 0)
        break;
    cur_ptr = in_buffer;

    while (cur_size > 0) {
        int len = av_parser_parse2(
            pCodecParserCtx, pCodecCtx,
            &packet.data, &packet.size,
            cur_ptr, cur_size,
            AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);

        cur_ptr += len;
        cur_size -= len;

        if (packet.size == 0)
            continue;

        // Some Info from AVCodecParserContext
        printf("Packet Size:%6d\t", packet.size);
        switch (pCodecParserCtx->pict_type) {
        case AV_PICTURE_TYPE_I: printf("Type: I\t"); break;
        case AV_PICTURE_TYPE_P: printf("Type: P\t"); break;
        case AV_PICTURE_TYPE_B: printf("Type: B\t"); break;
        default:                printf("Type: Other\t"); break;
        }
        printf("Output Number:%4d\t", pCodecParserCtx->output_picture_number);
        printf("Offset:%lld\n", pCodecParserCtx->cur_offset);

        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
        if (ret < 0) {
            printf("Decode Error.\n");
            return ret;
        }
        if (got_picture) {
            if (first_time) {
                printf("\nCodec Full Name:%s\n", pCodecCtx->codec->long_name);
                printf("width:%d\nheight:%d\n\n", pCodecCtx->width, pCodecCtx->height);
                // SwsContext
                img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                 pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P,
                                                 SWS_BICUBIC, NULL, NULL, NULL);
                pFrameYUV = av_frame_alloc();
                out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
                avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

                y_size = pCodecCtx->width * pCodecCtx->height;
                first_time = 0;
            }

            printf("Succeed to decode 1 frame!\n");

            sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                      0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

            fwrite(pFrameYUV->data[0], 1, y_size, fp_out);      // Y
            fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);  // U
            fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);  // V
        }
    }
}

// Flush Decoder
packet.data = NULL;
packet.size = 0;
while (1) {
    ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
    if (ret < 0) {
        printf("Decode Error.\n");
        return ret;
    }
    if (!got_picture)
        break;
    if (got_picture) {
        printf("Flush Decoder: Succeed to decode 1 frame!\n");
        sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                  0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
        fwrite(pFrameYUV->data[0], 1, y_size, fp_out);      // Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);  // U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);  // V
    }
}

fclose(fp_in);
fclose(fp_out);

sws_freeContext(img_convert_ctx);

av_parser_close(pCodecParserCtx);

av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
av_free(pCodecCtx);

return 0;
}
Developer ID: jiemojiemo, Project: ffmpeg-, Lines: 101
Example 5: while

bool VideoPlayer::playback()
{
    clock.restart();
    while (!stopPlayback && pFormatCtx && av_read_frame(pFormatCtx, &(packet)) >= 0) {
        if (!stopPlayback && (packet.stream_index == streamIndex)) {
            AVPacket avpkt;
            av_init_packet(&avpkt);
            avpkt.data = packet.data;
            avpkt.size = packet.size;
            avcodec_decode_video2(pCodecCtx, pFrame, &(frameFinished), &avpkt);

            double pts = 0;
            if (packet.dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
                pts = *(uint64_t *)pFrame->opaque;
            } else if (packet.dts != AV_NOPTS_VALUE) {
                pts = packet.dts;
            } else {
                pts = 0;
            }
            pts *= av_q2d(video_st->time_base);

            if (frameFinished) {
                dd->boundingWidth = dd->boundingRect().width();
                dd->boundingHeight = dd->boundingRect().height();
                if (dd->boundingWidth > screenWidth) {
                    dd->boundingWidth = screenWidth;
                }
                if (dd->boundingHeight > screenHeight) {
                    dd->boundingHeight = screenHeight;
                }

                int useFilter = SWS_FAST_BILINEAR;
                switch (dd->m_swsFilter) {
                case DD_F_FAST_BILINEAR: useFilter = SWS_FAST_BILINEAR; break;
                case DD_F_BILINEAR:      useFilter = SWS_BILINEAR;      break;
                case DD_F_BICUBIC:       useFilter = SWS_BICUBIC;       break;
                case DD_F_X:             useFilter = SWS_X;             break;
                case DD_F_POINT:         useFilter = SWS_POINT;         break;
                case DD_F_AREA:          useFilter = SWS_AREA;          break;
                case DD_F_BICUBLIN:      useFilter = SWS_BICUBLIN;      break;
                case DD_F_GAUSS:         useFilter = SWS_GAUSS;         break;
                case DD_F_SINC:          useFilter = SWS_SINC;          break;
                case DD_F_LANCZOS:       useFilter = SWS_LANCZOS;       break;
                case DD_F_SPLINE:        useFilter = SWS_SPLINE;        break;
                }

                SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                             dd->boundingWidth, dd->boundingHeight, PIX_FMT_RGB32,
                                                             useFilter, NULL, NULL, NULL);

                dd->mutex->lock();
                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          dd->pFrameRGB->data, dd->pFrameRGB->linesize);
                sws_freeContext(img_convert_ctx);
                dd->mutex->unlock();

                pts = synchronize_video(pts);
                double delay = 0;
                switch (dd->m_fpsRate) {
                case DD_FPS_AUTO:     delay = (pts - last_pts); break;
                case DD_FPS_LIMIT_30: delay = 0.0333; break;
                case DD_FPS_LIMIT_25: delay = 0.04;   break;
                case DD_FPS_LIMIT_20: delay = 0.05;   break;
                case DD_FPS_LIMIT_15: delay = 0.0666; break;
                case DD_FPS_LIMIT_10: delay = 0.1;    break;
                }
                if (delay <= 0 || delay >= 1.0) {
                    delay = last_delay;
                }
                last_pts = pts;
                last_delay = delay;

                int elapsed = clock.restart();
                int wait = (delay * 1000) - elapsed;
                dd->updateFrame();
                if (wait > 0) {
//......... part of the code omitted .........
Developer ID: Drezil, Project: Plasma-Wallpaper-DreamDesktop, Lines: 101
Example 6: config

static int config(struct vo *vo, uint32_t width, uint32_t height,
                  uint32_t d_width, uint32_t d_height, uint32_t flags,
                  uint32_t format)
{
    struct priv *p = vo->priv;

    Colormap theCmap;
    const struct fmt2Xfmtentry_s *fmte = fmt2Xfmt;

#ifdef CONFIG_XF86VM
    int vm = flags & VOFLAG_MODESWITCHING;
#endif
    p->Flip_Flag = flags & VOFLAG_FLIPPING;
    p->zoomFlag = 1;

    p->old_vo_dwidth = -1;
    p->old_vo_dheight = -1;

    p->in_format = format;
    p->srcW = width;
    p->srcH = height;

    XGetWindowAttributes(vo->x11->display, vo->x11->rootwin, &p->attribs);
    p->depth = p->attribs.depth;

    if (p->depth != 15 && p->depth != 16 && p->depth != 24 && p->depth != 32) {
        Visual *visual;
        p->depth = vo_find_depth_from_visuals(vo->x11->display, vo->x11->screen,
                                              &visual);
    }
    if (!XMatchVisualInfo(vo->x11->display, vo->x11->screen, p->depth,
                          DirectColor, &p->vinfo) ||
        (WinID > 0 &&
         p->vinfo.visualid != XVisualIDFromVisual(p->attribs.visual)))
    {
        XMatchVisualInfo(vo->x11->display, vo->x11->screen, p->depth, TrueColor,
                         &p->vinfo);
    }

    /* set image size (which is indeed neither the input nor output size),
       if zoom is on it will be changed during draw_slice anyway so we don't
       duplicate the aspect code here */
    p->image_width = (width + 7) & (~7);
    p->image_height = height;

    {
#ifdef CONFIG_XF86VM
        if (vm)
            vo_vm_switch(vo);
#endif
        theCmap = vo_x11_create_colormap(vo, &p->vinfo);

        vo_x11_create_vo_window(vo, &p->vinfo, vo->dx, vo->dy, vo->dwidth,
                                vo->dheight, flags, theCmap, "x11");
        if (WinID > 0)
            p->depth = vo_x11_update_geometry(vo, true);

#ifdef CONFIG_XF86VM
        if (vm) {
            /* Grab the mouse pointer in our window */
            if (vo_grabpointer)
                XGrabPointer(vo->x11->display, vo->x11->window, True, 0,
                             GrabModeAsync, GrabModeAsync,
                             vo->x11->window, None, CurrentTime);
            XSetInputFocus(vo->x11->display, vo->x11->window, RevertToNone,
                           CurrentTime);
        }
#endif
    }

    if (p->myximage) {
        freeMyXImage(p);
        sws_freeContext(p->swsContext);
    }
    getMyXImage(p);

    while (fmte->mpfmt) {
        int depth = IMGFMT_RGB_DEPTH(fmte->mpfmt);
        /* bits_per_pixel in X seems to be set to 16 for 15 bit formats
           => force depth to 16 so that only the color masks are used for
           the format check */
        if (depth == 15)
            depth = 16;

        if (depth == p->myximage->bits_per_pixel &&
            fmte->byte_order == p->myximage->byte_order &&
            fmte->red_mask == p->myximage->red_mask &&
            fmte->green_mask == p->myximage->green_mask &&
            fmte->blue_mask == p->myximage->blue_mask)
            break;
        fmte++;
    }
    if (!fmte->mpfmt) {
        mp_msg(MSGT_VO, MSGL_ERR,
               "X server image format not supported, please contact the developers\n");
        return -1;
    }
//......... part of the code omitted .........
Developer ID: kax4, Project: mpv, Lines: 101
Example 7: config

static int config(uint32_t width, uint32_t height, uint32_t d_width,
                  uint32_t d_height, uint32_t flags, char *title,
                  uint32_t format)
{
//  int screen;
//  int interval, prefer_blank, allow_exp, nothing;
    Colormap theCmap;
    const struct fmt2Xfmtentry_s *fmte = fmt2Xfmt;

#ifdef CONFIG_XF86VM
    int vm = flags & VOFLAG_MODESWITCHING;
#endif
    Flip_Flag = flags & VOFLAG_FLIPPING;
    zoomFlag = flags & VOFLAG_SWSCALE;

    old_vo_dwidth = -1;
    old_vo_dheight = -1;

    int_pause = 0;
    if (!title)
        title = "MPlayer X11 (XImage/Shm) render";

    in_format = format;
    srcW = width;
    srcH = height;

    XGetWindowAttributes(mDisplay, mRootWin, &attribs);
    depth = attribs.depth;

    if (depth != 15 && depth != 16 && depth != 24 && depth != 32) {
        Visual *visual;
        depth = vo_find_depth_from_visuals(mDisplay, mScreen, &visual);
    }
    if (!XMatchVisualInfo(mDisplay, mScreen, depth, DirectColor, &vinfo) ||
        (WinID > 0 && vinfo.visualid != XVisualIDFromVisual(attribs.visual)))
        XMatchVisualInfo(mDisplay, mScreen, depth, TrueColor, &vinfo);

    /* set image size (which is indeed neither the input nor output size),
       if zoom is on it will be changed during draw_slice anyway so we don't
       duplicate the aspect code here */
    image_width = (width + 7) & (~7);
    image_height = height;

    {
#ifdef CONFIG_XF86VM
        if (vm) {
            vo_vm_switch();
        }
#endif
        theCmap = vo_x11_create_colormap(&vinfo);

        vo_x11_create_vo_window(&vinfo, vo_dx, vo_dy, vo_dwidth, vo_dheight,
                                flags, theCmap, "x11", title);
        if (WinID > 0)
            depth = vo_x11_update_geometry();

#ifdef CONFIG_XF86VM
        if (vm) {
            /* Grab the mouse pointer in our window */
            if (vo_grabpointer)
                XGrabPointer(mDisplay, vo_window, True, 0, GrabModeAsync,
                             GrabModeAsync, vo_window, None, CurrentTime);
            XSetInputFocus(mDisplay, vo_window, RevertToNone, CurrentTime);
        }
#endif
    }

    if (myximage) {
        freeMyXImage();
        sws_freeContext(swsContext);
    }
    getMyXImage();

    while (fmte->mpfmt) {
        int depth = IMGFMT_RGB_DEPTH(fmte->mpfmt);
        /* bits_per_pixel in X seems to be set to 16 for 15 bit formats
           => force depth to 16 so that only the color masks are used for
           the format check */
        if (depth == 15)
            depth = 16;

        if (depth == myximage->bits_per_pixel &&
            fmte->byte_order == myximage->byte_order &&
            fmte->red_mask == myximage->red_mask &&
            fmte->green_mask == myximage->green_mask &&
            fmte->blue_mask == myximage->blue_mask)
            break;
        fmte++;
    }
    if (!fmte->mpfmt) {
        mp_msg(MSGT_VO, MSGL_ERR,
               "X server image format not supported, please contact the developers\n");
//......... part of the code omitted .........
Developer ID: JasonFengIce, Project: mplayer-android, Lines: 101
Example 8: avpicture_free

bool FFmpegVideoDecoder::nextFrame( CBaseTexture * texture )
{
  // Just in case
  if ( !m_pCodecCtx )
    return false;

  // If we did not preallocate the picture or the texture size changed, (re)allocate it
  if ( !m_pFrameRGB || texture->GetWidth() != m_frameRGBwidth || texture->GetHeight() != m_frameRGBheight )
  {
    if ( m_pFrameRGB )
    {
      avpicture_free( m_pFrameRGB );
      av_free( m_pFrameRGB );
    }

    m_frameRGBwidth = texture->GetWidth();
    m_frameRGBheight = texture->GetHeight();

    // Allocate the conversion frame and relevant picture
    m_pFrameRGB = (AVPicture*)av_mallocz(sizeof(AVPicture));
    if ( !m_pFrameRGB )
      return false;

    // Due to a bug in swsscale we need to allocate one extra line of data
    if ( avpicture_alloc( m_pFrameRGB, PIX_FMT_RGB32, m_frameRGBwidth, m_frameRGBheight + 1 ) < 0 )
      return false;
  }

  AVPacket packet;
  int frameFinished;

  while ( true )
  {
    // Read a frame
    if ( av_read_frame( m_pFormatCtx, &packet ) < 0 )
      return false;  // Frame read failed (e.g. end of stream)

    if ( packet.stream_index == m_videoStream )
    {
      // Is this a packet from the video stream -> decode video frame
      avcodec_decode_video2( m_pCodecCtx, m_pFrame, &frameFinished, &packet );

      // Did we get a video frame?
      if ( frameFinished )
      {
        if ( packet.dts != (int64_t)AV_NOPTS_VALUE )
          m_lastFrameTime = packet.dts * av_q2d( m_pFormatCtx->streams[ m_videoStream ]->time_base );
        else
          m_lastFrameTime = 0.0;

        break;
      }
    }

    av_free_packet( &packet );
  }

  // We got the video frame, render it into the picture buffer
  struct SwsContext * context = sws_getContext( m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt,
                                                m_frameRGBwidth, m_frameRGBheight, PIX_FMT_RGB32,
                                                SWS_FAST_BILINEAR, NULL, NULL, NULL );

  sws_scale( context, m_pFrame->data, m_pFrame->linesize, 0, m_pCodecCtx->height,
             m_pFrameRGB->data, m_pFrameRGB->linesize );
  sws_freeContext( context );
  av_free_packet( &packet );

  // And into the texture
  texture->Update( m_frameRGBwidth, m_frameRGBheight, m_frameRGBwidth * 4, XB_FMT_A8R8G8B8, m_pFrameRGB->data[0], false );

  return true;
}
Developer ID: Karlson2k, Project: xbmc, Lines: 72
Example 9: avpicture_fill

bool CFFmpegImage::Decode(unsigned char * const pixels, unsigned int width, unsigned int height,
                          unsigned int pitch, unsigned int format)
{
  if (m_width == 0 || m_height == 0 || format != XB_FMT_A8R8G8B8)
    return false;

  if (!m_pFrame || !m_pFrame->data[0])
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "AVFrame member not allocated");
    return false;
  }

  AVPicture* pictureRGB = static_cast<AVPicture*>(av_mallocz(sizeof(AVPicture)));
  if (!pictureRGB)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "AVPicture could not be allocated");
    return false;
  }

  int size = avpicture_fill(pictureRGB, NULL, AV_PIX_FMT_RGB32, width, height);
  if (size < 0)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVPicture member with %i x %i pixes", width, height);
    av_free(pictureRGB);
    return false;
  }

  bool needsCopy = false;
  int pixelsSize = pitch * height;
  if (size == pixelsSize && (int) pitch == pictureRGB->linesize[0])
  {
    // We can use the pixels buffer directly
    pictureRGB->data[0] = pixels;
  }
  else
  {
    // We need an extra buffer and copy it manually afterwards
    if (avpicture_alloc(pictureRGB, AV_PIX_FMT_RGB32, width, height) < 0)
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate temp buffer of size %i bytes", size);
      av_free(pictureRGB);
      return false;
    }
    needsCopy = true;
  }

  // Especially jpeg formats are full range this we need to take care here
  // Input Formats like RGBA are handled correctly automatically
  AVColorRange range = av_frame_get_color_range(m_pFrame);
  AVPixelFormat pixFormat = ConvertFormats(m_pFrame);

  // assumption quadratic maximums e.g. 2048x2048
  float ratio = m_width / (float) m_height;
  unsigned int nHeight = m_originalHeight;
  unsigned int nWidth = m_originalWidth;
  if (nHeight > height)
  {
    nHeight = height;
    nWidth = (unsigned int) (nHeight * ratio + 0.5f);
  }
  if (nWidth > width)
  {
    nWidth = width;
    nHeight = (unsigned int) (nWidth / ratio + 0.5f);
  }

  struct SwsContext* context = sws_getContext(m_originalWidth, m_originalHeight, pixFormat,
                                              nWidth, nHeight, AV_PIX_FMT_RGB32,
                                              SWS_BICUBIC, NULL, NULL, NULL);

  if (range == AVCOL_RANGE_JPEG)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;
    sws_getColorspaceDetails(context, &inv_table, &srcRange, &table, &dstRange,
                             &brightness, &contrast, &saturation);
    srcRange = 1;
    sws_setColorspaceDetails(context, inv_table, srcRange, table, dstRange,
                             brightness, contrast, saturation);
  }

  sws_scale(context, m_pFrame->data, m_pFrame->linesize, 0, m_originalHeight,
            pictureRGB->data, pictureRGB->linesize);
  sws_freeContext(context);

  if (needsCopy)
  {
    int minPitch = std::min((int)pitch, pictureRGB->linesize[0]);
    if (minPitch < 0)
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "negative pitch or height");
      av_free(pictureRGB);
      return false;
    }
    const unsigned char *src = pictureRGB->data[0];
    unsigned char* dst = pixels;

    for (unsigned int y = 0; y < nHeight; y++)
    {
      memcpy(dst, src, minPitch);
      src += pictureRGB->linesize[0];
      dst += pitch;
//......... part of the code omitted .........
Developer ID: qaweizoux, Project: xbmc, Lines: 101
Example 10: FreeEncoder

void FreeEncoder(x264_t* encoder) {
    x264_encoder_close(encoder);
    sws_freeContext(convertCtx);
}
Developer ID: itbrandonsilva, Project: remote-local, Lines: 4
Example 11: FreeFfmpeg

void FreeFfmpeg(AVCodecContext* ctx) {
    avcodec_close(ctx);
    av_frame_free(&av_frame_rgba);
    av_frame_free(&av_frame);
    sws_freeContext(convert_context);
}
Developer ID: itbrandonsilva, Project: remote-local, Lines: 6
Example 12: sws_freeContext

    ~ImageConverterFFPrivate() {
        if (sws_ctx) {
            sws_freeContext(sws_ctx);
            sws_ctx = 0;
        }
    }
Developer ID: JeremyWong0715, Project: QtAV, Lines: 6
Example 13: sws_freeContext

RasterRenderPrivate::~RasterRenderPrivate()
{
    sws_freeContext(swsctx);
    delete srcFrame;
    delete dstFrame;
}
Developer ID: FantasyNJ, Project: BiliLocal, Lines: 6
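Examples 12 and 13 both release the context in a destructor. In modern C++ the same idea is often expressed with std::unique_ptr and a custom deleter, so the context is freed automatically on every exit path. This is a sketch of that idiom, not code from either project; sws_freeContext is documented to do nothing when passed NULL, so an empty pointer is safe:

#include <memory>
extern "C" {
#include <libswscale/swscale.h>
}

// Deleter that forwards to sws_freeContext(); safe to call with nullptr.
struct SwsContextDeleter {
    void operator()(SwsContext *ctx) const { sws_freeContext(ctx); }
};
using SwsContextPtr = std::unique_ptr<SwsContext, SwsContextDeleter>;

// Usage: the context is freed when 'SwsContextPtr' goes out of scope.
SwsContextPtr make_scaler(int w, int h)
{
    return SwsContextPtr(sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                                        w, h, AV_PIX_FMT_RGBA,
                                        SWS_BILINEAR, nullptr, nullptr, nullptr));
}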
Example 14: output_single_frame

static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i,
                               double *zoom, double *dx, double *dy)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = s->frame_count;
    int k, x, y, w, h, ret = 0;
    uint8_t *input[4];
    int px[4], py[4];
    AVFrame *out;

    var_values[VAR_PX]        = s->x;
    var_values[VAR_PY]        = s->y;
    var_values[VAR_PZOOM]     = s->prev_zoom;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_TIME]      = pts * av_q2d(outlink->time_base);
    var_values[VAR_FRAME]     = i;
    var_values[VAR_ON]        = outlink->frame_count + 1;
    if ((ret = av_expr_parse_and_eval(zoom, s->zoom_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;

    *zoom = av_clipd(*zoom, 1, 10);
    var_values[VAR_ZOOM] = *zoom;
    w = in->width  * (1.0 / *zoom);
    h = in->height * (1.0 / *zoom);

    if ((ret = av_expr_parse_and_eval(dx, s->x_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
    var_values[VAR_X] = *dx;
    x &= ~((1 << s->desc->log2_chroma_w) - 1);

    if ((ret = av_expr_parse_and_eval(dy, s->y_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
    var_values[VAR_Y] = *dy;
    y &= ~((1 << s->desc->log2_chroma_h) - 1);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
    px[0] = px[3] = x;
    py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
    py[0] = py[3] = y;

    s->sws = sws_alloc_context();
    if (!s->sws) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    for (k = 0; in->data[k]; k++)
        input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

    av_opt_set_int(s->sws, "srcw", w, 0);
    av_opt_set_int(s->sws, "srch", h, 0);
    av_opt_set_int(s->sws, "src_format", in->format, 0);
    av_opt_set_int(s->sws, "dstw", outlink->w, 0);
    av_opt_set_int(s->sws, "dsth", outlink->h, 0);
    av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
    av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

    if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
        return ret;

    sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize,
              0, h, out->data, out->linesize);

    out->pts = pts;
    s->frame_count++;

    ret = ff_filter_frame(outlink, out);
    sws_freeContext(s->sws);
    s->sws = NULL;
    s->current_frame++;
    return ret;
}
Developer ID: chris-magic, Project: xplayer, Lines: 87
Example 15: INFO

//......... part of the code omitted .........
    /* Determine required buffer size and allocate buffer */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, _phone->webcam_decoder_ctx->width,
                                  _phone->webcam_decoder_ctx->height);

    buffer = (uint8_t *)av_calloc(numBytes * sizeof(uint8_t), 1);

    avpicture_fill((AVPicture *)s_video_frame, buffer, PIX_FMT_YUV420P,
                   _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height);

    _phone->sws_ctx = sws_getContext(_phone->webcam_decoder_ctx->width,
                                     _phone->webcam_decoder_ctx->height,
                                     _phone->webcam_decoder_ctx->pix_fmt,
                                     _phone->webcam_decoder_ctx->width,
                                     _phone->webcam_decoder_ctx->height,
                                     PIX_FMT_YUV420P,
                                     SWS_BILINEAR, NULL, NULL, NULL);

    vpx_image_t *image = vpx_img_alloc(NULL, VPX_IMG_FMT_I420,
                                       _phone->webcam_decoder_ctx->width,
                                       _phone->webcam_decoder_ctx->height, 1);

    //uint32_t frame_counter = 0;
    while (_phone->running_encvid) {
        if (av_read_frame(_phone->video_format_ctx, packet) < 0) {
            printf("error reading frame\n");
            if (_phone->video_format_ctx->pb->error != 0)
                break;
            continue;
        }

        if (packet->stream_index == _phone->video_stream) {
            if (avcodec_decode_video2(_phone->webcam_decoder_ctx, webcam_frame,
                                      &video_frame_finished, packet) < 0) {
                printf("couldn't decode\n");
                continue;
            }

            av_free_packet(packet);
            sws_scale(_phone->sws_ctx, (uint8_t const * const *)webcam_frame->data,
                      webcam_frame->linesize, 0,
                      _phone->webcam_decoder_ctx->height, s_video_frame->data,
                      s_video_frame->linesize);

            /* create a new I-frame every 60 frames */
            //++p;
            /*
            if (p == 60) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_BI;
            } else if (p == 61) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_I;
                p = 0;
            } else {
                s_video_frame->pict_type = AV_PICTURE_TYPE_P;
            }
            */

            if (video_frame_finished) {
                memcpy(image->planes[VPX_PLANE_Y], s_video_frame->data[0],
                       s_video_frame->linesize[0] * _phone->webcam_decoder_ctx->height);
                memcpy(image->planes[VPX_PLANE_U], s_video_frame->data[1],
                       s_video_frame->linesize[1] * _phone->webcam_decoder_ctx->height / 2);
                memcpy(image->planes[VPX_PLANE_V], s_video_frame->data[2],
                       s_video_frame->linesize[2] * _phone->webcam_decoder_ctx->height / 2);
                toxav_send_video(_phone->av, image);

                //if (avcodec_encode_video2(cs->video_encoder_ctx, &enc_video_packet, s_video_frame, &got_packet) < 0) {
                /*
                if (vpx_codec_encode(&cs->v_encoder, image, frame_counter, 1, 0, 0) != VPX_CODEC_OK) {
                    printf("could not encode video frame\n");
                    continue;
                }
                ++frame_counter;

                vpx_codec_iter_t iter = NULL;
                vpx_codec_cx_pkt_t *pkt;
                while ((pkt = vpx_codec_get_cx_data(&cs->v_encoder, &iter))) {
                    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
                        toxav_send_rtp_payload(_phone->av, TypeVideo, pkt->data.frame.buf, pkt->data.frame.sz);
                }
                */
                //if (!got_packet) {
                //    continue;
                //}

                //if (!enc_video_packet.data) fprintf(stderr, "video packet data is NULL\n");

                //toxav_send_rtp_payload(_phone->av, TypeVideo, enc_video_packet.data, enc_video_packet.size);
                //av_free_packet(&enc_video_packet);
            }
        } else {
            av_free_packet(packet);
        }
    }

    vpx_img_free(image);

    /* clean up codecs */
    //pthread_mutex_lock(&cs->ctrl_mutex);
    av_free(buffer);
    av_free(webcam_frame);
    av_free(s_video_frame);
    sws_freeContext(_phone->sws_ctx);
    //avcodec_close(webcam_decoder_ctx);
    //avcodec_close(cs->video_encoder_ctx);
    //pthread_mutex_unlock(&cs->ctrl_mutex);

    _phone->running_encvid = -1;

    pthread_exit(NULL);
}
Developer ID: aarvay, Project: ProjectTox-Core, Lines: 101
Example 16: av_dump_format

//......... part of the code omitted .........
        if (ret == AVERROR_EOF)
            break;
        else
            av_err_msg("av_read_frame", ret);
    }
    if (packet.stream_index == cam->video_stream_index) {
        // start on keyframe
        if (!got_key_frame && !(packet.flags & AV_PKT_FLAG_KEY)) {
            continue;
        }
        got_key_frame = 1;

        avcodec_get_frame_defaults(frame);
        got_frame = 0;
        cnt = (cnt + 1) % cam->analize_frames;
        if (cnt == 0) {
            if ((ret = avcodec_decode_video2(cam->codec, frame, &got_frame, &packet)) < 0)
                av_err_msg("avcodec_decode_video2", ret);
            if (got_frame) {
                if (detect_motion(&md, frame)) {
                    if (first_activity == 0)
                        first_activity = time(NULL);
                    last_activity = time(NULL);
                } else {
                    if (first_activity > 0 && time(NULL) - last_activity > cam->motion_delay) {
                        if (!first_detection)
                            db_create_event(cam->id, first_activity, last_activity);
                        else
                            first_detection = 0;
                        first_activity = 0;
                    }
                }
            }
            if (time(NULL) - cam->last_screenshot > 60 && (packet.flags & AV_PKT_FLAG_KEY)) {
                char fname[128];
                snprintf(fname, sizeof(fname), "%s/%s/screenshot.png", store_dir, cam->name);
                cvSaveImage(fname, md.cur, 0);
                cam->last_screenshot = time(NULL);
            }
        }

        packet.stream_index = cam->output_stream->id;
        if ((ret = av_write_frame(cam->output_context, &packet)) < 0)
            av_err_msg("av_write_frame", ret);

        pthread_mutex_lock(&cam->consumers_lock);
        for (l1 *p = cam->cam_consumers_list; p != NULL; p = p->next) {
            struct cam_consumer *consumer = (struct cam_consumer *)p->value;
            if (!consumer->screen->active)
                continue;

            if (consumer->screen->tmpl_size == 1) {
                packet.stream_index = 0;
                if ((ret = av_write_frame(consumer->screen->rtp_context, &packet)) < 0)
                    av_err_msg("av_write_frame", ret);
            } else {
                if (!got_frame) {
                    if ((ret = avcodec_decode_video2(cam->codec, frame, &got_frame, &packet)) < 0) {
                        av_err_msg("avcodec_decode_video2", ret);
                        break;
                    }
                }
                if (got_frame)
                    copy_frame_to_consumer(frame, cam->codec->height, consumer);
            }
        }
        pthread_mutex_unlock(&cam->consumers_lock);
    }
    av_free_packet(&packet);

    if (!cam->active) {
        break;
    }

    if (time(NULL) - cam->file_started_at > 60 * 60) {
        db_update_videofile(cam);
        close_output(cam);
        open_output(cam);
        got_key_frame = 0;
    }
}

db_update_videofile(cam);
close_output(cam);

if ((ret = avcodec_close(cam->codec)) < 0)
    av_err_msg("avcodec_close", ret);
avformat_close_input(&cam->context);
av_free(frame);
cvReleaseImage(&md.prev);
cvReleaseImage(&md.cur);
cvReleaseImage(&md.silh);
av_free(md.buffer);
sws_freeContext(md.img_convert_ctx);
return NULL;
}
Developer ID: sanek701, Project: CCTV-linux-msiu, Lines: 101
Example 17: switch

bool CRetroPlayerVideo::CheckConfiguration(const DVDVideoPicture &picture)
{
  const double framerate = 1 / picture.iDuration;

  if (g_renderManager.IsConfigured() &&
      m_outputWidth == picture.iWidth &&
      m_outputHeight == picture.iHeight &&
      m_outputFramerate == framerate)
  {
    // Already configured properly
    return true;
  }

  // Determine RenderManager flags
  unsigned int flags = 0;
  if (picture.color_range == 1)
    flags |= CONF_FLAGS_YUV_FULLRANGE;
  flags |= CONF_FLAGS_YUVCOEF_BT601; // picture.color_matrix = 4
  if (m_bAllowFullscreen)
  {
    flags |= CONF_FLAGS_FULLSCREEN;
    m_bAllowFullscreen = false; // only allow on first configure
  }

  CLog::Log(LOGDEBUG, "RetroPlayerVideo: Change configuration: %dx%d, %4.2f fps",
            picture.iWidth, picture.iHeight, framerate);

  int orientation = 0; // (90 = 5, 180 = 2, 270 = 7), if we ever want to use RETRO_ENVIRONMENT_SET_ROTATION
  if (!g_renderManager.Configure(picture.iWidth, picture.iHeight,
                                 picture.iDisplayWidth, picture.iDisplayHeight,
                                 (float)framerate, flags, picture.format,
                                 picture.extended_format, orientation))
  {
    CLog::Log(LOGERROR, "RetroPlayerVideo: Failed to configure renderer");
    return false;
  }

  m_outputWidth = picture.iWidth;
  m_outputHeight = picture.iHeight;
  m_outputFramerate = framerate;

  PixelFormat format;
  switch (m_pixelFormat)
  {
  case GAME_PIXEL_FORMAT_XRGB8888:
    CLog::Log(LOGINFO, "RetroPlayerVideo: Pixel Format: XRGB8888, using PIX_FMT_0RGB32");
    format = PIX_FMT_0RGB32;
    break;
  case GAME_PIXEL_FORMAT_RGB565:
    CLog::Log(LOGINFO, "RetroPlayerVideo: Pixel Format: RGB565, using PIX_FMT_RGB565");
    format = PIX_FMT_RGB565;
    break;
  case GAME_PIXEL_FORMAT_0RGB1555:
  default:
    CLog::Log(LOGINFO, "RetroPlayerVideo: Pixel Format: 0RGB1555, using PIX_FMT_RGB555");
    format = PIX_FMT_RGB555;
    break;
  }

  if (m_swsContext)
    sws_freeContext(m_swsContext);

  m_swsContext = sws_getContext(
    picture.iWidth, picture.iHeight, format,
    picture.iWidth, picture.iHeight, PIX_FMT_YUV420P,
    SWS_FAST_BILINEAR | SwScaleCPUFlags(), NULL, NULL, NULL
  );

  return true;
}
Developer ID: pixl-project, Project: xbmc, Lines: 75
Example 18: process_instance

int process_instance( livido_port_t *my_instance, double timecode )
{
    uint8_t *A[4] = {NULL,NULL,NULL,NULL};
    uint8_t *O[4] = {NULL,NULL,NULL,NULL};

    int palette;
    int w;
    int h;

    lvd_crop_t *crop = NULL;
    livido_property_get( my_instance, "PLUGIN_private", 0, &crop );
    if ( crop == NULL )
        return LIVIDO_ERROR_INTERNAL;

    int error = lvd_extract_channel_values( my_instance, "out_channels", 0, &w, &h, O, &palette );
    if ( error != LIVIDO_NO_ERROR )
        return LIVIDO_ERROR_NO_OUTPUT_CHANNELS;

    error = lvd_extract_channel_values( my_instance, "in_channels", 0, &w, &h, A, &palette );
    if ( error != LIVIDO_NO_ERROR )
        return LIVIDO_ERROR_NO_INPUT_CHANNELS;

    int left   = lvd_extract_param_index( my_instance, "in_parameters", 0 );
    int right  = lvd_extract_param_index( my_instance, "in_parameters", 1 );
    int top    = lvd_extract_param_index( my_instance, "in_parameters", 2 );
    int bottom = lvd_extract_param_index( my_instance, "in_parameters", 3 );
    int scale  = lvd_extract_param_index( my_instance, "in_parameters", 4 );

    int tmp_w = ( w - left - right );
    int tmp_h = h - top - bottom;

    if ( tmp_w < 0 ) tmp_w = 0;
    if ( tmp_h < 0 ) tmp_h = 0;

    if ( tmp_w != crop->w || tmp_h != crop->h ) {
        if ( crop->sws ) {
            sws_freeContext( crop->sws );
            crop->sws = NULL;
        }
        crop->w = tmp_w;
        crop->h = tmp_h;
    }

    int crop_strides[4] = { crop->w, crop->w, crop->w, 0 };
    int dst_strides[4]  = { w, w, w, 0 };

    if ( !lvd_crop_plane( crop->buf[0], A[0], left, right, top, bottom, w, h ) )
        return LIVIDO_NO_ERROR;
    if ( !lvd_crop_plane( crop->buf[1], A[1], left, right, top, bottom, w, h ) )
        return LIVIDO_NO_ERROR;
    if ( !lvd_crop_plane( crop->buf[2], A[2], left, right, top, bottom, w, h ) )
        return LIVIDO_NO_ERROR;

    if ( crop->sws == NULL ) {
        crop->sws = sws_getContext( crop->w, crop->h, PIX_FMT_YUV444P,
                                    w, h, PIX_FMT_YUV444P,
                                    crop->flags, NULL, NULL, NULL );
        if ( crop->sws == NULL )
            return LIVIDO_ERROR_INTERNAL;
    }

    sws_scale( crop->sws, (const uint8_t * const *)crop->buf, crop_strides,
               0, crop->h, (uint8_t * const *)O, dst_strides );

    return LIVIDO_NO_ERROR;
}
Developer ID: c0ntrol, Project: veejay, Lines: 68
Example 19: draw_osd

static void draw_osd(struct vo *vo, struct osd_state *osd)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);

    struct mp_osd_res res = {
        .w = img.w,
        .h = img.h,
        .display_par = vo->monitor_par,
        .video_par = vo->aspdat.par,
    };

    osd_draw_on_image_bk(osd, res, osd->vo_pts, 0, p->osd_backup, &img);
}

static mp_image_t *get_screenshot(struct vo *vo)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);
    struct mp_image *res = alloc_mpi(img.w, img.h, img.imgfmt);
    copy_mpi(res, &img);
    mp_draw_sub_backup_restore(p->osd_backup, res);

    return res;
}

static int redraw_frame(struct vo *vo)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);
    mp_draw_sub_backup_restore(p->osd_backup, &img);

    return true;
}

static void flip_page(struct vo *vo)
{
    struct priv *p = vo->priv;
    Display_Image(p, p->myximage, p->ImageData);
    XSync(vo->x11->display, False);
}

static int draw_slice(struct vo *vo, uint8_t *src[], int stride[], int w, int h,
                      int x, int y)
{
    struct priv *p = vo->priv;
    uint8_t *dst[MP_MAX_PLANES] = {NULL};
    int dstStride[MP_MAX_PLANES] = {0};

    if ((p->old_vo_dwidth != vo->dwidth || p->old_vo_dheight != vo->dheight)
        /*&& y==0 */ && p->zoomFlag)
    {
        int newW = vo->dwidth;
        int newH = vo->dheight;
        struct SwsContext *oldContext = p->swsContext;

        p->old_vo_dwidth = vo->dwidth;
        p->old_vo_dheight = vo->dheight;

        if (vo_fs)
            aspect(vo, &newW, &newH, A_ZOOM);
        if (sws_flags == 0)
            newW &= (~31); // not needed but, if the user wants the FAST_BILINEAR SCALER, then its needed

        p->swsContext = sws_getContextFromCmdLine(p->srcW, p->srcH,
                                                  p->in_format, newW, newH,
                                                  p->out_format);
        if (p->swsContext) {
            p->image_width = (newW + 7) & (~7);
            p->image_height = newH;

            freeMyXImage(p);
            getMyXImage(p);
            sws_freeContext(oldContext);
        } else
            p->swsContext = oldContext;
        p->dst_width = newW;
    }

    dstStride[0] = p->image_width * ((p->bpp + 7) / 8);
    dst[0] = p->ImageData;
    if (p->Flip_Flag) {
        dst[0] += dstStride[0] * (p->image_height - 1);
        dstStride[0] = -dstStride[0];
    }
    sws_scale(p->swsContext, (const uint8_t **)src, stride, y, h, dst,
              dstStride);
    mp_draw_sub_backup_reset(p->osd_backup);
    return 0;
}

static int query_format(struct vo *vo, uint32_t format)
{
    mp_msg(MSGT_VO, MSGL_DBG2,
           "vo_x11: query_format was called: %x (%s)\n", format,
           vo_format_name(format));
    if (IMGFMT_IS_BGR(format)) {
//......... part of the code omitted .........
Developer ID: kax4, Project: mpv, Lines: 101
Example 20: init_picture_from_frame

int FFMPEG::convert_cmodel(AVPicture *picture_in, PixelFormat pix_fmt_in,
                           int width_in, int height_in, VFrame *frame_out)
{
    // set up a temporary picture_out from frame_out
    AVPicture picture_out;
    init_picture_from_frame(&picture_out, frame_out);
    int cmodel_out = frame_out->get_color_model();
    PixelFormat pix_fmt_out = color_model_to_pix_fmt(cmodel_out);

#ifdef HAVE_SWSCALER
    // We need a context for swscale
    struct SwsContext *convert_ctx;
#endif
    int result;

#ifndef HAVE_SWSCALER
    // do conversion within libavcodec if possible
    if (pix_fmt_out != PIX_FMT_NB) {
        result = img_convert(&picture_out, pix_fmt_out,
                             picture_in, pix_fmt_in,
                             width_in, height_in);
        if (result) {
            printf("FFMPEG::convert_cmodel img_convert() failed\n");
        }
        return result;
    }
#else
    convert_ctx = sws_getContext(width_in, height_in, pix_fmt_in,
                                 frame_out->get_w(), frame_out->get_h(), pix_fmt_out,
                                 SWS_BICUBIC, NULL, NULL, NULL);

    if (convert_ctx == NULL) {
        printf("FFMPEG::convert_cmodel : swscale context initialization failed\n");
        return 1;
    }

    result = sws_scale(convert_ctx,
                       picture_in->data, picture_in->linesize,
                       width_in, height_in,
                       picture_out.data, picture_out.linesize);

    sws_freeContext(convert_ctx);

    if (result) {
        printf("FFMPEG::convert_cmodel sws_scale() failed\n");
    }
#endif

    // make an intermediate temp frame only if necessary
    int cmodel_in = pix_fmt_to_color_model(pix_fmt_in);
    if (cmodel_in == BC_TRANSPARENCY) {
        if (pix_fmt_in == PIX_FMT_RGB32) {
            // avoid infinite recursion if things are broken
            printf("FFMPEG::convert_cmodel pix_fmt_in broken!\n");
            return 1;
        }

        // NOTE: choose RGBA8888 as a hopefully non-lossy colormodel
        VFrame *temp_frame = new VFrame(0, width_in, height_in, BC_RGBA8888);
        if (convert_cmodel(picture_in, pix_fmt_in, width_in, height_in, temp_frame)) {
            delete temp_frame;
            return 1;  // recursed call will print error message
        }
        int result = convert_cmodel(temp_frame, frame_out);
        delete temp_frame;
        return result;
    }

    // NOTE: no scaling possible in img_convert() so none possible here
    if (frame_out->get_w() != width_in || frame_out->get_h() != height_in) {
        printf("scaling from %dx%d to %dx%d not allowed\n",
               width_in, height_in, frame_out->get_w(), frame_out->get_h());
        return 1;
    }

    // if we reach here we know that cmodel_transfer() will work
    uint8_t *yuv_in[3] = {0, 0, 0};
    uint8_t *row_pointers_in[height_in];
    if (cmodel_is_planar(cmodel_in)) {
        yuv_in[0] = picture_in->data[0];
        yuv_in[1] = picture_in->data[1];
        yuv_in[2] = picture_in->data[2];
    } else {
        // set row pointers for picture_in
        uint8_t *data = picture_in->data[0];
        int bytes_per_line = cmodel_calculate_pixelsize(cmodel_in) * height_in;
        for (int i = 0; i < height_in; i++) {
            row_pointers_in[i] = data + i * bytes_per_line;
//......... part of the code omitted .........
Developer ID: petterreinholdtsen, Project: cinelerra-cv, Lines: 101
Example 21: main

//......... part of the code omitted .........
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }
    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();

    out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
                                            pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
                         AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

    // Output Info-----------------------------
    printf("---------------- File Information ---------------\n");
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");

    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        printf("Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }
    // SDL 2.0 Support for multiple windows
    screen_w = pCodecCtx->width;
    screen_h = pCodecCtx->height;
    screen = SDL_CreateWindow("Simplest ffmpeg player's Window",
                              SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                              screen_w, screen_h, SDL_WINDOW_OPENGL);

    if (!screen) {
        printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
        return -1;
    }
    sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
    // IYUV: Y + U + V (3 planes)
    // YV12: Y + V + U (3 planes)
    sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV,
                                   SDL_TEXTUREACCESS_STREAMING,
                                   pCodecCtx->width, pCodecCtx->height);

    sdlRect.x = 0;
    sdlRect.y = 0;
    sdlRect.w = screen_w;
    sdlRect.h = screen_h;

    packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    video_tid = SDL_CreateThread(sfp_refresh_thread, NULL, NULL);
    //------------SDL End------------
    // Event Loop
    for (;;) {
        // Wait
        SDL_WaitEvent(&event);
        if (event.type == SFM_REFRESH_EVENT) {
            while (1) {
                if (av_read_frame(pFormatCtx, packet) < 0)
                    thread_exit = 1;
                if (packet->stream_index == videoindex)
                    break;
            }
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture) {
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameYUV->data, pFrameYUV->linesize);
                // SDL---------------------------
                SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
                SDL_RenderClear(sdlRenderer);
                //SDL_RenderCopy(sdlRenderer, sdlTexture, &sdlRect, &sdlRect);
                SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, NULL);
                SDL_RenderPresent(sdlRenderer);
                // SDL End-----------------------
            }
            av_free_packet(packet);
        } else if (event.type == SDL_KEYDOWN) {
            // Pause
            if (event.key.keysym.sym == SDLK_SPACE)
                thread_pause = !thread_pause;
        } else if (event.type == SDL_QUIT) {
            thread_exit = 1;
        } else if (event.type == SFM_BREAK_EVENT) {
            break;
        }
    }

    sws_freeContext(img_convert_ctx);

    SDL_Quit();
    //--------------
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
Developer ID: AlexeiPi, Project: simplest_ffmpeg_player, Lines: 101
Example 22: SAFE_DELETE

//......... part of the code omitted .........
      int nSeekTo = (pos == -1 ? nTotalLen / 3 : pos);

      CLog::Log(LOGDEBUG, "%s - seeking to pos %dms (total: %dms) in %s", __FUNCTION__,
                nSeekTo, nTotalLen, redactPath.c_str());
      if (pDemuxer->SeekTime(nSeekTo, true))
      {
        int iDecoderState = VC_ERROR;
        DVDVideoPicture picture;
        memset(&picture, 0, sizeof(picture));

        // num streams * 160 frames, should get a valid frame, if not abort.
        int abort_index = pDemuxer->GetNrOfStreams() * 160;
        do
        {
          DemuxPacket* pPacket = pDemuxer->Read();
          packetsTried++;

          if (!pPacket)
            break;

          if (pPacket->iStreamId != nVideoStream)
          {
            CDVDDemuxUtils::FreeDemuxPacket(pPacket);
            continue;
          }

          iDecoderState = pVideoCodec->Decode(pPacket->pData, pPacket->iSize, pPacket->dts, pPacket->pts);
          CDVDDemuxUtils::FreeDemuxPacket(pPacket);

          if (iDecoderState & VC_ERROR)
            break;

          if (iDecoderState & VC_PICTURE)
          {
            memset(&picture, 0, sizeof(DVDVideoPicture));
            if (pVideoCodec->GetPicture(&picture))
            {
              if (!(picture.iFlags & DVP_FLAG_DROPPED))
                break;
            }
          }
        } while (abort_index--);

        if (iDecoderState & VC_PICTURE && !(picture.iFlags & DVP_FLAG_DROPPED))
        {
          {
            unsigned int nWidth = g_advancedSettings.GetThumbSize();
            double aspect = (double)picture.iDisplayWidth / (double)picture.iDisplayHeight;
            if (hint.forced_aspect && hint.aspect != 0)
              aspect = hint.aspect;
            unsigned int nHeight = (unsigned int)((double)g_advancedSettings.GetThumbSize() / aspect);

            uint8_t *pOutBuf = new uint8_t[nWidth * nHeight * 4];
            struct SwsContext *context = sws_getContext(picture.iWidth, picture.iHeight,
                  PIX_FMT_YUV420P, nWidth, nHeight, PIX_FMT_BGRA,
                  SWS_FAST_BILINEAR | SwScaleCPUFlags(), NULL, NULL, NULL);

            if (context)
            {
              uint8_t *src[] = { picture.data[0], picture.data[1], picture.data[2], 0 };
              int srcStride[] = { picture.iLineSize[0], picture.iLineSize[1], picture.iLineSize[2], 0 };
              uint8_t *dst[] = { pOutBuf, 0, 0, 0 };
              int dstStride[] = { (int)nWidth * 4, 0, 0, 0 };
              int orientation = DegreeToOrientation(hint.orientation);

              sws_scale(context, src, srcStride, 0, picture.iHeight, dst, dstStride);
              sws_freeContext(context);

              details.width = nWidth;
              details.height = nHeight;
              CPicture::CacheTexture(pOutBuf, nWidth, nHeight, nWidth * 4, orientation,
                                     nWidth, nHeight, CTextureCache::GetCachedPath(details.file));
              bOk = true;
            }
            SAFE_DELETE_ARRAY(pOutBuf);
          }
        }
        else
        {
          CLog::Log(LOGDEBUG, "%s - decode failed in %s after %d packets.", __FUNCTION__,
                    redactPath.c_str(), packetsTried);
        }
      }
      SAFE_DELETE(pVideoCodec);
    }
  }

  if (pDemuxer)
    SAFE_DELETE(pDemuxer);
  SAFE_DELETE(pInputStream);

  if (!bOk)
  {
    XFILE::CFile file;
    if (file.OpenForWrite(CTextureCache::GetCachedPath(details.file)))
      file.Close();
  }

  unsigned int nTotalTime = XbmcThreads::SystemClockMillis() - nTime;
  CLog::Log(LOGDEBUG, "%s - measured %u ms to extract thumb from file <%s> in %d packets. ", __FUNCTION__,
            nTotalTime, redactPath.c_str(), packetsTried);
  return bOk;
}
Developer ID: zjcdxzy, Project: mrmc, Lines: 101
Example 23: TRACE

status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
    TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

    TRACE("  requested video format 0x%x\n",
        inOutFormat->u.raw_video.display.format);

    // Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
    // it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
    // default colordepth is RGB32).
    if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
        fOutputColorSpace = B_YCbCr422;
    else
        fOutputColorSpace = B_RGB32;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
    if (fSwsContext != NULL)
        sws_freeContext(fSwsContext);
    fSwsContext = NULL;
#else
    fFormatConversionFunc = 0;
#endif

    fContext->extradata = (uint8_t*)fExtraData;
    fContext->extradata_size = fExtraDataSize;

    bool codecCanHandleIncompleteFrames
        = (fCodec->capabilities & CODEC_CAP_TRUNCATED) != 0;
    if (codecCanHandleIncompleteFrames) {
        // Expect and handle video frames to be splitted across consecutive
        // data chunks.
        fContext->flags |= CODEC_FLAG_TRUNCATED;
    }

    // close any previous instance
    if (fCodecInitDone) {
        fCodecInitDone = false;
        avcodec_close(fContext);
    }

    if (avcodec_open2(fContext, fCodec, NULL) >= 0)
        fCodecInitDone = true;
    else {
        TRACE("avcodec_open() failed to init codec!\n");
        return B_ERROR;
    }

    _ResetTempPacket();

    status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
    if (statusOfDecodingFirstFrame != B_OK) {
        TRACE("[v] decoding first video frame failed\n");
        return B_ERROR;
    }

    // Note: fSwsContext / fFormatConversionFunc should have been initialized
    // by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
    if (fSwsContext == NULL) {
        TRACE("No SWS Scale context or decoder has not set the pixel format "
            "yet!\n");
    }
#else
    if (fFormatConversionFunc == NULL) {
        TRACE("no pixel format conversion function found or decoder has "
            "not set the pixel format yet!\n");
    }
#endif

    inOutFormat->type = B_MEDIA_RAW_VIDEO;
    inOutFormat->require_flags = 0;
    inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

    inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;

    inOutFormat->u.raw_video.interlace = 1;
        // Progressive (non-interlaced) video frames are delivered
    inOutFormat->u.raw_video.first_active = fHeader.u.raw_video.first_active_line;
    inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
    inOutFormat->u.raw_video.pixel_width_aspect = fHeader.u.raw_video.pixel_width_aspect;
    inOutFormat->u.raw_video.pixel_height_aspect = fHeader.u.raw_video.pixel_height_aspect;
    inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
        // Was calculated by first call to _DecodeNextVideoFrame()

    inOutFormat->u.raw_video.display.format = fOutputColorSpace;
    inOutFormat->u.raw_video.display.line_width = fHeader.u.raw_video.display_line_width;
    inOutFormat->u.raw_video.display.line_count = fHeader.u.raw_video.display_line_count;
    inOutFormat->u.raw_video.display.bytes_per_row = fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
    char buffer[1024];
    string_for_format(*inOutFormat, buffer, sizeof(buffer));
    TRACE("[v] outFormat = %s\n", buffer);
    TRACE("  returned video format 0x%x\n",
        inOutFormat->u.raw_video.display.format);
#endif

    return B_OK;
}
Developer ID: MaddTheSane, Project: haiku, Lines: 100
Example 24: convert_image

//......... part of the code omitted .........
    if (width == -1) {
        width = pCodecCtx->width;
    }

    if (height == -1) {
        height = pCodecCtx->height;
    }

    codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
    if (!codec) {
        printf("avcodec_find_decoder() failed to find decoder\n");
        goto fail;
    }

    codecCtx = avcodec_alloc_context3(codec);
    if (!codecCtx) {
        printf("avcodec_alloc_context3 failed\n");
        goto fail;
    }

    codecCtx->bit_rate = pCodecCtx->bit_rate;
    //codecCtx->width = pCodecCtx->width;
    //codecCtx->height = pCodecCtx->height;
    codecCtx->width = width;
    codecCtx->height = height;
    codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
    codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    codecCtx->time_base.num = pCodecCtx->time_base.num;
    codecCtx->time_base.den = pCodecCtx->time_base.den;
    codecCtx->mb_lmin = pCodecCtx->lmin = pCodecCtx->qmin * FF_QP2LAMBDA;
    codecCtx->mb_lmax = pCodecCtx->lmax = pCodecCtx->qmax * FF_QP2LAMBDA;
    codecCtx->flags = CODEC_FLAG_QSCALE;
    codecCtx->global_quality = pCodecCtx->qmin * FF_QP2LAMBDA;

    if (!codec || avcodec_open2(codecCtx, codec, NULL) < 0) {
        printf("avcodec_open2() failed\n");
        goto fail;
    }

    frame = av_frame_alloc();
    if (!frame) {
        goto fail;
    }

    uint8_t *dst_buffer = av_malloc(avpicture_get_size(TARGET_IMAGE_FORMAT, width, height));
    avpicture_fill((AVPicture *)frame, dst_buffer, TARGET_IMAGE_FORMAT, width, height);

    frame->pts = 1;
    frame->quality = pCodecCtx->global_quality;

    scalerCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                               //pCodecCtx->width,
                               //pCodecCtx->height,
                               width, height,
                               TARGET_IMAGE_FORMAT,
                               SWS_BICUBIC, 0, 0, 0);
    if (!scalerCtx) {
        printf("sws_getContext() failed\n");
        goto fail;
    }

    sws_scale(scalerCtx, (const uint8_t * const *)pFrame->data, pFrame->linesize,
              0, pFrame->height, frame->data, frame->linesize);

    ret = avcodec_encode_video2(codecCtx, avpkt, frame, got_packet_ptr);
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encoded Size: %d", ret);

    if (ret < 0) {
        *got_packet_ptr = 0;
    }

fail:
    av_free(frame);

    if (codecCtx) {
        avcodec_close(codecCtx);
        av_free(codecCtx);
    }

    if (scalerCtx) {
        sws_freeContext(scalerCtx);
    }

    if (ret < 0 || !*got_packet_ptr) {
        av_free_packet(avpkt);
    }
}
Developer ID: AirStash, Project: AirStashPlayer, Lines: 101
Example 25: ff_sws_free

static void ff_sws_free(MSScalerContext *ctx)
{
    MSFFScalerContext *fctx = (MSFFScalerContext*)ctx;
    if (fctx->ctx)
        sws_freeContext(fctx->ctx);
    ms_free(ctx);
}
Developer ID: korobool, Project: linphonecdbus, Lines: 5
Example 26: sws_freeContext

FFMpegEncoder::~FFMpegEncoder()
{
    sws_freeContext(swsCtx);
    close();
}
Developer ID: maximlevitsky, Project: animations-editor-bdmorph, Lines: 5
Example 27: avcodec_find_encoder

QByteArray AVDecoder::WriteJPEG(AVCodecContext *pCodecCtx, AVFrame *pFrame, int width, int height)
{
    AVCodecContext *pOCodecCtx;
    AVCodec *pOCodec;

    QByteArray data;

    pOCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
    if (!pOCodec) {
        return data;
    }

    SwsContext *sws_ctx = sws_getContext(
        pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
        width, height, PIX_FMT_YUVJ420P,
        SWS_BICUBIC, NULL, NULL, NULL);

    if (!sws_ctx) {
        return data;
    }

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    AVFrame *pFrameRGB = av_frame_alloc();
#else
    AVFrame *pFrameRGB = avcodec_alloc_frame();
#endif

    if (pFrameRGB == NULL) {
        sws_freeContext(sws_ctx);
        return data;
    }

    int numBytes = avpicture_get_size(PIX_FMT_YUVJ420P, width, height);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);

    if (!buffer) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        av_free(pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_YUVJ420P, width, height);

    sws_scale(
        sws_ctx,
        pFrame->data, pFrame->linesize,
        0, pCodecCtx->height,
        pFrameRGB->data, pFrameRGB->linesize
    );

    pOCodecCtx = avcodec_alloc_context3(pOCodec);
    if (pOCodecCtx == NULL) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        av_free(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return 0;
    }

    pOCodecCtx->bit_rate = pCodecCtx->bit_rate;
    pOCodecCtx->width = width;
    pOCodecCtx->height = height;
    pOCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ420P;
    pOCodecCtx->codec_id = AV_CODEC_ID_MJPEG;
    pOCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
    pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

    AVDictionary *opts = NULL;
    if (avcodec_open2(pOCodecCtx, pOCodec, &opts) < 0) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
//......... part of the code omitted .........
Developer ID: gonslo, Project: qcma, Lines: 101
Example 28: fa_image_from_video2

//......... part of the code omitted .........
  int cnt = MAX_FRAME_SCAN;
  while (1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);

    if (r == AVERROR(EAGAIN))
      continue;

    if (r == AVERROR_EOF)
      break;

    if (cancellable_is_cancelled(c)) {
      snprintf(errbuf, errlen, "Cancelled");
      av_free_packet(&pkt);
      break;
    }

    if (r != 0) {
      ifv_close();
      break;
    }

    if (pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;

    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    av_free_packet(&pkt);

    if (got_pic == 0 || !want_pic) {
      continue;
    }

    int w, h;

    if (im->im_req_width != -1 && im->im_req_height != -1) {
      w = im->im_req_width;
      h = im->im_req_height;
    } else if (im->im_req_width != -1) {
      w = im->im_req_width;
      h = im->im_req_width * ifv_ctx->height / ifv_ctx->width;
    } else if (im->im_req_height != -1) {
      w = im->im_req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->im_req_height;
    } else {
      w = im->im_req_width;
      h = im->im_req_height;
    }

    pm = pixmap_create(w, h, PIXMAP_BGR32, 0);

    if (pm == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Out of memory");
      av_free(frame);
      return NULL;
    }

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
                         w, h, AV_PIX_FMT_BGR32, SWS_BILINEAR,
                         NULL, NULL, NULL);
    if (sws == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Scaling failed");
      pixmap_release(pm);
      av_free(frame);
      return NULL;
    }

    uint8_t *ptr[4] = {0, 0, 0, 0};
    int strides[4] = {0, 0, 0, 0};

    ptr[0] = pm->pm_pixels;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
              0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    write_thumb(ifv_ctx, frame, w, h, cacheid, mtime);

    break;
  }

  av_frame_free(&frame);

  if (pm == NULL)
    snprintf(errbuf, errlen, "Frame not found (scanned %d)",
             MAX_FRAME_SCAN - cnt);

  avcodec_flush_buffers(ifv_ctx);
  callout_arm(&thumb_flush_callout, ifv_autoclose, NULL, 5);
  return pm;
}
Developer ID: tydaikho, Project: showtime, Lines: 101
Example 29: guacenc_video_prepare_frame

void guacenc_video_prepare_frame(guacenc_video* video, guacenc_buffer* buffer) {

    int lsize;
    int psize;

    /* Ignore NULL buffers */
    if (buffer == NULL || buffer->surface == NULL)
        return;

    /* Obtain destination frame */
    AVFrame* dst = video->next_frame;

    /* Determine width of image if height is scaled to match destination */
    int scaled_width = buffer->width * dst->height / buffer->height;

    /* Determine height of image if width is scaled to match destination */
    int scaled_height = buffer->height * dst->width / buffer->width;

    /* If height-based scaling results in a fit width, add pillarboxes */
    if (scaled_width <= dst->width) {
        lsize = 0;
        psize = (dst->width - scaled_width)
            * buffer->height / dst->height / 2;
    }

    /* If width-based scaling results in a fit width, add letterboxes */
    else {
        assert(scaled_height <= dst->height);
        psize = 0;
        lsize = (dst->height - scaled_height)
            * buffer->width / dst->width / 2;
    }

    /* Prepare source frame for buffer */
    AVFrame* src = guacenc_video_frame_convert(buffer, lsize, psize);
    if (src == NULL) {
        guacenc_log(GUAC_LOG_WARNING, "Failed to allocate source frame. "
                "Frame dropped.");
        return;
    }

    /* Prepare scaling context */
    struct SwsContext* sws = sws_getContext(src->width, src->height,
            AV_PIX_FMT_RGB32, dst->width, dst->height, AV_PIX_FMT_YUV420P,
            SWS_BICUBIC, NULL, NULL, NULL);

    /* Abort if scaling context could not be created */
    if (sws == NULL) {
        guacenc_log(GUAC_LOG_WARNING, "Failed to allocate software scaling "
                "context. Frame dropped.");
        av_freep(&src->data[0]);
        av_frame_free(&src);
        return;
    }

    /* Apply scaling, copying the source frame to the destination */
    sws_scale(sws, (const uint8_t* const*) src->data, src->linesize,
            0, src->height, dst->data, dst->linesize);

    /* Free scaling context */
    sws_freeContext(sws);

    /* Free source frame */
    av_freep(&src->data[0]);
    av_frame_free(&src);

}
Developer ID: Hatsize, Project: incubator-guacamole-server, Lines: 67
Note: The sws_freeContext examples in this article are collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various programmers; the copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.