
Tutorial: C++ sws_getContext Function Code Examples

51自学网 · 2021-06-03

This article collects typical usage examples of the sws_getContext function in C/C++. If you have been wondering exactly what sws_getContext does, how to call it, or what real-world uses look like, the curated code samples below should help.

The following code examples of sws_getContext, drawn from open-source projects, are ordered roughly by popularity.
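Before the collected examples, it may help to see the canonical call pattern in one place: create a context with sws_getContext, convert pixels with sws_scale, and release the context with sws_freeContext. The sketch below is illustrative only; it assumes a decoded AVFrame named decoded and current FFmpeg headers, and the helper name convert_to_rgb24 and the dst_w/dst_h parameters are mine, not from any example that follows.

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>

/* Convert a decoded frame to RGB24 at dst_w x dst_h.
 * On success, dst_data[0] holds the image; free it with av_freep(&dst_data[0]). */
static int convert_to_rgb24(const AVFrame *decoded, int dst_w, int dst_h,
                            uint8_t *dst_data[4], int dst_linesize[4])
{
    struct SwsContext *sws = sws_getContext(
        decoded->width, decoded->height, (enum AVPixelFormat)decoded->format,
        dst_w, dst_h, AV_PIX_FMT_RGB24,
        SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return -1;

    /* Allocate the destination image buffers. */
    int ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, AV_PIX_FMT_RGB24, 1);
    if (ret < 0) {
        sws_freeContext(sws);
        return ret;
    }

    /* sws_scale processes the picture from row 0 through decoded->height. */
    sws_scale(sws, (const uint8_t * const *)decoded->data, decoded->linesize,
              0, decoded->height, dst_data, dst_linesize);

    sws_freeContext(sws);
    return 0;
}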

Example 1: video_resampler_initialize

// Initialize resampler
//
// Generic
//     ::new(src_width, src_height, src_format, dst_factor)                                 - Resize by percentage
//     ::new(src_width, src_height, src_format, dst_format)                                 - Change color format
//     ::new(src_width, src_height, src_format, dst_width, dst_height)                      - Resize to width and height
//     ::new(src_width, src_height, src_format, dst_width, dst_height, filter)              - Resize with interpolation filter
//     ::new(src_width, src_height, src_format, dst_width, dst_height, dst_format, filter)  - Resize with filter and change color format
//
// From Object
//     ::new(source, dst_factor)                                 - Resize by percentage
//     ::new(source, dst_format)                                 - Change color format
//     ::new(source, dst_width, dst_height)                      - Resize to width and height
//     ::new(source, dst_width, dst_height, filter)              - Resize with interpolation filter
//     ::new(source, dst_width, dst_height, dst_format, filter)  - Resize with filter and change color format
VALUE video_resampler_initialize(int argc, VALUE * argv, VALUE self) {
    VideoResamplerInternal * internal;
    Data_Get_Struct(self, VideoResamplerInternal, internal);

    if (argc && TYPE(argv[0]) == T_FIXNUM) {
        // Called in generic form
        if      (argc < 4)  rb_raise(rb_eArgError, "Missing argument(s)");
        else if (argc > 7)  rb_raise(rb_eArgError, "Too many arguments");

        internal->src_width  = NUM2INT(argv[0]);
        internal->src_height = NUM2INT(argv[1]);
        internal->src_format = symbol_to_av_pixel_format(argv[2]);

        argc -= 3;
        argv += 3;
    }
    else {
        // Called with object
        if      (argc < 2)  rb_raise(rb_eArgError, "Missing argument(s)");
        else if (argc > 5)  rb_raise(rb_eArgError, "Too many arguments");

        internal->src_width  = NUM2INT(rb_funcall(argv[0], rb_intern("width"), 0));
        internal->src_height = NUM2INT(rb_funcall(argv[0], rb_intern("height"), 0));
        internal->src_format = symbol_to_av_pixel_format(rb_funcall(argv[0], rb_intern("format"), 0));

        argc -= 1;
        argv += 1;
    }

    internal->dst_width  = internal->src_width;
    internal->dst_height = internal->src_height;
    internal->dst_format = internal->src_format;
    internal->filter     = SWS_FAST_BILINEAR;

    switch (argc) {
    case 1: {
        if (TYPE(argv[0]) != T_SYMBOL) {
            // Resize by percentage
            internal->dst_width  = (int)(internal->src_width  * NUM2DBL(argv[0]));
            internal->dst_height = (int)(internal->src_height * NUM2DBL(argv[0]));
        }
        else {
            // Change color format
            internal->dst_format = symbol_to_av_pixel_format(argv[0]);
        }
        break;
    }
    case 2: {
        // Resize to width and height
        internal->dst_width  = NUM2INT(argv[0]);
        internal->dst_height = NUM2INT(argv[1]);
        break;
    }
    case 3: {
        // Resize to width and height using interpolation filter
        internal->dst_width  = NUM2INT(argv[0]);
        internal->dst_height = NUM2INT(argv[1]);
        internal->filter     = symbol_to_interpolation_filter(argv[2]);
        break;
    }
    case 4: {
        // Resize to width and height using interpolation filter and change color format
        internal->dst_width  = NUM2INT(argv[0]);
        internal->dst_height = NUM2INT(argv[1]);
        internal->dst_format = symbol_to_av_pixel_format(argv[2]);
        internal->filter     = symbol_to_interpolation_filter(argv[3]);
        break;
    }
    }

    if (internal->src_format == PIX_FMT_NONE) rb_raise(rb_eArgError, "Unknown input color format");
    if (internal->dst_format == PIX_FMT_NONE) rb_raise(rb_eArgError, "Unknown output color format");
    if (internal->filter == 0)                rb_raise(rb_eArgError, "Unknown interpolation method");

    // Create scaler context
    internal->context = sws_getContext(internal->src_width,
                                       internal->src_height,
                                       internal->src_format,
                                       internal->dst_width,
                                       internal->dst_height,
                                       internal->dst_format,
                                       internal->filter,
                                       NULL,
                                       NULL,
                                       NULL);
// ... (remaining code omitted) ...
Developer: noelcode | Project: ruby-ffmpeg | Lines: 101


Example 2: stream_component_open

int stream_component_open(VideoState *is, int stream_index) {
  AVFormatContext *pFormatCtx = is->pFormatCtx;
  AVCodecContext *codecCtx = NULL;
  AVCodec *codec = NULL;
  AVDictionary *optionsDict = NULL;
  SDL_AudioSpec wanted_spec, spec;

  if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
    return -1;
  }

  // Get a pointer to the codec context for the video stream
  codecCtx = pFormatCtx->streams[stream_index]->codec;

  if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
    // Set audio settings from codec info
    wanted_spec.freq = codecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = codecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = is;

    if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
      fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
      return -1;
    }
    is->audio_hw_buf_size = spec.size;
  }

  codec = avcodec_find_decoder(codecCtx->codec_id);
  if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }

  switch(codecCtx->codec_type) {
  case AVMEDIA_TYPE_AUDIO:
    is->audioStream = stream_index;
    is->audio_st = pFormatCtx->streams[stream_index];
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;

    /* averaging filter for audio sync */
    is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
    is->audio_diff_avg_count = 0;
    /* Correct audio only if larger error than this */
    is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;

    is->sws_ctx_audio = swr_alloc();
    if (!is->sws_ctx_audio) {
      fprintf(stderr, "Could not allocate resampler context\n");
      return -1;
    }

    memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
    packet_queue_init(&is->audioq);
    SDL_PauseAudio(0);
    break;
  case AVMEDIA_TYPE_VIDEO:
    is->videoStream = stream_index;
    is->video_st = pFormatCtx->streams[stream_index];

    is->frame_timer = (double)av_gettime() / 1000000.0;
    is->frame_last_delay = 40e-3;
    is->video_current_pts_time = av_gettime();

    packet_queue_init(&is->videoq);
    is->video_tid = SDL_CreateThread(video_thread, is);
    is->sws_ctx = sws_getContext(
        is->video_st->codec->width,
        is->video_st->codec->height,
        is->video_st->codec->pix_fmt,
        is->video_st->codec->width,
        is->video_st->codec->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL, NULL, NULL);
    codecCtx->get_buffer2 = our_get_buffer;
    codecCtx->release_buffer = our_release_buffer;
    break;
  default:
    break;
  }

  return 0;
}
Developer: feixiao | Project: ffmpeg-tutorial | Lines: 90


Example 3: strcpy

// ... (preceding code omitted) ...
	iTotalFrameNum = 0;
	iNowFrameNum = 0;
	frameFinished = 0;

	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(av_open_input_file(&pFormatCtx, chVidName, NULL, 0, NULL)!=0)
	{
		bIfSuccess = false;
		return; // Couldn't open file
	}

	// Retrieve stream information
	if(av_find_stream_info(pFormatCtx)<0)
	{
		bIfSuccess = false;
		return; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, chVidName, 0);
	this->iTotalFrameNum = pFormatCtx->streams[0]->duration;
	this->fFrmRat = pFormatCtx->streams[0]->r_frame_rate.num/(float)(pFormatCtx->streams[0]->r_frame_rate.den);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		bIfSuccess = false;
		return; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		bIfSuccess = false;
		fprintf(stderr, "Unsupported codec!\n");
		return; // Codec not found
	}
	// Open codec
	while (avcodec_open(pCodecCtx, pCodec) < 0) /* this call kept returning -1 */ {
		Sleep(this->iProcessOrder);
	}

	// Allocate video frame
	pFrameOri=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameBGR=avcodec_alloc_frame();
	if(pFrameBGR==NULL)
	{
		bIfSuccess = false;
		return;
	}
	pFrameRGB=avcodec_alloc_frame();
	if(pFrameRGB==NULL)
	{
		bIfSuccess = false;
		return;
	}

	// Determine required buffer size and allocate buffer
	numBytes=avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width,pCodecCtx->height);
	imageFrame->height = pCodecCtx->height;
	imageFrame->width = pCodecCtx->width;
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	imageFrame->imageData = new uint8_t[numBytes*sizeof(uint8_t)];

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameBGR, buffer, PIX_FMT_BGR24,
		pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		pCodecCtx->width, pCodecCtx->height);	// note: PIX_FMT_RGB24 here determines the picture format

	if(this->bIfUseHD == false)
		ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
			pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
			PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
	this->getOneFrame();
	bIfSuccess = true;
}
Developer: blackshadowwalker | Project: VLPRDemo | Lines: 101


Example 4: while(

void VideoThread::readFrame()
{
//	qDebug() << "VideoThread::readFrame(): start of function";
	if(!m_inited)
	{
//		qDebug() << "VideoThread::readFrame(): not inited";
		//emit frameReady(m_nextDelay);
		return;
	}

	AVPacket pkt1, *packet = &pkt1;
	double pts;

//	qDebug() << "VideoThread::readFrame(): killed: "<<m_killed;
	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet->stream_index == m_video_stream)
			{
				//global_video_pkt_pts = packet->pts;
				#ifdef USE_DECODE_VID2
				avcodec_decode_video2(m_video_codec_context, m_av_frame, &frame_finished, packet);
				#else
				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet->data, packet->size);
				#endif

				if(packet->dts == AV_NOPTS_VALUE &&
				   m_av_frame->opaque &&
				   *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
				{
					pts = *(uint64_t *)m_av_frame->opaque;
				}
				else if(packet->dts != AV_NOPTS_VALUE)
				{
					pts = packet->dts;
				}
				else
				{
					pts = 0;
				}
				pts *= av_q2d(m_timebase);

				// Did we get a video frame?
				if(frame_finished)
				{
					// Convert the image from its native format to RGB, then copy the image data to a QImage
					if(m_sws_context == NULL)
					{
						//mutex.lock();
						m_sws_context = sws_getContext(
							m_video_codec_context->width, m_video_codec_context->height,
							m_video_codec_context->pix_fmt,
							m_video_codec_context->width, m_video_codec_context->height,
							//PIX_FMT_RGB32, SWS_BICUBIC,
							//PIX_FMT_RGB565, SWS_FAST_BILINEAR,
							PIX_FMT_RGB32, SWS_FAST_BILINEAR,
							//PIX_FMT_YUV420P, SWS_FAST_BILINEAR,
							NULL, NULL, NULL); //SWS_PRINT_INFO
						//mutex.unlock();
						//printf("decode(): created m_sws_context\n");
					}
					//printf("decode(): got frame\n");

					sws_scale(m_sws_context,
						  m_av_frame->data,
						  m_av_frame->linesize, 0,
						  m_video_codec_context->height,
						  m_av_rgb_frame->data,
						  m_av_rgb_frame->linesize);

					//m_bufferMutex.lock();
//					qDebug() << "VideoThread: void*:"<<(void*)m_av_rgb_frame->data[0];
					QImage frame = QImage(m_av_rgb_frame->data[0],
								m_video_codec_context->width,
								m_video_codec_context->height,
								//QImage::Format_RGB16);
								QImage::Format_ARGB32);
					//m_bufferMutex.unlock();

					av_free_packet(packet);

					// This block from the synchronize_video(VideoState *is, AVFrame *src_frame, double pts) : double
					// function given at: http://dranger.com/ffmpeg/tutorial05.html
					{
						// update the frame pts
						double frame_delay;

						if(pts != 0)
						{
							/* if we have pts, set video clock to it */
							m_video_clock = pts;
						} else {
// ... (remaining code omitted) ...
Developer: TritonSailor | Project: livepro | Lines: 101


Example 5: item(

// ... (preceding code omitted) ...
      CLog::Log(LOGDEBUG,"%s - seeking to pos %dms (total: %dms) in %s", __FUNCTION__, nSeekTo, nTotalLen, redactPath.c_str());
      if (pDemuxer->SeekTime(nSeekTo, true))
      {
        int iDecoderState = VC_ERROR;
        DVDVideoPicture picture;
        memset(&picture, 0, sizeof(picture));

        // num streams * 160 frames, should get a valid frame, if not abort.
        int abort_index = pDemuxer->GetNrOfStreams() * 160;
        do
        {
          DemuxPacket* pPacket = pDemuxer->Read();
          packetsTried++;
          if (!pPacket)
            break;

          if (pPacket->iStreamId != nVideoStream)
          {
            CDVDDemuxUtils::FreeDemuxPacket(pPacket);
            continue;
          }

          iDecoderState = pVideoCodec->Decode(pPacket->pData, pPacket->iSize, pPacket->dts, pPacket->pts);
          CDVDDemuxUtils::FreeDemuxPacket(pPacket);

          if (iDecoderState & VC_ERROR)
            break;

          if (iDecoderState & VC_PICTURE)
          {
            memset(&picture, 0, sizeof(DVDVideoPicture));
            if (pVideoCodec->GetPicture(&picture))
            {
              if(!(picture.iFlags & DVP_FLAG_DROPPED))
                break;
            }
          }
        } while (abort_index--);

        if (iDecoderState & VC_PICTURE && !(picture.iFlags & DVP_FLAG_DROPPED))
        {
          {
            unsigned int nWidth = g_advancedSettings.m_imageRes;
            double aspect = (double)picture.iDisplayWidth / (double)picture.iDisplayHeight;
            if(hint.forced_aspect && hint.aspect != 0)
              aspect = hint.aspect;
            unsigned int nHeight = (unsigned int)((double)g_advancedSettings.m_imageRes / aspect);

            uint8_t *pOutBuf = new uint8_t[nWidth * nHeight * 4];
            struct SwsContext *context = sws_getContext(picture.iWidth, picture.iHeight,
                  AV_PIX_FMT_YUV420P, nWidth, nHeight, AV_PIX_FMT_BGRA, SWS_FAST_BILINEAR, NULL, NULL, NULL);

            if (context)
            {
              uint8_t *src[] = { picture.data[0], picture.data[1], picture.data[2], 0 };
              int     srcStride[] = { picture.iLineSize[0], picture.iLineSize[1], picture.iLineSize[2], 0 };
              uint8_t *dst[] = { pOutBuf, 0, 0, 0 };
              int     dstStride[] = { (int)nWidth*4, 0, 0, 0 };
              int orientation = DegreeToOrientation(hint.orientation);
              sws_scale(context, src, srcStride, 0, picture.iHeight, dst, dstStride);
              sws_freeContext(context);

              details.width = nWidth;
              details.height = nHeight;
              CPicture::CacheTexture(pOutBuf, nWidth, nHeight, nWidth * 4, orientation, nWidth, nHeight, CTextureCache::GetCachedPath(details.file));
              bOk = true;
            }
            delete [] pOutBuf;
          }
        }
        else
        {
          CLog::Log(LOGDEBUG,"%s - decode failed in %s after %d packets.", __FUNCTION__, redactPath.c_str(), packetsTried);
        }
      }
      delete pVideoCodec;
    }
  }

  if (pDemuxer)
    delete pDemuxer;
  delete pInputStream;

  if(!bOk)
  {
    XFILE::CFile file;
    if(file.OpenForWrite(CTextureCache::GetCachedPath(details.file)))
      file.Close();
  }

  unsigned int nTotalTime = XbmcThreads::SystemClockMillis() - nTime;
  CLog::Log(LOGDEBUG,"%s - measured %u ms to extract thumb from file <%s> in %d packets. ", __FUNCTION__, nTotalTime, redactPath.c_str(), packetsTried);
  return bOk;
}
Developer: Admirali78 | Project: xbmc | Lines: 101


Example 6: ffmpeg_jpeg_encode

int ffmpeg_jpeg_encode(unsigned char *srcBuf, unsigned char* dstBuf, int dstBufSize, PixelFormat srcPixFmt, int srcWidth, int srcHeight, int qvalue)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;

    printf("Video encoding\n");

    /* find the MJPEG video encoder */
    codec = avcodec_find_encoder(CODEC_ID_MJPEG);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        return -1;
    }

    c = avcodec_alloc_context();
    c->qmin = qvalue;
    c->qmax = qvalue;

    /* resolution must be a multiple of two */
    c->width = srcWidth;
    c->height = srcHeight;

    c->time_base.den = 25;
    c->time_base.num = 1;
    c->max_b_frames = 0;
    c->pix_fmt = PIX_FMT_YUVJ420P;

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        return -2;
    }

    // prepare colorspace conversion
    // TODO: factor to util.
    AVPicture *pPicSrc = (AVPicture*)malloc(sizeof(AVPicture));
    int srcBufSize = avpicture_get_size(srcPixFmt, srcWidth, srcHeight);
    avpicture_fill(pPicSrc, srcBuf, srcPixFmt, srcWidth, srcHeight);

    AVFrame *pPicScaled = (AVFrame*)malloc(sizeof(AVFrame));
    int scaleBufSize = avpicture_get_size(c->pix_fmt, srcWidth, srcHeight);
    unsigned char *scaleBuf = (unsigned char*)malloc(scaleBufSize);
    avpicture_fill((AVPicture*)pPicScaled, scaleBuf, c->pix_fmt, srcWidth, srcHeight);

    SwsContext *img_convert_ctx = sws_getContext(
        srcWidth, srcHeight, srcPixFmt,
        srcWidth, srcHeight, c->pix_fmt,
        SWS_BICUBIC, NULL, NULL, NULL);
    if (img_convert_ctx == NULL)
    {
        printf("can not create colorspace converter!\n");
        return -3;
    }

    int ret = sws_scale(img_convert_ctx,
        pPicSrc->data, pPicSrc->linesize,
        0, srcHeight,
        pPicScaled->data, pPicScaled->linesize);
    if (ret < 0)
    {
        printf("color space conversion failed!\n");
        return -4;
    }

    // encode
    int out_size = avcodec_encode_video(c, dstBuf, dstBufSize, pPicScaled);
    if (out_size < 0)
    {
        printf("encode failed!\n");
        return -5;
    }

    avcodec_close(c);
    av_free(c);
    av_free(pPicSrc);
    av_free(pPicScaled);
    free(scaleBuf);
    return out_size;
}
Developer: jdzyzh | Project: ffmpeg-wrapper | Lines: 83


Example 7: frnum(

qav::qvideo::qvideo(const char* file, int _out_width, int _out_height) :
	frnum(0), videoStream(-1), out_width(_out_width), out_height(_out_height),
	pFormatCtx(NULL), pCodecCtx(NULL), pCodec(NULL), pFrame(NULL), img_convert_ctx(NULL)
{
	const char* pslash = strrchr(file, '/');
	if (pslash)
		fname = pslash+1;
	else
		fname = file;
	if (avformat_open_input(&pFormatCtx, file, NULL, NULL) < 0) {
		free_resources();
		throw std::runtime_error("Can't open file");
	}
	if (avformat_find_stream_info(pFormatCtx, NULL)<0) {
		free_resources();
		throw std::runtime_error("Multimedia type not supported");
	}
	LOG_INFO << "File info for (" << file << ")" << std::endl;
	av_dump_format(pFormatCtx, 0, file, false);
	// find video stream (first)
	for (unsigned int i=0; i<pFormatCtx->nb_streams; i++) {
		if (AVMEDIA_TYPE_VIDEO == pFormatCtx->streams[i]->codec->codec_type) {
			videoStream=i;
			break;
		}
	}
	if (-1==videoStream) {
		free_resources();
		throw std::runtime_error("Can't find video stream");
	}
	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(!pCodec) {
		free_resources();
		throw std::runtime_error("Can't find codec for video stream");
	}
	if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
		free_resources();
		throw std::runtime_error("Can't open codec for video stream");
	}
	// allocate data to extract frames
	pFrame = av_frame_alloc();
	if (!pFrame) {
		free_resources();
		throw std::runtime_error("Can't allocate frame for video stream");
	}
	// populate the out_width/out_height members
	if (out_width > 0 && out_height > 0) {
		LOG_INFO << "Output frame size for (" << file << ") is: " << out_width << 'x' << out_height << std::endl;
	} else if (-1 == out_width && -1 == out_height) {
		out_width = pCodecCtx->width;
		out_height = pCodecCtx->height;
		LOG_INFO << "Output frame size for (" << file << ") (default) is: " << out_width << 'x' << out_height << std::endl;
	} else {
		free_resources();
		throw std::runtime_error("Invalid output frame size for video stream");
	}
	// just report if we're using a different video size
	if (out_width!=pCodecCtx->width || out_height!=pCodecCtx->height)
		LOG_WARNING << "Video (" << file <<") will get scaled: " << pCodecCtx->width << 'x' << pCodecCtx->height << " (in), " << out_width << 'x' << out_height << " (out)" << std::endl;
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		out_width, out_height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
	if (!img_convert_ctx) {
		free_resources();
		throw std::runtime_error("Can't allocate sws_scale context");
	}
}
Developer: FallingSnow | Project: qpsnr | Lines: 68


Example 8: update_sws_context

bool update_sws_context(struct ffmpeg_source *s, AVFrame *frame)
{
	if (frame->width != s->sws_width
			|| frame->height != s->sws_height
			|| frame->format != s->sws_format) {
		if (s->sws_ctx != NULL)
			sws_freeContext(s->sws_ctx);

		if (frame->width <= 0 || frame->height <= 0) {
			FF_BLOG(LOG_ERROR, "unable to create a sws "
					"context that has a width(%d) or "
					"height(%d) of zero.", frame->width,
					frame->height);
			goto fail;
		}

		s->sws_ctx = sws_getContext(
			frame->width,
			frame->height,
			frame->format,
			frame->width,
			frame->height,
			AV_PIX_FMT_BGRA,
			SWS_BILINEAR,
			NULL, NULL, NULL);

		if (s->sws_ctx == NULL) {
			FF_BLOG(LOG_ERROR, "unable to create sws "
					"context with src{w:%d,h:%d,f:%d}->"
					"dst{w:%d,h:%d,f:%d}",
					frame->width, frame->height,
					frame->format, frame->width,
					frame->height, AV_PIX_FMT_BGRA);
			goto fail;
		}

		if (s->sws_data != NULL)
			bfree(s->sws_data);
		s->sws_data = bzalloc(frame->width * frame->height * 4);
		if (s->sws_data == NULL) {
			FF_BLOG(LOG_ERROR, "unable to allocate sws "
					"pixel data with size %d",
					frame->width * frame->height * 4);
			goto fail;
		}

		s->sws_linesize = frame->width * 4;
		s->sws_width = frame->width;
		s->sws_height = frame->height;
		s->sws_format = frame->format;
	}

	return true;

fail:
	if (s->sws_ctx != NULL)
		sws_freeContext(s->sws_ctx);
	s->sws_ctx = NULL;
	if (s->sws_data)
		bfree(s->sws_data);
	s->sws_data = NULL;
	s->sws_linesize = 0;
	s->sws_width = 0;
	s->sws_height = 0;
	s->sws_format = 0;
	return false;
}
Developer: DaveDaCoda | Project: obs-studio | Lines: 71


Example 9: av_free(

/**
 * Set the camera preview size and the user-visible preview aspect ratio.
 */
int Recorder::set_preview_size_and_ratio(int width, int height, float ratio)
{
	if(0 >= width || 0 >= height || 0 >= ratio)
		return -1;

	/**
	 * Reset the current scaling state from the incoming parameters.
	 */
	if(NULL != src_frame_buffer)
		av_free(src_frame_buffer);
	if(NULL != dst_frame_buffer)
		av_free(dst_frame_buffer);
	if(NULL != src_frame)
		av_frame_free(&src_frame);
	if(NULL != dst_frame)
		av_frame_free(&dst_frame);
	if (NULL != sws_ctx)
		sws_freeContext(sws_ctx);

	src_width  = -1;
	src_height = -1;
	src_frame_buffer = NULL;
	dst_width = -1;
	dst_height = -1;
	dst_frame_buffer = NULL;
	src_frame = NULL;
	dst_frame = NULL;
	sws_ctx = NULL;

	/**
	 * Recompute the size from the camera preview ratio and the ratio the user sees.
	 */
	// Note: the image still needs rotating; the camera orientation differs from the user preview.
	if (1.0 * height / width > ratio)
	{
		src_width  = (int)(1.0 * ratio * width);
		src_height = width;
	}
	else
	{
		src_width = height;
		src_height = (int)(1.0 * height / ratio);
	}
	// Normalize to multiples of two.
	src_width  -= src_width  % 2;
	src_height -= src_height % 2;

	/**
	 * Only convert the pixel format; do not scale.
	 */
	dst_width  = src_width;
	dst_height = src_height;

	/**
	 * Allocate memory for the source image.
	 */
	src_frame = av_frame_alloc();
	if(!src_frame)
		return -1;
	int src_frame_size = avpicture_get_size(SRC_VIDEO_PIX_FMT, src_width, src_height);
	src_frame_buffer = (uint8_t*)av_malloc(src_frame_size);
	avpicture_fill((AVPicture *)src_frame, src_frame_buffer, SRC_VIDEO_PIX_FMT, src_width, src_height);
	src_frame->width  = src_width;
	src_frame->height = src_height;
	src_frame->format = SRC_VIDEO_PIX_FMT;

	/**
	 * Allocate memory for the destination image.
	 */
	dst_frame = av_frame_alloc();
	if(!dst_frame)
		return -1;
	int dst_frame_size = avpicture_get_size(VIDEO_PIX_FMT, dst_width, dst_height);
	dst_frame_buffer = (uint8_t*)av_malloc(dst_frame_size);
	avpicture_fill((AVPicture *)dst_frame, dst_frame_buffer, VIDEO_PIX_FMT, dst_width, dst_height);
	dst_frame->width = dst_width;
	dst_frame->height = dst_height;
	dst_frame->format = VIDEO_PIX_FMT;

	/**
	 * Scaling function: initialize the converter from the source size/format
	 * to the destination size/format.
	 */
	sws_ctx = sws_getContext(src_width, src_height, SRC_VIDEO_PIX_FMT,
			dst_width, dst_height, VIDEO_PIX_FMT,
// ... (remaining code omitted) ...
Developer: amazingyyc | Project: DeepRed | Lines: 101


Example 10: JPG_to_Pixel

int JPG_to_Pixel(const unsigned char *jpgBuff, int jpgSize, int pixelFmt, int pixelWidth, int pixelHeight, unsigned char *pixelBuff, int *pixelSize) {
	AVFormatContext *formatContext;
	AVInputFormat *inputFormat;
	AVIOContext *ioContext;
	AVStream *stream;
	AVCodecContext *codecContext;
	AVCodec *codec;
	AVFrame *frame, *frame2;
	AVPacket packet;
	struct SwsContext *swsContext;
	int streamIndex;
	int gotFrame;
	int codecRet;
	int result = -1;

	av_register_all();
	formatContext = avformat_alloc_context();
	ioContext = avio_alloc_context((unsigned char *)jpgBuff, jpgSize, 0, NULL, NULL, NULL, NULL);
	inputFormat = av_find_input_format("mjpeg");
	av_probe_input_buffer2(ioContext, &inputFormat, NULL, NULL, 0, 0);
	formatContext->pb = ioContext;
	formatContext->iformat = inputFormat;
	avformat_open_input(&formatContext, NULL, NULL, NULL);
	av_find_stream_info(formatContext);

	av_init_packet(&packet);
	for (streamIndex = 0; streamIndex < formatContext->nb_streams; streamIndex++) {
		av_read_frame(formatContext, &packet);
		if (formatContext->streams[streamIndex]->codec->codec_type == AVMEDIA_TYPE_VIDEO && 0 < packet.size) {
			stream = formatContext->streams[streamIndex];
			codecContext = stream->codec;
			codec = avcodec_find_decoder(codecContext->codec_id);
			avcodec_open2(codecContext, codec, NULL);
			frame = avcodec_alloc_frame();
			codecRet = avcodec_decode_video2(codecContext, frame, &gotFrame, &packet);
			if (0 <= codecRet && 1 == gotFrame) {
				frame2 = av_frame_clone(frame);
				frame2->format = PF(pixelFmt);
				swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt,
					pixelWidth, pixelHeight, (AVPixelFormat)frame2->format, SWS_BICUBIC, NULL, NULL, NULL);
				sws_scale(swsContext, (const uint8_t *const *)frame->data, frame->linesize, 0, codecContext->height, frame2->data, frame2->linesize);
				sws_freeContext(swsContext);
				*pixelSize = avpicture_layout((const AVPicture *)frame2, (enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, pixelBuff, *pixelSize);
				result = *pixelSize;
				av_frame_free(&frame2);
			}
			if (1 == codecContext->refcounted_frames) av_frame_unref(frame);
			avcodec_free_frame(&frame);
			avcodec_close(codecContext);
		}
		av_free_packet(&packet);
		if (-1 != result)
			break;
	}

	avformat_close_input(&formatContext);
	av_free(ioContext->buffer);
	av_free(ioContext);
	avformat_free_context(formatContext);
	return result;
}
Developer: SangYang | Project: GeneralFeatures | Lines: 62


Example 11: avformat_close_input

int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
{
	AVFormatContext *formatCtx = NULL;
	int             i, videoStream;
	AVCodec         *codec;
	AVCodecContext  *codecCtx;

	if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams)!=0)
		return -1;

	if (avformat_find_stream_info(formatCtx, NULL) < 0)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

	/* Find the first video stream */
	videoStream=-1;
	for (i=0; i<formatCtx->nb_streams; i++)
	{
		if (formatCtx->streams[i] &&
			get_codec_from_stream(formatCtx->streams[i]) &&
			(get_codec_from_stream(formatCtx->streams[i])->codec_type==AVMEDIA_TYPE_VIDEO))
		{
			videoStream=i;
			break;
		}
	}

	if (videoStream==-1)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

	codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);

	/* Find the decoder for the video stream */
	codec=avcodec_find_decoder(codecCtx->codec_id);
	if (codec==NULL)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}
	codecCtx->workaround_bugs = 1;
	if (avcodec_open2(codecCtx, codec, NULL) < 0)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

#ifdef FFMPEG_OLD_FRAME_RATE
	if (codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
		codecCtx->frame_rate_base=1000;
	m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
#else
	m_baseFrameRate = av_q2d(av_get_r_frame_rate_compat(formatCtx->streams[videoStream]));
#endif
	if (m_baseFrameRate <= 0.0)
		m_baseFrameRate = defFrameRate;

	m_codec = codec;
	m_codecCtx = codecCtx;
	m_formatCtx = formatCtx;
	m_videoStream = videoStream;
	m_frame = avcodec_alloc_frame();
	m_frameDeinterlaced = avcodec_alloc_frame();

	// allocate buffer if deinterlacing is required
	avpicture_fill((AVPicture*)m_frameDeinterlaced,
		(uint8_t*)MEM_callocN(avpicture_get_size(
		m_codecCtx->pix_fmt,
		m_codecCtx->width, m_codecCtx->height),
		"ffmpeg deinterlace"),
		m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);

	// check if the pixel format supports Alpha
	if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
		m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
		m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
		m_codecCtx->pix_fmt == PIX_FMT_BGR32_1)
	{
		// allocate buffer to store final decoded frame
		m_format = RGBA32;
		// allocate sws context
		m_imgConvertCtx = sws_getContext(
			m_codecCtx->width,
			m_codecCtx->height,
			m_codecCtx->pix_fmt,
			m_codecCtx->width,
			m_codecCtx->height,
			PIX_FMT_RGBA,
			SWS_FAST_BILINEAR,
			NULL, NULL, NULL);
	} else
	{
		// allocate buffer to store final decoded frame
		m_format = RGB24;
		// allocate sws context
		m_imgConvertCtx = sws_getContext(
// ... (remaining code omitted) ...
Developer: GeniaPenksik | Project: blender | Lines: 101


Example 12: avformat_new_stream

// ... (preceding code omitted) ...
	c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
	c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
	c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;

#if 0
	/* this options are not set in ffmpeg.c and leads to artifacts with MPEG-4
	 * see #33586: Encoding to mpeg4 makes first frame(s) blocky
	 */
	c->rc_initial_buffer_occupancy = rd->ffcodecdata.rc_buffer_size * 3 / 4;
	c->rc_buffer_aggressivity = 1.0;
#endif

	c->me_method = ME_EPZS;

	codec = avcodec_find_encoder(c->codec_id);
	if (!codec)
		return NULL;

	/* Be sure to use the correct pixel format(e.g. RGB, YUV) */
	if (codec->pix_fmts) {
		c->pix_fmt = codec->pix_fmts[0];
	}
	else {
		/* makes HuffYUV happy ... */
		c->pix_fmt = PIX_FMT_YUV422P;
	}

	if (ffmpeg_type == FFMPEG_XVID) {
		/* arghhhh ... */
		c->pix_fmt = PIX_FMT_YUV420P;
		c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
	}

	if (codec_id == AV_CODEC_ID_H264) {
		/* correct wrong default ffmpeg param which crash x264 */
		c->qmin = 10;
		c->qmax = 51;
	}

	/* Keep lossless encodes in the RGB domain. */
	if (codec_id == AV_CODEC_ID_HUFFYUV) {
		/* HUFFYUV was PIX_FMT_YUV422P before */
		c->pix_fmt = PIX_FMT_RGB32;
	}

	if (codec_id == AV_CODEC_ID_FFV1) {
		c->pix_fmt = PIX_FMT_RGB32;
	}

	if (codec_id == AV_CODEC_ID_QTRLE) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = PIX_FMT_ARGB;
		}
	}

	if (codec_id == AV_CODEC_ID_PNG) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = PIX_FMT_RGBA;
		}
	}

	if ((of->oformat->flags & AVFMT_GLOBALHEADER)
#if 0
	    || !strcmp(of->oformat->name, "mp4")
	    || !strcmp(of->oformat->name, "mov")
	    || !strcmp(of->oformat->name, "3gp")
#endif
	    )
	{
		PRINT("Using global header\n");
		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	/* Determine whether we are encoding interlaced material or not */
	if (rd->mode & R_FIELDS) {
		PRINT("Encoding interlaced video\n");
		c->flags |= CODEC_FLAG_INTERLACED_DCT;
		c->flags |= CODEC_FLAG_INTERLACED_ME;
	}

	/* xasp & yasp got float lately... */
	st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q(((double) rd->xasp / (double) rd->yasp), 255);

	set_ffmpeg_properties(rd, c, "video", &opts);

	if (avcodec_open2(c, codec, &opts) < 0) {
		BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
		av_dict_free(&opts);
		return NULL;
	}
	av_dict_free(&opts);

	current_frame = alloc_picture(c->pix_fmt, c->width, c->height);

	img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_BGR32,
	                                 c->width, c->height, c->pix_fmt, SWS_BICUBIC,
	                                 NULL, NULL, NULL);
	return st;
}
Developer: sonyomega | Project: blender-git | Lines: 101


Example 13: Java_com_example_testffmpeg_CFFmpegJni_IPlay

/*
 * Class:     com_example_testffmpeg_CFFmpegJni
 * Method:    IPlay
 * Signature: ()I
 */
jint Java_com_example_testffmpeg_CFFmpegJni_IPlay(JNIEnv *env, jobject thiz)
{
	/// Return value
	int nRet = -1;
	/// Close any previously opened input first
	if(NULL != m_pFormatCtx)
	{
		avformat_close_input(&m_pFormatCtx);
		/// Release the context
		av_free(m_pFormatCtx);
		m_pFormatCtx = NULL;
	}

	if(NULL == m_pFormatCtx)
	{
		/// Open the input
		if(0 != (nRet = avformat_open_input(&m_pFormatCtx, m_szURLPath, 0, NULL/*&m_pDictOptions*/)))
		{
			char szTemp[256];
			memset(szTemp, 0x00, sizeof(szTemp));
			av_strerror(nRet, szTemp, 255);
			/// Log the error
			LOGD("%s, Error Code = %d, %s, Error = %s", m_szURLPath, nRet,
					" The Error URL Or Path--------------->", szTemp);
			return nRet;
		}
	}

	// m_pFormatCtx->max_analyze_duration = 1000;
	// m_pFormatCtx->probesize = 2048;
	if(0 > avformat_find_stream_info(m_pFormatCtx, NULL))
	{
		LOGD("Couldn't find stream information.");
		return -1;
	}

	int nVideoIndex = -1;
	for(int i = 0; i < m_pFormatCtx->nb_streams; i++)
	{
		if(AVMEDIA_TYPE_VIDEO == m_pFormatCtx->streams[i]->codec->codec_type)
		{
			nVideoIndex = i;
			break;
		}
	}
	if(-1 == nVideoIndex)
	{
		LOGD("Didn't find a video stream.");
		return -1;
	}

	AVCodecContext* pCodecCtx = m_pFormatCtx->streams[nVideoIndex]->codec;
	AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(NULL == pCodec)
	{
		LOGD("Codec not found.");
		return -1;
	}

	if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
	{
		pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
	}

	if(0 > avcodec_open2(pCodecCtx, pCodec, NULL))
	{
		LOGD("Could not open codec.");
		return -1;
	}

	/// Frame variables
	AVFrame *pFrame = NULL, *pFrameYUV = NULL;
	pFrame = avcodec_alloc_frame();
	pFrameYUV = avcodec_alloc_frame();
	/// Allocate the conversion buffer
	int nConvertSize = avpicture_get_size(PIX_FMT_RGB565, iWidth, iHeight);
	uint8_t* pConvertbuffer = new uint8_t[nConvertSize];
	avpicture_fill((AVPicture *)pFrameYUV, pConvertbuffer, PIX_FMT_RGB565, iWidth, iHeight);

	/// Decoding state
	int nCodecRet, nHasGetPicture;
	/// Packet for the frame data
	int nPackgeSize  = pCodecCtx->width * pCodecCtx->height;
	AVPacket* pAVPacket = (AVPacket *)malloc(sizeof(AVPacket));
	av_new_packet(pAVPacket, nPackgeSize);

	/// Dump the stream information
	av_dump_format(m_pFormatCtx, 0, m_szURLPath, 0);

	/// Set the playing state
	m_bIsPlaying = true;

	/// Conversion context: convert the decoded pixel format for display
	struct SwsContext* img_convert_ctx = NULL;
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
// ... (remaining code omitted) ...
Developer: hongyunxp | Project: mxplayer | Lines: 101


Example 14: calloc(

/* reads a complete image as is into image buffer */
static image *pngRead(skin_t *skin, const char *fname)
{
    unsigned int i;
    guiImage bmp;
    image *bf;
    char *filename = NULL;
    FILE *fp;

    if(!strcasecmp(fname, "NULL")) return 0;

    /* find filename in order file file.png */
    if(!(fp = fopen(fname, "rb")))
    {
        filename = calloc(1, strlen(skin->skindir) + strlen(fname) + 6);
        sprintf(filename, "%s/%s.png", skin->skindir, fname);
        if(!(fp = fopen(filename, "rb")))
        {
            mp_msg(MSGT_GPLAYER, MSGL_ERR, "[png] cannot find image %s\n", filename);
            free(filename);
            return 0;
        }
    }
    fclose(fp);

    for (i=0; i < skin->imagecount; i++)
        if(!strcmp(fname, skin->images[i]->name))
        {
            mp_msg(MSGT_GPLAYER, MSGL_DBG2, "[png] skinfile %s already exists\n", fname);
            free(filename);
            return skin->images[i];
        }

    (skin->imagecount)++;
    skin->images = realloc(skin->images, sizeof(image *) * skin->imagecount);
    bf = skin->images[(skin->imagecount) - 1] = calloc(1, sizeof(image));
    bf->name = strdup(fname);
    bpRead(filename ? filename : fname, &bmp);
    free(filename);
    bf->width = bmp.Width; bf->height = bmp.Height;

    bf->size = bf->width * bf->height * skin->desktopbpp / 8;
    if (skin->desktopbpp == 32)
      bf->data = bmp.Image;
    else {
      const uint8_t *src[4] = { bmp.Image, NULL, NULL, NULL};
      int src_stride[4] = { 4 * bmp.Width, 0, 0, 0 };
      uint8_t *dst[4] = { NULL, NULL, NULL, NULL };
      int dst_stride[4];
      enum AVPixelFormat out_pix_fmt = PIX_FMT_NONE;
      struct SwsContext *sws;
      if      (skin->desktopbpp == 16) out_pix_fmt = PIX_FMT_RGB555;
      else if (skin->desktopbpp == 24) out_pix_fmt = PIX_FMT_RGB24;
      av_image_fill_linesizes(dst_stride, out_pix_fmt, bmp.Width);
      sws = sws_getContext(bmp.Width, bmp.Height, PIX_FMT_RGB32,
                           bmp.Width, bmp.Height, out_pix_fmt,
                           SWS_POINT, NULL, NULL, NULL);
      bf->data = malloc(bf->size);
      dst[0] = bf->data;
      sws_scale(sws, src, src_stride, 0, bmp.Height, dst, dst_stride);
      sws_freeContext(sws);
      free(bmp.Image);
    }
    return bf;
}
Developer: AungWinnHtut | Project: mplayer | Lines: 64


Example 15: Java_com_leixiaohua1020_sffmpegandroiddecoder_MainActivity_decode

JNIEXPORT jint JNICALL Java_com_leixiaohua1020_sffmpegandroiddecoder_MainActivity_decode(
		JNIEnv *env, jobject obj, jstring input_jstr, jstring output_jstr) {
	AVFormatContext *pFormatCtx;
	int i, videoindex;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;
	AVFrame *pFrame, *pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;
	FILE *fp_yuv;
	int frame_cnt;
	clock_t time_start, time_finish;
	double time_duration = 0.0;

	char input_str[500] = { 0 };
	char output_str[500] = { 0 };
	char info[1000] = { 0 };
	sprintf(input_str, "%s", (*env)->GetStringUTFChars(env, input_jstr, NULL));
	sprintf(output_str, "%s", (*env)->GetStringUTFChars(env, output_jstr, NULL));

	//FFmpeg av_log() callback
	av_log_set_callback(custom_log);

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if (avformat_open_input(&pFormatCtx, input_str, NULL, NULL) != 0) {
		LOGE("Couldn't open input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		LOGE("Couldn't find stream information.\n");
		return -1;
	}
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}
	}
	if (videoindex == -1) {
		LOGE("Couldn't find a video stream.\n");
		return -1;
	}
	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		LOGE("Couldn't find Codec.\n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		LOGE("Couldn't open codec.\n");
		return -1;
	}

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t *) av_malloc(
			avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *) pFrameYUV, out_buffer, PIX_FMT_YUV420P,
			pCodecCtx->width, pCodecCtx->height);
	packet = (AVPacket *) av_malloc(sizeof(AVPacket));

	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
			pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
			PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	sprintf(info, "[Input     ]%s\n", input_str);
	sprintf(info, "%s[Output    ]%s\n", info, output_str);
	sprintf(info, "%s[Format    ]%s\n", info, pFormatCtx->iformat->name);
	sprintf(info, "%s[Codec     ]%s\n", info, pCodecCtx->codec->name);
	sprintf(info, "%s[Resolution]%dx%d\n", info, pCodecCtx->width, pCodecCtx->height);

	fp_yuv = fopen(output_str, "wb+");
	if (fp_yuv == NULL) {
		printf("Cannot open output file.\n");
		return -1;
	}

	frame_cnt = 0;
	time_start = clock();

	while (av_read_frame(pFormatCtx, packet) >= 0) {
		if (packet->stream_index == videoindex) {
// ... (remaining code omitted) ...
Developer: wliuxingxiangyu | Project: SFFmpegAndroidDecoder | Lines: 101


Example 16: artwork_rescale

// ... (preceding code omitted) ...
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for encoding: %s\n", strerror(AVUNERROR(ret)));
      ret = -1;
      goto out_free_dst_ctx;
    }

  i_frame = avcodec_alloc_frame();
  o_frame = avcodec_alloc_frame();

  if (!i_frame || !o_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not allocate input/output frame\n");
      ret = -1;
      goto out_free_frames;
    }

  ret = avpicture_get_size(dst->pix_fmt, src->width, src->height);

  DPRINTF(E_DBG, L_ART, "Artwork buffer size: %d\n", ret);

  buf = (uint8_t *)av_malloc(ret);
  if (!buf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for artwork buffer\n");
      ret = -1;
      goto out_free_frames;
    }

  avpicture_fill((AVPicture *)o_frame, buf, dst->pix_fmt, src->width, src->height);

  swsctx = sws_getContext(src->width, src->height, src->pix_fmt,
			  dst->width, dst->height, dst->pix_fmt,
			  SWS_BICUBIC, NULL, NULL, NULL);
  if (!swsctx)
    {
      DPRINTF(E_LOG, L_ART, "Could not get SWS context\n");
      ret = -1;
      goto out_free_buf;
    }

  /* Get frame */
  have_frame = 0;
  while (av_read_frame(src_ctx, &pkt) == 0)
    {
      if (pkt.stream_index != s)
	{
	  av_free_packet(&pkt);
	  continue;
	}

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 32)
      /* FFmpeg 0.6 */
      avcodec_decode_video2(src, i_frame, &have_frame, &pkt);
#else
      avcodec_decode_video(src, i_frame, &have_frame, pkt.data, pkt.size);
#endif

      break;
    }

  if (!have_frame)
    {
// ... (remaining code omitted) ...
Developer: juvenal | Project: forked-daapd | Lines: 67


Example 17: doTest

// test by ref -> src -> dst -> out & compare out against ref
// ref & out are YV12
static int doTest(uint8_t *ref[4], int refStride[4], int w, int h,
                  enum PixelFormat srcFormat, enum PixelFormat dstFormat,
                  int srcW, int srcH, int dstW, int dstH, int flags)
{
    uint8_t *src[4] = {0};
    uint8_t *dst[4] = {0};
    uint8_t *out[4] = {0};
    int srcStride[4], dstStride[4];
    int i;
    uint64_t ssdY, ssdU=0, ssdV=0, ssdA=0;
    struct SwsContext *srcContext = NULL, *dstContext = NULL,
                      *outContext = NULL;
    int res;

    res = 0;
    for (i=0; i<4; i++) {
        // avoid stride % bpp != 0
        if (srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
            srcStride[i]= srcW*3;
        else if (srcFormat==PIX_FMT_RGB48BE || srcFormat==PIX_FMT_RGB48LE)
            srcStride[i]= srcW*6;
        else
            srcStride[i]= srcW*4;

        if (dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
            dstStride[i]= dstW*3;
        else if (dstFormat==PIX_FMT_RGB48BE || dstFormat==PIX_FMT_RGB48LE)
            dstStride[i]= dstW*6;
        else
            dstStride[i]= dstW*4;

        /* Image buffers passed into libswscale can be allocated any way you
         * prefer, as long as they're aligned enough for the architecture, and
         * they're freed appropriately (such as using av_free for buffers
         * allocated with av_malloc). */
        src[i]= av_mallocz(srcStride[i]*srcH);
        dst[i]= av_mallocz(dstStride[i]*dstH);
        out[i]= av_mallocz(refStride[i]*h);
        if (!src[i] || !dst[i] || !out[i]) {
            perror("Malloc");
            res = -1;
            goto end;
        }
    }

    srcContext= sws_getContext(w, h, PIX_FMT_YUVA420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
    if (!srcContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(PIX_FMT_YUVA420P),
                sws_format_name(srcFormat));
        res = -1;
        goto end;
    }
    dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
    if (!dstContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(srcFormat),
                sws_format_name(dstFormat));
        res = -1;
        goto end;
    }
    outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUVA420P, flags, NULL, NULL, NULL);
    if (!outContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(dstFormat),
                sws_format_name(PIX_FMT_YUVA420P));
        res = -1;
        goto end;
    }

//    printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
//        (int)src[0], (int)src[1], (int)src[2]);

    sws_scale(srcContext, ref, refStride, 0, h   , src, srcStride);
    sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
    sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);

    ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
    if (hasChroma(srcFormat) && hasChroma(dstFormat)) {
        //FIXME check that output is really gray
        ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
        ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
    }
Developer: 119 | Project: dropcam_for_iphone | Lines: 88


Example 18: _tmain

int _tmain(int argc, _TCHAR* argv[])
{
	AVFormatContext *pFormatCtx;
	char *filepath = "Titanic.ts";

	char *file_out = "Titanic.yuv";
	char *file_out1 = "Titanic.h264";
	FILE *fp_out;
	FILE *fp_out1;
	errno_t err, err1;
	err = fopen_s(&fp_out, file_out, "wb+");
	if (err != 0)
	{
		printf("Could not open output file %s\n", file_out);
		return -1;
	}
	err1 = fopen_s(&fp_out1, file_out1, "wb+");
	if (err1 != 0)
	{
		printf("Could not open output file %s\n", file_out1);
		return -1;
	}

	av_register_all();                        // register all components
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();    // allocate the format context

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) < 0) // open the input video file
	{
		printf("Can't open the input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)  // probe the streams (video or audio)
	{
		printf("Can't find the stream information!\n");
		return -1;
	}

	int i, index_video = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)  // remember the video stream
		{
			index_video = i;
			break;
		}
	}
	if (index_video == -1)
	{
		printf("Can't find a video stream;\n");
		return -1;
	}

	AVCodecContext *pCodecCtx = pFormatCtx->streams[index_video]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);  // find the decoder
	if (pCodec == NULL)
	{
		printf("Can't find a decoder!\n");
		return -1;
	}

	//if (pCodecCtx->codec_id == AV_CODEC_ID_H264)
	//{
	//	av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0);
	//	av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
	//}

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)  // open the decoder
	{
		printf("Can't open the decoder!\n");
		return -1;
	}

	AVFrame *pFrame = av_frame_alloc();  // this only allocates the AVFrame itself, not the data buffers
	AVFrame *pFrameYUV = av_frame_alloc();
	uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));  // allocate the output buffer
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);  // bind the frame to the buffer

	AVPacket *pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_init_packet(pkt);

	SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	int frame_cnt = 0;
	int ret;
	int get_frame;
	int y_size = pCodecCtx->width*pCodecCtx->height;

	while (av_read_frame(pFormatCtx, pkt) >= 0)
	{
		if (pkt->stream_index == index_video)
		{
			fwrite(pkt->data, 1, pkt->size, fp_out1);
			if (avcodec_decode_video2(pCodecCtx, pFrame, &get_frame, pkt) < 0)
			{
				printf("Decode Error!\n");
// ... (remaining code omitted) ...
Developer: zqb1992 | Project: ffmpeg_decoder_video | Lines: 101


Example 19: main

// ... (preceding code omitted) ...
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pCodecParam);

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Make a screen to put our video
#ifndef __DARWIN__
    pWindows = SDL_CreateWindow(argv[1], SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                                pCodecParam->width, pCodecParam->height,
                                SDL_WINDOW_BORDERLESS | SDL_WINDOW_RESIZABLE);
#else
    screen = SDL_SetVideoMode(pCodecParam->width, pCodecParam->height, 24, 0);
#endif
    if (!pWindows) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    pRenderer = SDL_CreateRenderer(pWindows, -1, 0);
    if (!pRenderer) {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        exit(1);
    }
    pTexture = SDL_CreateTexture(pRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
                                 pCodecParam->width, pCodecParam->height);

    sws_ctx =
        sws_getContext
        (
        pCodecParam->width,
        pCodecParam->height,
        (AVPixelFormat)pCodecParam->format,
        pCodecParam->width,
        pCodecParam->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
        );

    pict = av_frame_alloc();
    if (pict == nullptr) {
        exit(1);
    }
    if (av_image_alloc(pict->data, pict->linesize,
        pCodecParam->width, pCodecParam->height,
        (AVPixelFormat)pCodecParam->format, 1) < 0) {
        exit(1);
    }

    // Read frames and save first five frames to disk
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            // avcodec_decode_video2 is deprecated; use avcodec_send_packet() and avcodec_receive_frame().
            send_packet = avcodec_send_packet(pCodecCtx, &packet);
            receive_frame = avcodec_receive_frame(pCodecCtx, pFrame);
// ... (remaining code omitted) ...
Developer: HeartUnchange | Project: ffmpeg-study-note | Lines: 67


Example 20: FFMpegUtils_print

static void FFMpegUtils_print(JNIEnv *env, jobject obj, jint pAVFormatContext) {
	int i;
	AVCodecContext *pCodecCtx;
	AVFrame *pFrame;
	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = (AVFormatContext *) pAVFormatContext;
	struct SwsContext *img_convert_ctx;

	LOGD("playing");

	// Find the first video stream
	int videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1) {
		jniThrowException(env, "java/io/IOException", "Didn't find a video stream");
		return;
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		jniThrowException(env, "java/io/IOException", "Unsupported codec!");
		return; // Codec not found
	}
	// Open codec
	if (avcodec_open(pCodecCtx, pCodec) < 0) {
		jniThrowException(env, "java/io/IOException", "Could not open codec");
		return; // Could not open codec
	}

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	// Allocate an AVFrame structure
	AVFrame *pFrameRGB = avcodec_alloc_frame();
	if (pFrameRGB == NULL) {
		jniThrowException(env, "java/io/IOException", "Could not allocate an AVFrame structure");
		return;
	}

	uint8_t *buffer;
	int numBytes;
	// Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB565, pCodecCtx->width, pCodecCtx->height);
	buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB565,
			pCodecCtx->width, pCodecCtx->height);

	int w = pCodecCtx->width;
	int h = pCodecCtx->height;
	img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt, w, h,
			PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);

	int frameFinished;
	AVPacket packet;

	i = 0;
	int result = -1;
	while ((result = av_read_frame(pFormatCtx, &packet)) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
					packet.data, packet.size);

			// Did we get a video frame?
			if (frameFinished) {
				// Convert the image from its native format to RGB
				sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
						pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

				//FFMpegUtils_saveFrame(pFrameRGB, pCodecCtx->width,
				//		pCodecCtx->height, i);
				FFMpegUtils_handleOnVideoFrame(env, obj, pFrame, pCodecCtx->width,
						pCodecCtx->height);
				i++;
			}
		}

		// Free the packet that was allocated by av_read_frame
// ... (remaining code omitted) ...
Developer: WangCrystal | Project: FFplayer | Lines: 101


Example 21: main

// ... (preceding code omitted) ...
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=av_frame_alloc();

    // Make a screen to put our video
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
            pCodecCtx->height,
            SDL_YV12_OVERLAY,
            screen);

    // Read frames and save first five frames to disk
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                img_convert_ctx = sws_getContext(pCodecCtx->width,
                        pCodecCtx->height,
                        pCodecCtx->pix_fmt,
                        pCodecCtx->width,
                        pCodecCtx->height,
                        PIX_FMT_YUV420P,
                        SWS_BICUBIC, NULL,
                        NULL, NULL);
                sws_scale(img_convert_ctx, pFrame->data,
                        pFrame->linesize,
                        0,
                        pFrame->height,
                        pict.data,
                        pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch(event.type) {
            case SDL_QUIT:
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
        }
    }

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
Developer: cuijinquan | Project: ffmpeg_tutorial | Lines: 101


Example 22: main

// ... (preceding code omitted) ...
		free(ScreenData);
	ScreenData = (BYTE*)malloc(4 * ScreenX * ScreenY);

	AVPacket pkt;
	clock_t start_t = GetTickCount();
	long long wait_time = 0;

	uint64_t total_size;

	while(1) {
		hOld = SelectObject(hdcMem, hBitmap);
		BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hScreen, 0, 0, SRCCOPY);
		SelectObject(hdcMem, hOld);

		GetDIBits(hdcMem, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

		// calculate the bytes needed for the output image
		int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height);

		// create buffer for the output image
		uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

		// create ffmpeg frame structures. These do not allocate space for image data,
		// just the pointers and other information about the image.
		AVFrame* inpic = avcodec_alloc_frame();
		AVFrame* outpic = avcodec_alloc_frame();

		// this will set the pointers in the frame structures to the right points in
		// the input and output buffers.
		avpicture_fill((AVPicture*)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY);
		avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P, new_width, new_height);

		// create the conversion context
		struct SwsContext *fooContext = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32,
			new_width, new_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

		// perform the conversion
		sws_scale(fooContext, inpic->data, inpic->linesize, 0, ScreenY, outpic->data, outpic->linesize);

		// Initialize a new frame
		AVFrame* newFrame = avcodec_alloc_frame();
		int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
		uint8_t* picture_buf = av_malloc(size);
		avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

		// Copy only the frame content without additional fields
		av_picture_copy((AVPicture*) newFrame, (AVPicture*) outpic, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

		// encode the image
		int got_output;

		av_init_packet(&pkt);
		pkt.data = NULL; // packet data will be allocated by the encoder
		pkt.size = 0;

		// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS')
		newFrame->pts = frame_count2;

		ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
		if (ret < 0) {
			fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
			exit(1);
		}

		if (got_output) {
			if (video_st->codec->coded_frame->key_frame)
// ... (remaining code omitted) ...
Developer: husman | Project: Development-Side-Projects | Lines: 67
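Example 22 allocates outbuffer, inpic, outpic, newFrame and picture_buf afresh on every pass of its while(1) loop and never releases them, and it also rebuilds the SwsContext per frame, so a long screen capture leaks steadily. A hedged sketch of the one-time setup, at the same legacy avpicture_* API level as the excerpt and assuming the capture and output sizes are fixed for the session:

/* Before the capture loop (sketch, not the project's code). */
int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height);
uint8_t *outbuffer = (uint8_t*)av_malloc(nbytes);
AVFrame *inpic  = avcodec_alloc_frame();
AVFrame *outpic = avcodec_alloc_frame();
avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P,
               new_width, new_height);
struct SwsContext *ctx = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32,
                                        new_width, new_height, AV_PIX_FMT_YUV420P,
                                        SWS_FAST_BILINEAR, NULL, NULL, NULL);

/* Inside the loop: only re-point inpic at the freshly captured pixels. */
avpicture_fill((AVPicture*)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY);
sws_scale(ctx, inpic->data, inpic->linesize, 0, ScreenY,
          outpic->data, outpic->linesize);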


Example 23: main

//......... some code omitted here .........
    char allmovies_query[150];
    memset(allmovies_query, 0, 150);
    fps = (double)pFormatCtx->streams[videoStream]->r_frame_rate.num/(double)pFormatCtx->streams[videoStream]->r_frame_rate.den;
    //if (repeated) {
    //  filename_suffix = (int)tv.tv_sec;
    //  sprintf(filename, "%s_%d", filename, filename_suffix);
    //  sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), filename_suffix);
    //} else {
    sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), (int)tv.tv_sec);
    //}
    retval = sqlite3_exec(handle,allmovies_query,0,0,0);

    printf("%d %d\n",pAudioCodecCtx->sample_rate,pAudioCodecCtx->channels);

    i = 0;
    unsigned int offset = 0; // bytes
    //fftw_complex *in;
    int totalSamples = 0;
    //in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
    int counter = 0;
    float audioTime = 0.0;

    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Decode video
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pVideoCodecCtx, pVideoFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                if (pVideoCodecCtx->pix_fmt != PIX_FMT_YUV420P) {
                    // Convert the image from its native format to YUV (PIX_FMT_YUV420P)
                    //img_convert((AVPicture *)pVideoFrameYUV, PIX_FMT_YUV420P, (AVPicture*)pVideoFrame, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height);
                    sws_context = sws_getContext(pVideoCodecCtx->width, pVideoCodecCtx->height, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
                    sws_scale(sws_context, pVideoFrame->data, pVideoFrame->linesize, 0, pVideoCodecCtx->height, pVideoFrameYUV->data, pVideoFrameYUV->linesize);
                    sws_freeContext(sws_context);
                    retval = AvgFrameImport(pVideoFrameYUV, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                } else {
                    retval = AvgFrameImport(pVideoFrame, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                }
            }
        }

        // Decode audio
        // http://qtdvd.com/guides/ffmpeg.html#decode
        if (packet.stream_index == audioStream) {
            offset = 0;
            int frameSize;
            int length = 0;
            memset(audioBuffer, 0, sizeof(audioBuffer));
            while (packet.size > 0) {
                //memset(audioBuffer, 0, sizeof(audioBuffer));
                frameSize = numAudioBytes;

                // Copy decoded information into audioBuffer
                // frameSize gets set as the decoded frameSize, in bytes
                length = avcodec_decode_audio3(pAudioCodecCtx, audioBuffer, &frameSize, &packet);
                if (length <= 0) { // Error, see if we can recover.
                    packet.size--;
                    packet.data++;
                }
                else {
                    // Slide pointer to next frame and update size
                    printf("read %d bytes\n", length);
Developer: straypacket | Project: videofingerprinting | Lines: 67
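Two small notes on Example 23. First, the frame rate is computed by dividing r_frame_rate.num by r_frame_rate.den by hand; av_q2d() performs exactly this rational-to-double conversion. Second, sprintf() into the fixed 150-byte allmovies_query with an unchecked filename risks overflow, and snprintf() is the safer drop-in. Both shown as a sketch using the excerpt's own variables:

/* Idiomatic rational-to-double conversion, equivalent to the manual division. */
fps = av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate);

/* Bounded formatting into the fixed-size query buffer. */
snprintf(allmovies_query, sizeof(allmovies_query),
         "insert into allmovies (name,fps,date) values ('%s',%d,%d);",
         filename, (int)(fps*100), (int)tv.tv_sec);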


Example 24: stream_component_open

int stream_component_open(VideoState* is, int stream_index) {
	AVFormatContext* pFormatCtx = is->pFormatCtx;
	AVCodecContext* codecCtx;
	AVCodec* codec;
	SDL_AudioSpec wanted_spec, spec;

	if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
		return -1;
	}

	codec = avcodec_find_decoder(
			pFormatCtx->streams[stream_index]->codec->codec_id);
	if (!codec) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	codecCtx = avcodec_alloc_context3(codec);
	if (avcodec_copy_context(codecCtx, pFormatCtx->streams[stream_index]->codec)
			!= 0) {
		fprintf(stderr, "Couldn't copy codec context\n");
		return -1;
	}

	if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
		wanted_spec.freq = codecCtx->sample_rate;
		wanted_spec.format = AUDIO_S16SYS;
		wanted_spec.channels = codecCtx->channels;
		wanted_spec.silence = 0;
		wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
		wanted_spec.callback = audio_callback;
		wanted_spec.userdata = is;

		if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
			fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
			return -1;
		}

		is->au_convert_ctx = swr_alloc();
		uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
		AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
		int out_sample_rate = 44100;
		int64_t in_channel_layout = av_get_default_channel_layout(
				codecCtx->channels);
		is->au_convert_ctx = swr_alloc_set_opts(is->au_convert_ctx,
				out_channel_layout, out_sample_fmt, out_sample_rate,
				in_channel_layout, codecCtx->sample_fmt, codecCtx->sample_rate,
				0,
				NULL);
		swr_init(is->au_convert_ctx);
	}

	if (avcodec_open2(codecCtx, codec, NULL) < 0) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	switch (codecCtx->codec_type) {
	case AVMEDIA_TYPE_AUDIO:
		is->audioStream = stream_index;
		is->audio_st = pFormatCtx->streams[stream_index];
		is->audio_ctx = codecCtx;
		is->audio_buf_size = 0;
		is->audio_buf_index = 0;
		memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
		packet_queue_init(&is->audioq);
		SDL_PauseAudio(0);
		break;
	case AVMEDIA_TYPE_VIDEO:
		is->videoStream = stream_index;
		is->video_st = pFormatCtx->streams[stream_index];
		is->video_ctx = codecCtx;

		Hex2Str(codecCtx->extradata, codecCtx->extradata_size);
		printf("AVCodecID:%d\n", codec->id);

		is->frame_timer = (double) av_gettime() / 1000000.0;
		is->frame_last_delay = 40e-3;
		is->video_current_pts_time = av_gettime();

		packet_queue_init(&is->videoq);
		is->video_tid = SDL_CreateThread(video_thread, "video_thread", is);
		is->sws_ctx = sws_getContext(is->video_ctx->width,
				is->video_ctx->height, is->video_ctx->pix_fmt,
				is->video_ctx->width, is->video_ctx->height, AV_PIX_FMT_YUV420P,
				SWS_BILINEAR, NULL, NULL, NULL);
		break;
	default:
		break;
	}
	return 0;
}
Developer: soffio | Project: FFmpegTutorial | Lines: 90
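Example 24 only constructs and initializes the SwrContext; the actual resampling happens later, when decoded audio frames are pushed through it. For orientation, here is a hedged sketch of how such a context is typically driven from the decode path; out_buf, max_out_samples and frame are illustrative names, not identifiers from this project:

/* Convert decoded samples to interleaved S16 stereo.
 * out_buf must hold at least max_out_samples * 2 channels * 2 bytes. */
uint8_t *out[] = { out_buf };
int out_samples = swr_convert(is->au_convert_ctx,
                              out, max_out_samples,
                              (const uint8_t **)frame->data,
                              frame->nb_samples);
if (out_samples < 0)
    fprintf(stderr, "swr_convert failed\n");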


Example 25: stream_component_open

int stream_component_open(VideoState *is, int stream_index) {
	AVFormatContext *pFormatCtx = is->pFormatCtx;
	AVCodecContext *codecCtx = NULL;
	AVCodec *codec = NULL;
	SDL_AudioSpec wanted_spec, spec;

	if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
		return -1;
	}

	codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codec->codec_id);
	if (!codec) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	codecCtx = avcodec_alloc_context3(codec);
	if (avcodec_copy_context(codecCtx, pFormatCtx->streams[stream_index]->codec) != 0) {
		fprintf(stderr, "Couldn't copy codec context\n");
		return -1; // Error copying codec context
	}

	if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
		// Set audio settings from codec info
		wanted_spec.freq = codecCtx->sample_rate;
		wanted_spec.format = AUDIO_S16SYS;
		wanted_spec.channels = codecCtx->channels;
		wanted_spec.silence = 0;
		wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
		wanted_spec.callback = audio_callback;
		wanted_spec.userdata = is;

		if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
			fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
			return -1;
		}
		is->audio_hw_buf_size = spec.size;
	}

	if (avcodec_open2(codecCtx, codec, NULL) < 0) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	switch (codecCtx->codec_type) {
	case AVMEDIA_TYPE_AUDIO:
		is->audioStream = stream_index;
		is->audio_st = pFormatCtx->streams[stream_index];
		is->audio_ctx = codecCtx;
		is->audio_buf_size = 0;
		is->audio_buf_index = 0;
		memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
		packet_queue_init(&is->audioq);

		// The decoded raw audio must first be converted into the format SDL expects.
		// Configure swr_ctx from the raw audio format and the SDL output format.
		is->swr_ctx = swr_alloc_set_opts(NULL,
			AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, is->audio_ctx->sample_rate,
			av_get_default_channel_layout(is->audio_ctx->channels), is->audio_ctx->sample_fmt, is->audio_ctx->sample_rate,
			0, NULL);
		// Initialize swr_ctx
		swr_init(is->swr_ctx);

		SDL_PauseAudio(0);
		break;
	case AVMEDIA_TYPE_VIDEO:
		is->videoStream = stream_index;
		is->video_st = pFormatCtx->streams[stream_index];
		is->video_ctx = codecCtx;
		is->frame_timer = (double)av_gettime() / 1000000.0;
		is->frame_last_delay = 40e-3;

		packet_queue_init(&is->videoq);
		is->video_tid = SDL_CreateThread(video_thread, is);
		is->sws_ctx = sws_getContext(is->video_ctx->width, is->video_ctx->height,
			is->video_ctx->pix_fmt, is->video_ctx->width,
			is->video_ctx->height, PIX_FMT_YUV420P,
			SWS_BILINEAR, NULL, NULL, NULL
			);
		break;
	default:
		break;
	}
	return 0;
}
Developer: shileiz | Project: notes | Lines: 84


Example 26: av_init_packet

/** Decoder. The supplied buffer should contain an H264 video frame, then DecodeFrame
  will pass the buffer to the avcodec_decode_video2 method. Once decoded we then
  use the get_picture command to convert the frame to RGB24. The RGB24 buffer is
  then used to create a FrameInfo object which is placed on our video queue.

  \param pBuffer Memory buffer holding an H264 frame
  \param size Size of the buffer
*/
FrameInfo* CVideoDecoder::DecodeFrame(unsigned char *pBuffer, int size)
{
	FrameInfo	*p_block=NULL;
	uint8_t startCode4[] = {0x00, 0x00, 0x00, 0x01};
	int got_frame = 0;
	AVPacket packet;

	// Initialize optional fields of a packet with default values.
	av_init_packet(&packet);

	// set the buffer and the size
	packet.data = pBuffer;
	packet.size = size;

	while (packet.size > sizeof(startCode4))
	{
		// Decode the video frame of size avpkt->size from avpkt->data into picture.
		int len = avcodec_decode_video2(m_codecContext, m_frame, &got_frame, &packet);
		if(len<0)
		{
			TRACE_ERROR("Failed to decode video len=%d",len);
			break;
		}

		// sometimes we don't get the whole frame, so move
		// forward and try again
		if ( !got_frame )
		{
			packet.size -= len;
			packet.data += len;
			continue;
		}

		// allocate a working frame to store our rgb image
		AVFrame * rgb = avcodec_alloc_frame();
		if(rgb==NULL)
		{
			TRACE_ERROR("Failed to allocate new av frame");
			return NULL;
		}

		// Allocate and return an SwsContext.
		struct SwsContext * scale_ctx = sws_getContext(m_codecContext->width,
			m_codecContext->height,
			m_codecContext->pix_fmt,
			m_codecContext->width,
			m_codecContext->height,
			PIX_FMT_BGR24,
			SWS_BICUBIC,
			NULL,
			NULL,
			NULL);
		if (scale_ctx == NULL)
		{
			TRACE_ERROR("Failed to get context");
			continue;
		}

		// Calculate the size in bytes that a picture of the given width and height
		// would occupy if stored in the given picture format.
		int numBytes = avpicture_get_size(PIX_FMT_RGB24,
			m_codecContext->width,
			m_codecContext->height);

		try{
			// create one of our FrameInfo objects
			p_block = FrameNew(numBytes);
			if(p_block==NULL){
				// cleanup the working buffer
				av_free(rgb);
				sws_freeContext(scale_ctx);
				scale_ctx=NULL;
				return NULL;
			}

			// Fill our frame buffer with the rgb image
			avpicture_fill((AVPicture*)rgb,
				(uint8_t*)p_block->pdata,
				PIX_FMT_RGB24,
				m_codecContext->width,
				m_codecContext->height);

			// Scale the image slice in srcSlice and put the resulting scaled slice
			// in the image in dst.
			sws_scale(scale_ctx,
				m_frame->data,
				m_frame->linesize,
				0,
				m_codecContext->height,
				rgb->data,
				rgb->linesize);
//......... some code omitted here .........
Developer: dhorth | Project: LiveProxy | Lines: 101
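One thing in Example 26 looks like a genuine bug rather than a style issue: the SwsContext is created with a PIX_FMT_BGR24 destination, while avpicture_get_size() and avpicture_fill() both describe the destination picture as PIX_FMT_RGB24. The buffer sizes are identical, so nothing crashes, but the red and blue channels arrive swapped relative to what the surrounding code (and the function's own comment, which promises RGB24) assumes. The fix, sketched with the excerpt's names, is to use one constant in all three places:

/* Use the same destination format for the context, the size
 * calculation and the fill (sketch). */
struct SwsContext * scale_ctx = sws_getContext(
	m_codecContext->width, m_codecContext->height, m_codecContext->pix_fmt,
	m_codecContext->width, m_codecContext->height, PIX_FMT_RGB24,
	SWS_BICUBIC, NULL, NULL, NULL);
int numBytes = avpicture_get_size(PIX_FMT_RGB24,
	m_codecContext->width, m_codecContext->height);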


Example 27: main

//......... some code omitted here .........
  }

  av_dump_format(pFmtCtx, 0, filename,0);

  int i;
  int videoStream;
  AVCodecContext *pCodecCtx;

  videoStream = -1;
  for(i=0; i<pFmtCtx->nb_streams; i++) {
    if(pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      videoStream = i;
      break;
    }
  }

  if(videoStream == -1) {
    fprintf(stderr,"No stream found!\n");
    return -1;
  }

  pCodecCtx = pFmtCtx->streams[videoStream]->codec;

  /* find suitable codec */
  AVCodec * pCodec;
  pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

  if(!pCodec) {
    fprintf(stderr,"No suitable decoder found!\n");
    return -1;
  }

  if(avcodec_open2(pCodecCtx,pCodec,NULL)<0) {
    fprintf(stderr,"Could not open codec!\n");
    return -1;
  }

  AVFrame *pFrame;
  AVFrame *pPict;

  /* allocate data structs */
  pFrame = avcodec_alloc_frame();
  pPict  = avcodec_alloc_frame();

  uint8_t *buffer;
  int szPict;
  int sw,sh;

  sw = pCodecCtx->width;
  sh = pCodecCtx->height;

  // allocate buffer of picture size
  szPict = avpicture_get_size(PIX_FMT_RGB24, sw,sh);
  buffer = (uint8_t *)av_malloc(szPict*sizeof(uint8_t));

  /* associate frame with our buffer */
  avpicture_fill( (AVPicture *)pPict,buffer,PIX_FMT_RGB24, sw, sh);

  int frameFinished;
  AVPacket packet;

  /* init scale context to scale to terminal resolution */
  pSwsCtx = sws_getContext(sw,sh,pCodecCtx->pix_fmt,tw,th,PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

  i=0;
  /* read as long as we have packets in the stream */
  while(av_read_frame(pFmtCtx,&packet)>=0) {
    /* we only need packets of our video stream */
    if(packet.stream_index == videoStream) {
      /* decode video frame */
      avcodec_decode_video2(pCodecCtx,pFrame,&frameFinished,
                            &packet);

      if(frameFinished) {
        /* scale, display and sleep for ~30ms */
        sws_scale(pSwsCtx,pFrame->data, pFrame->linesize,0,sh,pPict->data, pPict->linesize);
        ascii_art(pPict);
        usleep(30000);
      }
    }

    /* free current packet struct */
    av_free_packet(&packet);
  }

  /* tidy up.. */
  av_free(buffer);
  av_free(pPict);
  av_free(pFrame);
  avcodec_close(pCodecCtx);
  avformat_free_context(pFmtCtx);

  return 0;
}
Developer: phreax | Project: asciiplayer | Lines: 101
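The excerpt never shows where the terminal dimensions tw and th come from; they are set in the omitted part. If you rebuild this ASCII player on a POSIX system, one plausible way to obtain them is the TIOCGWINSZ ioctl. This is my addition, not the project's code:

#include <sys/ioctl.h>
#include <unistd.h>

/* Query the controlling terminal for its size in character cells,
 * falling back to 80x24 if the query fails. */
struct winsize ws;
int tw = 80, th = 24;
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0 && ws.ws_col > 0) {
    tw = ws.ws_col;
    th = ws.ws_row;
}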


Example 28: main

int main(int argc, char* argv[])
{
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	char filepath[]="bigbuckbunny_480x272.h265";

	//SDL---------------------------
	int screen_w=0,screen_h=0;
	SDL_Window *screen;
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;

	FILE *fp_yuv;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}

	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}

	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	out_buffer=(uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	packet=(AVPacket *)av_malloc(sizeof(AVPacket));

	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");

	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

#if OUTPUT_YUV420P
	fp_yuv=fopen("output.yuv","wb+");
#endif

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		printf( "Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if(!screen) {
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
		return -1;
	}

	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;
//......... some code omitted here .........
Developer: houtuiwang | Project: FFmpeg | Lines: 101
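Example 28 is cut off just before its decode loop, but in players of this style the loop body is predictable: decode into pFrame, convert with the img_convert_ctx created above, then hand the three planes to SDL. The following is a hedged reconstruction reusing the excerpt's names; the real project may differ in its details:

ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret >= 0 && got_picture) {
	/* Convert whatever the decoder produced into YUV420P. */
	sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data,
		pFrame->linesize, 0, pCodecCtx->height,
		pFrameYUV->data, pFrameYUV->linesize);

	/* Feed the Y, U and V planes to the streaming texture and present. */
	SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
		pFrameYUV->data[0], pFrameYUV->linesize[0],
		pFrameYUV->data[1], pFrameYUV->linesize[1],
		pFrameYUV->data[2], pFrameYUV->linesize[2]);
	SDL_RenderClear(sdlRenderer);
	SDL_RenderCopy(sdlRenderer, sdlTexture, &sdlRect, NULL);
	SDL_RenderPresent(sdlRenderer);
}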



Note: The sws_getContext examples in this article were collected from GitHub, MSDocs and other source-code and documentation hosting platforms; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code remains with the original authors; consult each project's License before redistributing or reusing it, and do not reproduce this compilation without permission.

