This tutorial on C++ url_fopen code examples is quite practical; we hope it helps you.
This article collects typical usage examples of the C++ url_fopen function. If you are wondering what url_fopen does, how to call it, or what real-world uses look like, the 29 hand-picked code examples below should help. By default they are sorted by popularity; you can upvote the examples you like or find useful, and your feedback helps the system recommend better C++ code samples.

Example 1: yuv_read

static int yuv_read(ByteIOContext *f, int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
    ByteIOContext pb1, *pb = &pb1;
    int img_size, ret;
    char fname[1024], *p;
    int size;
    URLContext *h;
    AVImageInfo info1, *info = &info1;

    img_size = url_fsize(f);

    /* XXX: hack hack */
    h = url_fileno(f);
    url_get_filename(h, fname, sizeof(fname));

    if (infer_size(&info->width, &info->height, img_size) < 0) {
        return AVERROR_IO;
    }
    info->pix_fmt = PIX_FMT_YUV420P;

    ret = alloc_cb(opaque, info);
    if (ret)
        return ret;

    size = info->width * info->height;

    p = strrchr(fname, '.');
    if (!p || p[1] != 'Y')
        return AVERROR_IO;

    get_buffer(f, info->pict.data[0], size);

    p[1] = 'U';
    if (url_fopen(pb, fname, URL_RDONLY) < 0)
        return AVERROR_IO;
    get_buffer(pb, info->pict.data[1], size / 4);
    url_fclose(pb);

    p[1] = 'V';
    if (url_fopen(pb, fname, URL_RDONLY) < 0)
        return AVERROR_IO;
    get_buffer(pb, info->pict.data[2], size / 4);
    url_fclose(pb);
    return 0;
}
Developer ID: VoxOx, Project: VoxOx, Lines of code: 48
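Example 1 uses the legacy ByteIOContext-based I/O API from older FFmpeg releases, where url_fopen()/url_fclose() open and close a byte I/O context for a file path or URL. In newer libavformat versions this API was removed in favour of avio_open()/avio_read()/avio_close(). The following is only a minimal sketch of the equivalent read pattern, assuming a libavformat recent enough to provide the avio_* functions; the helper name read_plane is illustrative and not part of any project quoted on this page.

#include <stdint.h>
#include <libavformat/avio.h>

/* Hypothetical helper: read one raw plane from a file into buf.
 * Mirrors the url_fopen()/get_buffer()/url_fclose() sequence of Example 1,
 * but with the modern avio_* names. */
static int read_plane(const char *fname, uint8_t *buf, int size)
{
    AVIOContext *pb = NULL;
    int ret = avio_open(&pb, fname, AVIO_FLAG_READ); /* was: url_fopen(pb, fname, URL_RDONLY) */
    if (ret < 0)
        return ret;
    ret = avio_read(pb, buf, size);                  /* was: get_buffer(pb, buf, size) */
    avio_close(pb);                                  /* was: url_fclose(pb) */
    return ret < 0 ? ret : 0;
}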
Example 2: LOG

bool AVFormatWriter::OpenFile(void)
{
    if (!(m_fmt.flags & AVFMT_NOFILE))
    {
        if (url_fopen(&m_ctx->pb, m_filename.toAscii().constData(), URL_WRONLY) < 0)
        {
            LOG(VB_RECORD, LOG_ERR, LOC + "OpenFile(): url_fopen() failed");
            return false;
        }
    }

    m_ringBuffer = RingBuffer::Create(m_filename, true);

    if (!m_ringBuffer)
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "OpenFile(): RingBuffer::Create() failed");
        return false;
    }

    m_avfRingBuffer = new AVFRingBuffer(m_ringBuffer);
    URLContext *uc = (URLContext *)m_ctx->pb->opaque;
    uc->prot = &AVF_RingBuffer_Protocol;
    uc->priv_data = (void *)m_avfRingBuffer;

    av_write_header(m_ctx);

    return true;
}
Developer ID: StefanRoss, Project: mythtv, Lines of code: 30
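Example 2 (and Examples 7, 11, 27 and 28 below) all follow the same muxing pattern around url_fopen(): skip the open when the output format sets AVFMT_NOFILE, open the format context's byte I/O context for writing, then write the container header. The following is a stripped-down sketch of that pattern with the legacy API, assuming an already configured AVFormatContext; the helper name open_output is only for illustration.

/* Sketch of the legacy libavformat output-open sequence used by several
 * examples on this page. 'oc' is an already configured AVFormatContext. */
static int open_output(AVFormatContext *oc, const char *filename)
{
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)  /* open byte I/O context for writing */
            return -1;
    }
    av_write_header(oc);  /* write the container header */
    /* ... write packets with av_write_frame()/av_interleaved_write_frame() ... */
    /* at the end: av_write_trailer(oc); then url_fclose(oc->pb) unless AVFMT_NOFILE is set */
    return 0;
}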
Example 3: main

int main(int argc, char **argv)
{
    URL_FILE *handle;
    char buffer[BUFSIZE];

    if (argc > 1)
        strcpy(BASE, argv[1]);
    else {
        fprintf(stderr, "Usage: %s BaseURL\n", argv[0]);
        exit(1);
    }

    handle = url_fopen(BASE, "r");
    if (!handle) {
        fprintf(stderr, "couldn't url_fopen() %s\n", BASE);
        return 2;
    }

    while (!url_feof(handle)) {
        url_fgets(buffer, sizeof(buffer), handle);
        strlower(buffer);
        fputs(buffer, stdout);

        char *cur, link[BUFSIZE], full_link[BUFSIZE];
        cur = buffer;
        while ((cur = nextURL(cur)) != NULL) {
            getURL(cur, link, BUFSIZE-1);
            normalise(link, full_link, BUFSIZE-1);
            printf("%s\n", full_link);
            cur += strlen(link);
        }
    }

    url_fclose(handle);
    return 0;
}
Developer ID: Cross777, Project: COMP1927, Lines of code: 34
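Note that Example 3 (and Examples 9, 12, 13 and 22 below) use a different url_fopen: the fopen-style wrapper around libcurl from curl's examples/fopen.c, which returns a URL_FILE* and pairs with url_fread()/url_fgets()/url_feof()/url_fclose(). The following is a minimal usage sketch, assuming that URL_FILE wrapper is compiled into the program; dump_url is an illustrative name, not a function from the quoted projects.

#include <stdio.h>

/* Minimal sketch of the curl fopen.c-style API
 * (url_fopen/url_fread/url_feof/url_fclose). */
int dump_url(const char *url)
{
    URL_FILE *f = url_fopen(url, "r");  /* mode string mimics fopen() */
    if (!f)
        return 1;

    char buf[256];
    while (!url_feof(f)) {
        size_t n = url_fread(buf, 1, sizeof(buf), f);  /* read raw bytes */
        fwrite(buf, 1, n, stdout);
    }
    url_fclose(f);
    return 0;
}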
Example 4: advance_fragment

/// Move on to next fragment IF NEEDED (changes file)
/// This method won't change file if it's not ready to
void advance_fragment(EncoderJob &jobSpec) {
    // Check to see if this frame should be split.
    if (should_advance(jobSpec)) {
        jobSpec.SplitNextKey = (jobSpec.a_pts > jobSpec.v_pts) ? (jobSpec.a_pts) : (jobSpec.v_pts);
        jobSpec.SegmentNumber++;

#ifdef NEW_M2TS
        jobSpec.p->CloseFile();
        sprintf(jobSpec.oc->filename, "%s-%05u.ts", jobSpec.BaseDirectory, jobSpec.SegmentNumber);
        int track_ids[2] = {120, 121};
        uint8_t track_types[2] = {Pests::TT_H264, Pests::TT_MpegAudio};
        jobSpec.p->StartFile(jobSpec.oc->filename, track_ids, track_types, 2);
#else
        url_fclose(jobSpec.oc->pb);
        sprintf(jobSpec.oc->filename, "%s-%05u.ts", jobSpec.BaseDirectory, jobSpec.SegmentNumber);
        if (url_fopen(&jobSpec.oc->pb, jobSpec.oc->filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", jobSpec.oc->filename);
            jobSpec.IsValid = false;
            return;
        }
        av_write_header(jobSpec.oc);
#endif
    }
}
Developer ID: i-e-b, Project: FFmpegControl, Lines of code: 29
Example 5: img_read_packet

static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int i;
    int size[3]={0}, ret[3]={0};
    ByteIOContext f1[3], *f[3]= {&f1[0], &f1[1], &f1[2]};
    AVCodecContext *codec= s1->streams[0]->codec;

    if (!s->is_pipe) {
        /* loop over input */
        if (s1->loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (av_get_frame_filename(filename, sizeof(filename),
                                  s->path, s->img_number)<0 && s->img_number > 1)
            return AVERROR_IO;
        for(i=0; i<3; i++){
            if (url_fopen(f[i], filename, URL_RDONLY) < 0)
                return AVERROR_IO;
            size[i]= url_fsize(f[i]);

            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }

        if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width)
            infer_size(&codec->width, &codec->height, size[0]);
    } else {
        f[0] = &s1->pb;
        if (url_feof(f[0]))
            return AVERROR_IO;
        size[0]= 4096;
    }

    av_new_packet(pkt, size[0] + size[1] + size[2]);
    pkt->stream_index = 0;
    pkt->flags |= PKT_FLAG_KEY;

    pkt->size= 0;
    for(i=0; i<3; i++){
        if(size[i]){
            ret[i]= get_buffer(f[i], pkt->data + pkt->size, size[i]);
            if (!s->is_pipe)
                url_fclose(f[i]);
            if(ret[i]>0)
                pkt->size += ret[i];
        }
    }

    if (ret[0] <= 0 || ret[1]<0 || ret[2]<0) {
        av_free_packet(pkt);
        return AVERROR_IO; /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
Developer ID: BOTCrusher, Project: sagetv, Lines of code: 60
Example 6: url_is_file_list

/*=======================================================================================*/
int url_is_file_list(ByteIOContext *s, const char *filename)
{
    int ret;
    list_demux_t *demux;
    ByteIOContext *lio = s;
    int64_t oldpos = 0;
    if (!lio) {
        ret = url_fopen(&lio, filename, AVIO_FLAG_READ);
        if (ret != 0) {
            return AVERROR(EIO);
        }
    } else {
        oldpos = url_ftell(lio);
    }
    demux = probe_demux(lio, filename);
    if (lio != s) {
        url_fclose(lio);
    } else {
        url_fseek(lio, oldpos, SEEK_SET);
    }
    return demux != NULL ? 100 : 0;
}
Developer ID: mazen912, Project: vendor_ffmpeg, Lines of code: 29
Example 7: OpenFlvFile

UINT CFlvUtils::OpenFlvFile()
{
    HRESULT hr = S_OK;

    if (!m_szFlvFile)
        hr = E_FAIL;

    if (SUCCEEDED(hr))
    {
        if (!(m_pAVOutputFormat->flags & AVFMT_NOFILE))
        {
            if (url_fopen(&m_pAVFormatContext->pb, m_szFlvFile, URL_WRONLY) < 0)
            {
                hr = E_FAIL;
                _ftprintf(stderr, _T("Error in CFlvUtils::OpenFlvFile():\n Could not open '%s'!\n"), m_szFlvFile);
                // TODO: error handling?
            }
        }

        // Write the stream header, if any
        av_write_header(m_pAVFormatContext);
    }

    return hr;
}
Developer ID: identity0815, Project: os45, Lines of code: 25
Example 8: list_open_internet

static int list_open_internet(ByteIOContext **pbio, struct list_mgt *mgt, const char *filename, int flags)
{
    list_demux_t *demux;
    int ret;
    ByteIOContext *bio;
    ret = url_fopen(&bio, filename, flags);
    if (ret != 0) {
        return AVERROR(EIO);
    }
    demux = probe_demux(bio, filename);
    if (!demux) {
        ret = -1;
        goto error;
    }
    ret = demux->parser(mgt, bio);
    if (ret <= 0) {
        ret = -1;
        goto error;
    }
    *pbio = bio;
    return 0;
error:
    if (bio)
        url_fclose(bio);
    return ret;
}
Developer ID: Pivosgroup, Project: aml-original-linux-buildroot, Lines of code: 29
Example 9: read_page

char * read_page(char * url)
{
    URL_FILE * page = url_fopen(url, "rb");
    if (page == NULL)
        return NULL;

    char * result = (char *)malloc(1024 * 1024);
    if (result == NULL)
    {
        url_fclose(page);
        return NULL;
    }
    result[0] = '\0';

    if (page != NULL)
    {
        char buffer[8];
        int n = 0;

        n = url_fread(buffer, 1, 8, page);
        while (n > 0)
        {
            strncat(result, buffer, n);
            n = url_fread(buffer, 1, 8, page);
        }
    }

    url_fclose(page);

    return result;
}
Developer ID: Myrcellion, Project: flight_weather_reader, Lines of code: 35
Example 10: img_write_packet

static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext pb1, *pb;
    char filename[1024];

    if (!img->is_pipe) {
        if (get_frame_filename(filename, sizeof(filename),
                               img->path, img->img_number) < 0 && img->img_number>1)
            return AVERROR_IO;
        pb = &pb1;
        if (url_fopen(pb, filename, URL_WRONLY) < 0)
            return AVERROR_IO;
    } else {
        pb = &s->pb;
    }
    put_buffer(pb, pkt->data, pkt->size);
    put_flush_packet(pb);
    if (!img->is_pipe) {
        url_fclose(pb);
    }

    img->img_number++;
    return 0;
}
Developer ID: DmitrySigaev, Project: DSMedia, Lines of code: 26
Example 11: guess_format

bool CFFMPEGLoader::CreateMovie(const char *filename, const AVOutputFormat *format,
                                const AVCodecContext *VideoCon, const AVCodecContext *AudioCon) {
    if (!filename)
        return false;

    AVOutputFormat *fmt;
    //*fmt=*format;
    fmt = guess_format(NULL, filename, NULL);

    pFormatCon = av_alloc_format_context();
    if (!pFormatCon) {
        cout<<"Error while allocating format context\n";
        return false;
    }

    bOutput = true;

    strcpy(pFormatCon->filename, filename);
    pFormatCon->oformat = fmt;

    pAudioStream = pVideoStream = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        pVideoStream = add_video_stream(pFormatCon, fmt->video_codec, VideoCon);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        pAudioStream = add_audio_stream(pFormatCon, fmt->audio_codec, AudioCon);
    }

    if (av_set_parameters(pFormatCon, NULL) < 0) {
        cout<<"Invalid output format parameters\n";
        return false;
    }

    if (pVideoStream)
        open_stream(pFormatCon, pVideoStream);
    if (pAudioStream)
        open_stream(pFormatCon, pAudioStream);

    dump_format(pFormatCon, 0, filename, 1);

    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&pFormatCon->pb, filename, URL_WRONLY) < 0) {
            cout<<"Could not open '%s'"<<filename<<endl;
            return false;
        }
    }

    /* write the stream header, if any */
    av_write_header(pFormatCon);

    return true;
}
Developer ID: arpu, Project: adscanner, Lines of code: 49
Example 12: getTitle

QString getTitle()
{
    if ( Type != 2 )
        return "";
    URL_FILE *uFile = url_fopen(curF.toUtf8().data(),"");
    if (!uFile)
        return "";
    char *data = new char[_DATA_BUFF];
    url_fread(data,1,_DATA_BUFF,uFile);
    url_fclose(uFile);
    char *t = getICYTitle(data,_DATA_BUFF);
    QString t2 = t;
    delete[] t;
    delete[] data;
    return t2;
}
Developer ID: darwinbeing, Project: Hifi-Pod, Lines of code: 16
Example 13: InternetFormatSupport

QString InternetFormatSupport( const char* address )
{
    loadCURL();
    if ( !CURLloaded )
        return "";

    OggVorbis_File mus;
    URL_FILE *uF = NULL;
    uF = url_fopen( address );
    bool loaded = !ov_open_callbacks(uF, &mus, NULL, 0, OV_CALLBACKS_URL);
    ov_clear(&mus);

    unloadCURL();
    if ( loaded )
        return plugName;
    else
        return "";
}
Developer ID: darwinbeing, Project: Hifi-Pod, Lines of code: 16
Example 14: img_read_packet

static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int ret;
    ByteIOContext f1, *f;

    if (!s->is_pipe) {
        /* loop over input */
/*
        if (loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
*/
        if (get_frame_filename(filename, sizeof(filename),
                               s->path, s->img_number)<0 && s->img_number > 1)
            return AVERROR_IO;
        f = &f1;
        if (url_fopen(f, filename, URL_RDONLY) < 0)
            return AVERROR_IO;
    } else {
        f = &s1->pb;
        if (url_feof(f))
            return AVERROR_IO;
    }

    if (s->is_pipe) {
        av_new_packet(pkt, 4096);
    }else{
        av_new_packet(pkt, url_filesize(url_fileno(f)));
    }

    pkt->stream_index = 0;
    pkt->flags |= PKT_FLAG_KEY;

    ret = get_buffer(f, pkt->data, pkt->size);
    if (!s->is_pipe) {
        url_fclose(f);
    }

    if (ret <= 0) {
        av_free_packet(pkt);
        return AVERROR_IO; /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
Developer ID: DmitrySigaev, Project: DSMedia, Lines of code: 46
Example 15: yuv_write

static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
{
    ByteIOContext pb1, *pb;
    char fname[1024], *p;
    int i, j, width, height;
    uint8_t *ptr;
    URLContext *h;
    static const char *ext = "YUV";

    /* XXX: hack hack */
    h = url_fileno(pb2);
    url_get_filename(h, fname, sizeof(fname));

    p = strrchr(fname, '.');
    if (!p || p[1] != 'Y')
        return AVERROR_IO;

    width = info->width;
    height = info->height;

    for(i=0;i<3;i++) {
        if (i == 1) {
            width >>= 1;
            height >>= 1;
        }

        if (i >= 1) {
            pb = &pb1;
            p[1] = ext[i];
            if (url_fopen(pb, fname, URL_WRONLY) < 0)
                return AVERROR_IO;
        } else {
            pb = pb2;
        }

        ptr = info->pict.data[i];
        for(j=0;j<height;j++) {
            put_buffer(pb, ptr, width);
            ptr += info->pict.linesize[i];
        }
        put_flush_packet(pb);
        if (i >= 1) {
            url_fclose(pb);
        }
    }
Developer ID: VoxOx, Project: VoxOx, Lines of code: 45
Example 16: img_read_packet

static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int ret;
    ByteIOContext f1, *f;

    if (!s->is_pipe) {
        /* loop over input */
        if (loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (get_frame_filename(filename, sizeof(filename),
                               s->path, s->img_number) < 0)
            return AVERROR_IO;
        f = &f1;
        if (url_fopen(f, filename, URL_RDONLY) < 0)
            return AVERROR_IO;
    } else {
        f = &s1->pb;
        if (url_feof(f))
            return AVERROR_IO;
    }

    av_new_packet(pkt, s->img_size);
    pkt->stream_index = 0;

    s->ptr = pkt->data;
    ret = av_read_image(f, filename, s->img_fmt, read_packet_alloc_cb, s);
    if (!s->is_pipe) {
        url_fclose(f);
    }
    if (ret < 0) {
        av_free_packet(pkt);
        return AVERROR_IO; /* signal EOF */
    } else {
        /* XXX: computing this pts is not necessary as it is done in
           the generic code too */
        pkt->pts = av_rescale((int64_t)s->img_count * s1->streams[0]->codec->time_base.num,
                              s1->streams[0]->time_base.den,
                              s1->streams[0]->codec->time_base.den) / s1->streams[0]->time_base.num;
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
Developer ID: Androtos, Project: toolchain_benchmark, Lines of code: 45
Example 17: img_write_packet

static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext pb1[3], *pb[3]= {&pb1[0], &pb1[1], &pb1[2]};
    char filename[1024];
    AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe) {
        if (av_get_frame_filename(filename, sizeof(filename),
                                  img->path, img->img_number) < 0 && img->img_number>1)
            return AVERROR_IO;
        for(i=0; i<3; i++){
            if (url_fopen(pb[i], filename, URL_WRONLY) < 0)
                return AVERROR_IO;

            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }
    } else {
        pb[0] = &s->pb;
    }

    if(codec->codec_id == CODEC_ID_RAWVIDEO){
        int ysize = codec->width * codec->height;
        put_buffer(pb[0], pkt->data        , ysize);
        put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
        put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
        put_flush_packet(pb[1]);
        put_flush_packet(pb[2]);
        url_fclose(pb[1]);
        url_fclose(pb[2]);
    }else{
        put_buffer(pb[0], pkt->data, pkt->size);
    }
    put_flush_packet(pb[0]);
    if (!img->is_pipe) {
        url_fclose(pb[0]);
    }

    img->img_number++;
    return 0;
}
Developer ID: BOTCrusher, Project: sagetv, Lines of code: 44
Example 18: guess_format

int FFMpegEncoder::configOutput()
{
    AVOutputFormat *fmt = guess_format(profile.formatStr, NULL, NULL);
    if (fmt == NULL)
        return ERR_GUESS_FORMAT;

    pFormatCtx->oformat = fmt;
    sprintf(pFormatCtx->filename, "%s", profile.outputFilename);

    int ret = url_fopen(&pFormatCtx->pb, (char*)profile.outputFilename, URL_WRONLY);

    /*
    fifo_open(&pFormatCtx->pb);
    pFormatCtx->pb->write_packet = fifo_write;
    pFormatCtx->pb->seek = fifo_seek;
    AVFifoBuffer *fifo = getFifo();
    */

    return ret;
}
Developer ID: jdzyzh, Project: ffmpeg-wrapper, Lines of code: 19
Example 19: sj_index_load

int sj_index_load(char *filename, SJ_IndexContext *sj_ic)
{
    ByteIOContext pb;

    register_protocol(&file_protocol);

    if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
        // file could not be open
        return -1;
    }

    sj_ic->size = url_fsize(&pb) - HEADER_SIZE;
    sj_ic->index_num = (sj_ic->size / INDEX_SIZE);
    sj_ic->indexes = av_malloc(sj_ic->index_num * sizeof(Index));

    int64_t magic = get_le64(&pb);
    if (magic != 0x534A2D494E444558LL) {
        // not an index file
        url_fclose(&pb);
        return -2;
    }

    sj_ic->version = get_byte(&pb);
    sj_ic->start_pts = get_le64(&pb);
    sj_ic->start_dts = get_le64(&pb);

    sj_ic->start_timecode.frames = get_byte(&pb);
    sj_ic->start_timecode.seconds = get_byte(&pb);
    sj_ic->start_timecode.minutes = get_byte(&pb);
    sj_ic->start_timecode.hours = get_byte(&pb);

    if (!sj_ic->index_num) {
        // empty index
        url_fclose(&pb);
        return -4;
    }

    for (int i = 0; i < sj_ic->index_num; i++) {
        read_index(&sj_ic->indexes[i], &pb);
    }

    url_fclose(&pb);

    return 0;
}
Developer ID: SmartJog, Project: mpeg-indexer, Lines of code: 39
Example 20: img_write_packet

static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    AVStream *st = s->streams[pkt->stream_index];
    ByteIOContext pb1, *pb;
    AVPicture *picture;
    int width, height, ret;
    char filename[1024];
    AVImageInfo info;

    width = st->codec->width;
    height = st->codec->height;

    picture = (AVPicture *)pkt->data;

    if (!img->is_pipe) {
        if (get_frame_filename(filename, sizeof(filename),
                               img->path, img->img_number) < 0)
            return AVERROR_IO;
        pb = &pb1;
        if (url_fopen(pb, filename, URL_WRONLY) < 0)
            return AVERROR_IO;
    } else {
        pb = &s->pb;
    }

    info.width = width;
    info.height = height;
    info.pix_fmt = st->codec->pix_fmt;
    info.interleaved = 0;    /* FIXME: there should be a way to set it right */
    info.pict = *picture;

    ret = av_write_image(pb, img->img_fmt, &info);
    if (!img->is_pipe) {
        url_fclose(pb);
    }

    img->img_number++;
    return 0;
}
Developer ID: Androtos, Project: toolchain_benchmark, Lines of code: 38
Example 21: open_variant

static int open_variant(AppleHTTPContext *c, struct variant *var, int skip)
{
    int ret;

    if (c->cur_seq_no < var->start_seq_no) {
        av_log(NULL, AV_LOG_WARNING,
               "seq %d not available in variant %s, skipping\n",
               var->start_seq_no, var->url);
        return 0;
    }
    if (c->cur_seq_no - var->start_seq_no >= var->n_segments)
        return c->finished ? AVERROR_EOF : 0;

    ret = url_fopen(&var->pb,
                    var->segments[c->cur_seq_no - var->start_seq_no]->url,
                    URL_RDONLY);
    if (ret < 0)
        return ret;
    var->ctx->pb = var->pb;

    /* If this is a new segment in parallel with another one already opened,
     * skip ahead so they're all at the same dts. */
    if (skip && c->last_packet_dts != AV_NOPTS_VALUE) {
        while (1) {
            ret = av_read_frame(var->ctx, &var->pkt);
            if (ret < 0) {
                if (ret == AVERROR_EOF) {
                    reset_packet(&var->pkt);
                    return 0;
                }
                return ret;
            }
            if (var->pkt.dts >= c->last_packet_dts)
                break;
            av_free_packet(&var->pkt);
        }
    }
    return 0;
}
Developer ID: DocOnDev, Project: mythtv, Lines of code: 37
Example 22: InternetFormatSupport

QString InternetFormatSupport( const char* address )
{
    loadCURL();
    if ( !CURLloaded )
        return "";

    mpg123_handle *mus = mpg123_new(NULL, NULL);
    mpg123_open_feed( mus );

    URL_FILE *f = url_fopen( address );
    if ( !f )
    {
        unloadCURL();
        return "";
    }

    char *data;
    int _DATA_BUFF;
    if ( !getDataBuff( f, url_fread, _DATA_BUFF, &data ) )
    {
        url_fclose(f);
        unloadCURL();
        return "";
    }

    int bread = url_fread(data+10, 1, _DATA_BUFF-10, f);
    mpg123_decode( mus, (const unsigned char*)data, bread, 0,0,0 );
    bool loaded = getMusInfo( mus, 0,0,0,0,0, -1, "" );

    mpg123_close(mus);
    mpg123_delete(mus);
    delete[] data;
    url_fclose(f);
    unloadCURL();

    if ( loaded )
        return plugName;
    else
        return "";
}
Developer ID: darwinbeing, Project: Hifi-Pod, Lines of code: 36
Example 23: itsFile

//......... part of the code omitted .........

    AVRational time_base = { frameratebase, framerate };
    itsContext.time_base = time_base;
    const int frb = frameratebase;
#elif LIBAVCODEC_VERSION_INT >= 0x000406 && LIBAVCODEC_BUILD > 4665
    itsContext.frame_rate = framerate;
    const int frb = frameratebase;
    itsContext.frame_rate_base = frb;
#else
    itsContext.frame_rate = framerate;
    const int frb = FRAME_RATE_BASE;
#endif
    itsContext.gop_size = 10; /* emit one intra frame every ten frames */

    if(codec->id != CODEC_ID_MPEG4 &&
       codec->id != CODEC_ID_MPEG1VIDEO &&
       codec->id != CODEC_ID_MPEG2VIDEO)
        itsContext.max_b_frames = 0;
    else
        itsContext.max_b_frames = 1;

    itsFrameNumber = 0;

    LINFO("using max_b_frames=%i bitrate=%u width=%u height=%u framerate=%u frameratebase=%u",
          itsContext.max_b_frames, itsContext.bit_rate,
          itsContext.width, itsContext.height,
          framerate, frb);

    if (avcodec_open(&itsContext, codec) < 0)
        LFATAL("could not open codec\n");

    if (itsUseFormatContext)
    {
#ifdef INVT_FFMPEG_HAS_FORMATCONTEXT_FUNCTIONS
        AVCodecContext *c = itsAVStream->codec;
        c->codec_id = itsContext.codec_id;
#ifdef CODEC_TYPE_VIDEO
        c->codec_type = CODEC_TYPE_VIDEO;
#else
#ifdef AVMEDIA_TYPE_VIDEO
        c->codec_type = AVMEDIA_TYPE_VIDEO;
#endif
#endif

        /* put sample parameters */
        c->bit_rate = itsContext.bit_rate;
        /* resolution must be a multiple of two */
        c->width = itsContext.width;
        c->height = itsContext.height;
        /* time base: this is the fundamental unit of time (in seconds) in terms
           of which frame timestamps are represented. for fixed-fps content,
           timebase should be 1/framerate and timestamp increments should be
           identically 1. */
#if defined(INVT_FFMPEG_AVCODECCONTEXT_HAS_TIME_BASE)
        c->time_base.den = itsContext.time_base.den;
        c->time_base.num = itsContext.time_base.num;
#endif
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = itsContext.pix_fmt;

        /* set the output parameters (must be done even if no parameters). */
        if (av_set_parameters(itsFormatContext, NULL) < 0)
            LFATAL("Invalid output format parameters");

#if defined(INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
        if (url_fopen(itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
            LFATAL("Could not open '%s'", oname.c_str());
#else
        if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
            LFATAL("Could not open '%s'", oname.c_str());
#endif
#else
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
        if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
            LFATAL("Could not open '%s'", oname.c_str());
#else
        LFATAL("Could not open '%s' ffmpeg version mismatch", oname.c_str());
#endif
#endif //INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)

        /* write the stream header, if any */
        av_write_header(itsFormatContext);
#else
        LFATAL("Need a new version of FFMPEG for this option");
#endif
    }
    else
    {
        itsFile = fopen(oname.c_str(), "w");
        if (itsFile==NULL)
            LFATAL("could not open file! %s", oname.c_str());
    }

    LINFO("EnCoder Inited");
}
Developer ID: ulyssesrr, Project: carmen_lcad, Lines of code: 101
Example 24: avcodec_find_encoder

void VideoStream::OpenStream()
{
    /* now that all the parameters are set, we can open the
       video codecs and allocate the necessary encode buffers */
    if ( ost )
    {
#if ZM_FFMPEG_SVN
        AVCodecContext *c = ost->codec;
#else
        AVCodecContext *c = &ost->codec;
#endif

        /* find the video encoder */
        AVCodec *codec = avcodec_find_encoder(c->codec_id);
        if ( !codec )
        {
            Panic( "codec not found" );
        }

        /* open the codec */
        if ( avcodec_open(c, codec) < 0 )
        {
            Panic( "Could not open codec" );
        }

        /* allocate the encoded raw picture */
        opicture = avcodec_alloc_frame();
        if ( !opicture )
        {
            Panic( "Could not allocate opicture" );
        }
        int size = avpicture_get_size( c->pix_fmt, c->width, c->height);
        uint8_t *opicture_buf = (uint8_t *)malloc(size);
        if ( !opicture_buf )
        {
            av_free(opicture);
            Panic( "Could not allocate opicture" );
        }
        avpicture_fill( (AVPicture *)opicture, opicture_buf, c->pix_fmt, c->width, c->height );

        /* if the output format is not RGB24, then a temporary RGB24
           picture is needed too. It is then converted to the required
           output format */
        tmp_opicture = NULL;
        if ( c->pix_fmt != pf )
        {
            tmp_opicture = avcodec_alloc_frame();
            if ( !tmp_opicture )
            {
                Panic( "Could not allocate temporary opicture" );
            }
            int size = avpicture_get_size( pf, c->width, c->height);
            uint8_t *tmp_opicture_buf = (uint8_t *)malloc(size);
            if (!tmp_opicture_buf)
            {
                av_free( tmp_opicture );
                Panic( "Could not allocate temporary opicture" );
            }
            avpicture_fill( (AVPicture *)tmp_opicture, tmp_opicture_buf, pf, c->width, c->height );
        }
    }

    /* open the output file, if needed */
    if ( !(of->flags & AVFMT_NOFILE) )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( avio_open(&ofc->pb, filename, URL_WRONLY) < 0 )
#else
        if ( url_fopen(&ofc->pb, filename, URL_WRONLY) < 0 )
#endif
        {
            Fatal( "Could not open '%s'", filename );
        }
    }

    video_outbuf = NULL;
    if ( !(ofc->oformat->flags & AVFMT_RAWPICTURE) )
    {
        /* allocate output buffer */
        /* XXX: API change will be done */
        video_outbuf_size = 200000;
        video_outbuf = (uint8_t *)malloc(video_outbuf_size);
    }

    /* write the stream header, if any */
    av_write_header(ofc);
}
Developer ID: BoontjieSA, Project: ZoneMinder, Lines of code: 87
Example 25: switch

//......... part of the code omitted .........

        if (!audio_st)
        {
            printf("Lav: new stream failed\n");
            return 0;
        }
        c = audio_st->codec;
        c->frame_size=1024; //For AAC mainly, sample per frame
        switch(audioheader->encoding)
        {
            case WAV_AC3: c->codec_id = CODEC_ID_AC3;break;
            case WAV_MP2: c->codec_id = CODEC_ID_MP2;break;
            case WAV_MP3:
#warning FIXME : Probe deeper
                c->frame_size=1152;
                c->codec_id = CODEC_ID_MP3;
                break;
            case WAV_PCM:
                // One chunk is 10 ms (1/100 of fq)
                c->frame_size=4;
                c->codec_id = CODEC_ID_PCM_S16LE;break;
            case WAV_AAC:
                c->extradata=audioextraData;
                c->extradata_size= audioextraSize;
                c->codec_id = CODEC_ID_AAC;
                break;
            default:
                printf("Cant mux that ! audio\n");
                printf("Cant mux that ! audio\n");
                c->codec_id = CODEC_ID_MP2;
                return 0;
                break;
        }
        c->codec_type = CODEC_TYPE_AUDIO;
        c->bit_rate = audioheader->byterate*8;
        c->rc_buffer_size=(c->bit_rate/(2*8)); // 500 ms worth
        _audioFq=c->sample_rate = audioheader->frequency;
        c->channels = audioheader->channels;
        _audioByterate=audioheader->byterate;
    }
    // /audio
    //----------------------
    switch(_type)
    {
        case MUXER_MP4:
            oc->mux_rate=10080*1000; // Needed ?
            break;
        case MUXER_TS:
            oc->mux_rate=10080*1000;
            break;
        case MUXER_DVD:
            oc->packet_size=2048;
            oc->mux_rate=10080*1000;
            break;
        case MUXER_VCD:
            oc->packet_size=2324;
            oc->mux_rate=2352 * 75 * 8;
            break;
        case MUXER_SVCD:
            oc->packet_size=2324;
            oc->mux_rate=2*2352 * 75 * 8; // ?
            break;
        default:
            ADM_assert(0);
    }
    oc->preload=AV_TIME_BASE/10; // 100 ms preloading
    oc->max_delay=200*1000; // 500 ms

    if (av_set_parameters(oc, NULL) < 0)
    {
        printf("Lav: set param failed \n");
        return 0;
    }
    if (url_fopen(&(oc->pb), filename, URL_WRONLY) < 0)
    {
        printf("Lav: Failed to open file :%s\n",filename);
        return 0;
    }

    av_write_header(oc);
    dump_format(oc, 0, filename, 1);
    printf("lavformat mpeg muxer initialized\n");
    _running=1;
    one=(1000*1000*1000)/_fps1000;
    _curDTS=one;
    return 1;
}
Developer ID: BackupTheBerlios, Project: avidemux-svn, Lines of code: 101
Example 26: img_write_packet

static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext *pb[3];
    char filename[1024];
    AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe) {
        if (av_get_frame_filename(filename, sizeof(filename),
                                  img->path, img->img_number) < 0 && img->img_number>1) {
            av_log(s, AV_LOG_ERROR, "Could not get frame filename from pattern\n");
            return AVERROR(EIO);
        }
        for(i=0; i<3; i++){
            if (url_fopen(&pb[i], filename, URL_WRONLY) < 0) {
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n",filename);
                return AVERROR(EIO);
            }

            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }
    } else {
        pb[0] = s->pb;
    }

    if(codec->codec_id == CODEC_ID_RAWVIDEO){
        int ysize = codec->width * codec->height;
        put_buffer(pb[0], pkt->data        , ysize);
        put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
        put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
        put_flush_packet(pb[1]);
        put_flush_packet(pb[2]);
        url_fclose(pb[1]);
        url_fclose(pb[2]);
    }else{
        if(av_str2id(img_tags, s->filename) == CODEC_ID_JPEG2000){
            AVStream *st = s->streams[0];
            if(st->codec->extradata_size > 8 &&
               AV_RL32(st->codec->extradata+4) == MKTAG('j','p','2','h')){
                if(pkt->size < 8 ||
                   AV_RL32(pkt->data+4) != MKTAG('j','p','2','c'))
                    goto error;
                put_be32(pb[0], 12);
                put_tag (pb[0], "jP ");
                put_be32(pb[0], 0x0D0A870A); // signature
                put_be32(pb[0], 20);
                put_tag (pb[0], "ftyp");
                put_tag (pb[0], "jp2 ");
                put_be32(pb[0], 0);
                put_tag (pb[0], "jp2 ");
                put_buffer(pb[0], st->codec->extradata, st->codec->extradata_size);
            }else if(pkt->size < 8 ||
                     (!st->codec->extradata_size &&
                      AV_RL32(pkt->data+4) != MKTAG('j','P',' ',' '))){ // signature
error:
                av_log(s, AV_LOG_ERROR, "malformated jpeg2000 codestream\n");
                return -1;
            }
        }
        put_buffer(pb[0], pkt->data, pkt->size);
    }
    put_flush_packet(pb[0]);
    if (!img->is_pipe) {
        url_fclose(pb[0]);
    }

    img->img_number++;
    return 0;
}
Developer ID: AllardJ, Project: Tomato, Lines of code: 71
Example 27: ts_interleave_thread_run

static u32 ts_interleave_thread_run(void *param) {
    GF_AbstractTSMuxer * mux = (GF_AbstractTSMuxer *) param;
    AVStream * video_st = mux->video_st;
    AVStream * audio_st = mux->audio_st;
    u64 audio_pts, video_pts;
    u64 audioSize, videoSize, videoKbps, audioKbps;
    u32 pass;
    u32 now, start;
    /* open the output file, if needed */
    if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
        if (url_fopen(&mux->oc->pb, mux->destination, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", mux->destination);
            return 0;
        }
    }
    /* write the stream header, if any */
    av_write_header(mux->oc);
    audio_pts = video_pts = 0;
    // Buffering...
    gf_sleep(1000);
    now = start = gf_sys_clock();
    audioSize = videoSize = 0;
    audioKbps = videoKbps = 0;
    pass = 0;
    while ( mux->encode) {
        pass++;
        if (0== (pass%16)) {
            now = gf_sys_clock();
            if (now - start > 1000) {
                videoKbps = videoSize * 8000 / (now-start) / 1024;
                audioKbps = audioSize * 8000 / (now-start) / 1024;
                audioSize = videoSize = 0;
                start = now;
                GF_LOG(GF_LOG_DEBUG, GF_LOG_MODULE, ("\rPTS audio="LLU" ("LLU"kbps), video="LLU" ("LLU"kbps)", audio_pts, audioKbps, video_pts, videoKbps));
            }
        }
        /* write interleaved audio and video frames */
        if (!video_st ||
            (audio_pts == AV_NOPTS_VALUE && has_packet_ready(mux, mux->audioMx, &mux->audioPackets)) ||
            ((audio_st && audio_pts < video_pts && audio_pts!= AV_NOPTS_VALUE))) {
            AVPacketList * pl = wait_for_packet(mux, mux->audioMx, &mux->audioPackets);
            if (!pl)
                goto exit;
            audio_pts = pl->pkt.pts ;
            audioSize+=pl->pkt.size;
            if (pl->pkt.pts == AV_NOPTS_VALUE) {
                pl->pkt.pts = 0;
            }
            if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
                GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write audio interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
            }
            gf_free(pl);
        } else {
            AVPacketList * pl = wait_for_packet(mux, mux->videoMx, &mux->videoPackets);
            if (!pl)
                goto exit;
            video_pts = pl->pkt.pts;
            /* write the compressed frame in the media file */
            if (0 && audio_pts != AV_NOPTS_VALUE && audio_pts > video_pts && pl->next) {
                u32 skipped = 0;
                u64 first = video_pts;
                /* We may be too slow... */
                gf_mx_p(mux->videoMx);
                while (video_pts < audio_pts && pl->next) {
                    AVPacketList * old = pl;
                    // We skip frames...
                    pl = pl->next;
                    video_pts = pl->pkt.pts;
                    skipped++;
                    gf_free(old);
                }
                mux->videoPackets = pl->next;
                gf_mx_v(mux->videoMx);
                if (skipped > 0)
                    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("Skipped %u video frames, frame was "LLU", but is now "LLU"\n", skipped, first, video_pts));
            }
            videoSize+=pl->pkt.size;
            video_pts = pl->pkt.pts; // * video_st->time_base.num / video_st->time_base.den;
            assert( video_pts);
            if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
                GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write video interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
            }
            gf_free(pl);
        }
        gf_sleep(1);
    }
exit:
    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] Ending TS thread...\n"));
    av_write_trailer(mux->oc);
    if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(mux->oc->pb);
    }
    return 0;
}
Developer ID: DmitrySigaev, Project: gpac-sf, Lines of code: 95
Example 28: create_video_file

void create_video_file(const char*filename,int width,int height)
{
    /* auto detect the output format from the name. default is mpeg. */
    //fmt = av_guess_format(NULL, filename, NULL);
#if (LIBAVFORMAT_VERSION_INT>=AV_VERSION_INT(52,81,0))
 #define libavformat_guess_format av_guess_format
#else
 #define libavformat_guess_format guess_format
#endif
    fmt = libavformat_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        //fmt = av_guess_format("mpeg", NULL, NULL);
        fmt = libavformat_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;

    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec,width,height);
    }

    /* set the output parameters (must be done even if no parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);
}
Developer ID: AdamCDunlap, Project: hmc-robot-drivers, Lines of code: 66
Example 29: ffemu_init_muxer

static bool ffemu_init_muxer(ffemu_t *handle)
{
    AVFormatContext *ctx = avformat_alloc_context();
    av_strlcpy(ctx->filename, handle->params.filename, sizeof(ctx->filename));
    ctx->oformat = av_guess_format(NULL, ctx->filename, NULL);
    if (!ctx->oformat)
        return false;

    // FFmpeg sure likes to make things difficult.
#if defined(AVIO_FLAG_WRITE)
#define FFMPEG_FLAG_RW AVIO_FLAG_WRITE
#elif defined(AVIO_WRONLY)
#define FFMPEG_FLAG_RW AVIO_WRONLY
#elif defined(URL_WRONLY)
#define FFMPEG_FLAG_RW URL_WRONLY
#else
#define FFMPEG_FLAG_RW 2 // Seems to be consistent, but you never know.
#endif

#ifdef HAVE_FFMPEG_AVIO_OPEN
    if (avio_open(&ctx->pb, ctx->filename, FFMPEG_FLAG_RW) < 0)
#else
    if (url_fopen(&ctx->pb, ctx->filename, FFMPEG_FLAG_RW) < 0)
#endif
    {
        av_free(ctx);
        return false;
    }

#ifdef HAVE_FFMPEG_AVFORMAT_NEW_STREAM
    AVStream *stream = avformat_new_stream(ctx, handle->video.encoder);
#else
    unsigned stream_cnt = 0;
    AVStream *stream = av_new_stream(ctx, stream_cnt++);
#endif
    stream->codec = handle->video.codec;

    if (ctx->oformat->flags & AVFMT_GLOBALHEADER)
        handle->video.codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    handle->muxer.vstream = stream;
    handle->muxer.vstream->sample_aspect_ratio = handle->video.codec->sample_aspect_ratio;

#ifdef HAVE_FFMPEG_AVFORMAT_NEW_STREAM
    stream = avformat_new_stream(ctx, handle->audio.encoder);
#else
    stream = av_new_stream(ctx, stream_cnt++);
#endif
    stream->codec = handle->audio.codec;

    if (ctx->oformat->flags & AVFMT_GLOBALHEADER)
        handle->audio.codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    handle->muxer.astream = stream;

#ifdef HAVE_X264RGB
    // Avoids a warning at end about non-monotonically increasing DTS values.
    // It seems to be harmless to disable this.
    if (g_settings.video.h264_record)
        ctx->oformat->flags |= AVFMT_TS_NONSTRICT;
#endif

    av_dict_set(&ctx->metadata, "title", "RetroArch video dump", 0);

#ifdef HAVE_FFMPEG_AVFORMAT_WRITE_HEADER
    if (avformat_write_header(ctx, NULL) < 0)
#else
    if (av_write_header(ctx) != 0)
#endif
        return false;

    handle->muxer.ctx = ctx;
    return true;
}
Developer ID: Wyrick, Project: RetroArch, Lines of code: 71
Note: The url_fopen examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many different developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.