
Tutorial: C++ vo_format_name Function Code Examples

51自学网  2021-06-03 09:46:37  ·  C++

This tutorial on C++ vo_format_name function code examples is practical and hands-on; we hope it helps you.

This article collects typical usage examples of the C++ vo_format_name function. If you have been wondering how vo_format_name is used in practice, how to call it, or what real-world examples of it look like, the curated code examples below may help.

A total of 27 vo_format_name code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
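
Before diving into the examples, here is a minimal, self-contained sketch of the calling pattern they all share. In the MPlayer/mpv sources, vo_format_name() takes an image-format code (an IMGFMT_* value, typically a fourcc) and returns a human-readable string that callers pass straight into mp_msg()/MP_INFO() style logging. The two-entry lookup and the constants below are simplified stand-ins for illustration only, not the real table from the MPlayer sources.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for two of MPlayer's IMGFMT_* fourcc constants. */
#define IMGFMT_YV12 0x32315659u   /* 'YV12' */
#define IMGFMT_YUY2 0x32595559u   /* 'YUY2' */

/* Sketch of what vo_format_name() does: map a format code to a printable
 * name, with a fallback for formats the table does not know about. */
static const char *vo_format_name(uint32_t format)
{
    switch (format) {
    case IMGFMT_YV12: return "Planar YV12";
    case IMGFMT_YUY2: return "Packed YUY2";
    default:          return "Unknown";
    }
}

int main(void)
{
    /* Typical call site, mirroring the mp_msg() logging in the examples below. */
    uint32_t outfmt = IMGFMT_YV12;
    printf("VO: output colorspace: %s (0x%x)\n", vo_format_name(outfmt), (unsigned)outfmt);
    return 0;
}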

Example 1: query_format

/** @brief Query if movie colorspace is supported by the HW.
 *  @return 0 on failure, device capabilities (not probed
 *          currently) on success.
 */
static int query_format(uint32_t movie_fmt)
{
    int i;
    for (i = 0; i < DISPLAY_FORMAT_TABLE_ENTRIES; i++) {
        if (fmt_table[i].mplayer_fmt == movie_fmt) {
            /* Test conversion from Movie colorspace to
             * display's target colorspace. */
            if (FAILED(IDirect3D9_CheckDeviceFormatConversion(priv->d3d_handle,
                                                              D3DADAPTER_DEFAULT,
                                                              D3DDEVTYPE_HAL,
                                                              fmt_table[i].fourcc,
                                                              priv->desktop_fmt))) {
                mp_msg(MSGT_VO, MSGL_V, "<vo_direct3d>Rejected image format: %s\n",
                       vo_format_name(fmt_table[i].mplayer_fmt));
                return 0;
            }

            priv->movie_src_fmt = fmt_table[i].fourcc;
            mp_msg(MSGT_VO, MSGL_V, "<vo_direct3d>Accepted image format: %s\n",
                   vo_format_name(fmt_table[i].mplayer_fmt));

            return (VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW
                    | VFCAP_OSD | VFCAP_HWSCALE_UP | VFCAP_HWSCALE_DOWN);
        }
    }
    return 0;
}

Developer: HermiG, Project: mplayer2, Lines: 32


Example 2: config

static int config(struct vf_instance *vf,
                  int width, int height, int d_width, int d_height,
                  unsigned int flags, unsigned int outfmt)
{
    if ((width <= 0) || (height <= 0) || (d_width <= 0) || (d_height <= 0)) {
        mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n");
        return 0;
    }

    const vo_info_t *info = video_out->driver->info;
    mp_msg(MSGT_CPLAYER, MSGL_INFO, "VO: [%s] %dx%d => %dx%d %s %s%s\n",
           info->short_name,
           width, height,
           d_width, d_height,
           vo_format_name(outfmt),
           (flags & VOFLAG_FULLSCREEN) ? " [fs]" : "",
           (flags & VOFLAG_FLIPPING) ? " [flip]" : "");
    mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Description: %s\n", info->name);
    mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Author: %s\n", info->author);
    if (info->comment && strlen(info->comment) > 0)
        mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Comment: %s\n", info->comment);

    if (vo_config(video_out, width, height, d_width, d_height, flags, outfmt))
        return 0;

    return 1;
}

Developer: maletor, Project: mpv, Lines: 28


Example 3: query_format

static int query_format(uint32_t format)
{
    mp_msg(MSGT_VO, MSGL_DBG2,
           "vo_x11: query_format was called: %x (%s)\n", format,
           vo_format_name(format));
    if (IMGFMT_IS_BGR(format))
    {
        if (IMGFMT_BGR_DEPTH(format) <= 8)
            return 0;           // TODO 8bpp not yet fully implemented
        if (IMGFMT_BGR_DEPTH(format) == vo_depthonscreen)
            return VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW | VFCAP_OSD | VFCAP_SWSCALE | VFCAP_FLIP |
                VFCAP_ACCEPT_STRIDE;
        else
            return VFCAP_CSP_SUPPORTED | VFCAP_OSD | VFCAP_SWSCALE | VFCAP_FLIP |
                VFCAP_ACCEPT_STRIDE;
    }

    switch (format)
    {
//   case IMGFMT_BGR8:
//   case IMGFMT_BGR15:
//   case IMGFMT_BGR16:
//   case IMGFMT_BGR24:
//   case IMGFMT_BGR32:
//    return 0x2;
//   case IMGFMT_YUY2:
        case IMGFMT_I420:
        case IMGFMT_IYUV:
        case IMGFMT_YV12:
            return VFCAP_CSP_SUPPORTED | VFCAP_OSD | VFCAP_SWSCALE | VFCAP_ACCEPT_STRIDE;
    }
    return 0;
}

Developer: JasonFengIce, Project: mplayer-android, Lines: 33


Example 4: set_csp

static void set_csp(BITMAPINFOHEADER *o_bih, unsigned int outfmt)
{
    int yuv = 0;

    switch (outfmt)
    {
    /* planar format */
    case IMGFMT_YV12:
    case IMGFMT_I420:
    case IMGFMT_IYUV:
        o_bih->biBitCount = 12;
        yuv = 1;
        break;
    case IMGFMT_YVU9:
    case IMGFMT_IF09:
        o_bih->biBitCount = 9;
        yuv = 1;
        break;
    /* packed format */
    case IMGFMT_YUY2:
    case IMGFMT_UYVY:
    case IMGFMT_YVYU:
        o_bih->biBitCount = 16;
        yuv = 1;
        break;
    /* rgb/bgr format */
    case IMGFMT_RGB8:
    case IMGFMT_BGR8:
        o_bih->biBitCount = 8;
        break;
    case IMGFMT_RGB15:
    case IMGFMT_RGB16:
    case IMGFMT_BGR15:
    case IMGFMT_BGR16:
        o_bih->biBitCount = 16;
        break;
    case IMGFMT_RGB24:
    case IMGFMT_BGR24:
        o_bih->biBitCount = 24;
        break;
    case IMGFMT_RGB32:
    case IMGFMT_BGR32:
        o_bih->biBitCount = 32;
        break;
    default:
        mp_msg(MSGT_WIN32, MSGL_ERR, "Unsupported image format: %s\n", vo_format_name(outfmt));
        return;
    }

    o_bih->biSizeImage = abs(o_bih->biWidth * o_bih->biHeight * (o_bih->biBitCount / 8));

// Note: we cannot rely on sh->outfmtidx here, it's undefined at this stage!!!
//  if (yuv && !(sh->codec->outflags[sh->outfmtidx] & CODECS_FLAG_YUVHACK))
    if (yuv)
        o_bih->biCompression = outfmt;
    else
        o_bih->biCompression = 0;
}

Developer: basinilya, Project: mplayer, Lines: 57


Example 5: reconfig_video

static void reconfig_video(struct MPContext *mpctx,
                           const struct mp_image_params *params,
                           bool probe_only)
{
    struct MPOpts *opts = mpctx->opts;
    struct dec_video *d_video = mpctx->d_video;

    d_video->decoder_output = *params;

    set_allowed_vo_formats(d_video->vfilter, mpctx->video_out);

    // The event should happen _after_ filter and VO reconfig. Since we don't
    // have any fine grained locking, this is just as good.
    mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);

    if (video_reconfig_filters(d_video, params) < 0) {
        // Most video filters don't work with hardware decoding, so this
        // might be the reason filter reconfig failed.
        if (!probe_only &&
            video_vd_control(d_video, VDCTRL_FORCE_HWDEC_FALLBACK, NULL) == CONTROL_OK)
        {
            // Fallback active; decoder will return software format next
            // time. Don't abort video decoding.
            d_video->vfilter->initialized = 0;
        }
        return;
    }

    if (d_video->vfilter->initialized < 1)
        return;

    struct mp_image_params p = d_video->vfilter->output_params;
    const struct vo_driver *info = mpctx->video_out->driver;
    MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n",
            info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt));
    MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);

    int r = vo_reconfig(mpctx->video_out, &p, 0);
    if (r < 0)
        d_video->vfilter->initialized = -1;

    if (r >= 0) {
        if (opts->gamma_gamma != 1000)
            video_set_colors(d_video, "gamma", opts->gamma_gamma);
        if (opts->gamma_brightness != 1000)
            video_set_colors(d_video, "brightness", opts->gamma_brightness);
        if (opts->gamma_contrast != 1000)
            video_set_colors(d_video, "contrast", opts->gamma_contrast);
        if (opts->gamma_saturation != 1000)
            video_set_colors(d_video, "saturation", opts->gamma_saturation);
        if (opts->gamma_hue != 1000)
            video_set_colors(d_video, "hue", opts->gamma_hue);
    }
}

Developer: prodigeni, Project: mpv, Lines: 54


Example 6: find_best_out

static unsigned int find_best_out(vf_instance_t *vf, int in_format)
{
    unsigned int best = 0;
    int i = -1;
    int j = -1;
    int format = 0;

    // find the best outfmt:
    while (1) {
        int ret;
        if (j < 0) {
            format = in_format;
            j = 0;
        } else if (i < 0) {
            while (preferred_conversions[j][0] &&
                   preferred_conversions[j][0] != in_format)
                j++;
            format = preferred_conversions[j++][1];
            // switch to standard list
            if (!format)
                i = 0;
        }
        if (i >= 0)
            format = outfmt_list[i++];
        if (!format)
            break;

        ret = check_outfmt(vf, format);

        mp_msg(MSGT_VFILTER, MSGL_DBG2, "scale: query(%s) -> %d\n",
               vo_format_name(format), ret & 3);
        if (ret & VFCAP_CSP_SUPPORTED_BY_HW) {
            best = format; // no conversion -> bingo!
            break;
        }
        if (ret & VFCAP_CSP_SUPPORTED && !best)
            best = format;  // best with conversion
    }
    if (!best) {
        // Try anything else. outfmt_list is just a list of preferred formats.
        for (int cur = IMGFMT_START; cur < IMGFMT_END; cur++) {
            int ret = check_outfmt(vf, cur);
            if (ret & VFCAP_CSP_SUPPORTED_BY_HW) {
                best = cur; // no conversion -> bingo!
                break;
            }
            if (ret & VFCAP_CSP_SUPPORTED && !best)
                best = cur;  // best with conversion
        }
    }
    return best;
}

Developer: songfj, Project: mpv, Lines: 54


Example 7: imgfmt2pixfmt

enum AVPixelFormat imgfmt2pixfmt(int fmt)
{
    int i;
    enum AVPixelFormat pix_fmt;
    for (i = 0; conversion_map[i].fmt; i++)
        if (conversion_map[i].fmt == fmt)
            break;
    pix_fmt = conversion_map[i].pix_fmt;
    if (pix_fmt == PIX_FMT_NONE)
        mp_msg(MSGT_GLOBAL, MSGL_ERR, "Unsupported format %s\n", vo_format_name(fmt));
    return pix_fmt;
}

Developer: gbeauchesne, Project: mplayer-vaapi, Lines: 12


Example 8: find_best

static unsigned int find_best(struct vf_instance_s *vf, unsigned int fmt)
{
    unsigned int best = 0;
    int ret;
    unsigned int *p;
    if (fmt == IMGFMT_BGR8) p = bgr_list;
    else if (fmt == IMGFMT_RGB8) p = rgb_list;
    else return 0;
    while (*p) {
        ret = vf->next->query_format(vf->next, *p);
        mp_msg(MSGT_VFILTER, MSGL_DBG2, "[%s] query(%s) -> %d\n", vf->info->name, vo_format_name(*p), ret & 3);
        if (ret & VFCAP_CSP_SUPPORTED_BY_HW) { best = *p; break; } // no conversion -> bingo!
        if (ret & VFCAP_CSP_SUPPORTED && !best) best = *p; // best with conversion
        ++p;
    }
    return best;
}

Developer: batman52, Project: dingux-code, Lines: 16


Example 9: reconfig

static int reconfig(struct vf_instance *vf, struct mp_image_params *p, int flags)
{
    if (p->w <= 0 || p->h <= 0 || p->d_w <= 0 || p->d_h <= 0) {
        mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n");
        return -1;
    }

    const struct vo_driver *info = video_out->driver;
    mp_msg(MSGT_CPLAYER, MSGL_INFO, "VO: [%s] %dx%d => %dx%d %s %s\n",
           info->name,
           p->w, p->h, p->d_w, p->d_h,
           vo_format_name(p->imgfmt),
           (flags & VOFLAG_FLIPPING) ? " [flip]" : "");
    mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Description: %s\n", info->description);

    return vo_reconfig(video_out, p, flags);
}

Developer: CrimsonVoid, Project: mpv, Lines: 17


Example 10: config

static int config(struct vf_instance_s *vf,
                  int width, int height, int d_width, int d_height,
                  unsigned int flags, unsigned int outfmt)
{
    if ((width <= 0) || (height <= 0) || (d_width <= 0) || (d_height <= 0))
    {
        mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n");
        return 0;
    }

    if (video_out->info)
    {
        const vo_info_t *info = video_out->info;
        mp_msg(MSGT_CPLAYER, MSGL_INFO, "VO: [%s] %dx%d => %dx%d %s %s%s%s%s\n", info->short_name,
               width, height,
               d_width, d_height,
               vo_format_name(outfmt),
               (flags & VOFLAG_FULLSCREEN) ? " [fs]" : "",
               (flags & VOFLAG_MODESWITCHING) ? " [vm]" : "",
               (flags & VOFLAG_SWSCALE) ? " [zoom]" : "",
               (flags & VOFLAG_FLIPPING) ? " [flip]" : "");
        mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Description: %s\n", info->name);
        mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Author: %s\n", info->author);
        if (info->comment && strlen(info->comment) > 0)
            mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Comment: %s\n", info->comment);
    }

    // save vo's stride capability for the wanted colorspace:
    vf->default_caps = query_format(vf, outfmt);

    if (config_video_out(video_out, width, height, d_width, d_height, flags, "MPlayer", outfmt))
        return 0;

#ifdef USE_ASS
    if (vf->priv->ass_priv)
        ass_configure(vf->priv->ass_priv, width, height, !!(vf->default_caps & VFCAP_EOSD_UNSCALED));
#endif

    ++vo_config_count;
    return 1;
}

Developer: batman52, Project: dingux-code, Lines: 40


Example 11: find_best_out

static unsigned int find_best_out(vf_instance_t *vf, int in_format)
{
    unsigned int best = 0;
    int i = -1;
    int normalized_format = normalize_yuvp16(in_format);
    int j = normalized_format ? -2 : -1;
    int format = 0;

    // find the best outfmt:
    while (1) {
        int ret;
        if (j < 0) {
            format = j == -1 && normalized_format ? normalized_format : in_format;
            j++;
        } else if (i < 0) {
            while (preferred_conversions[j][0] &&
                   preferred_conversions[j][0] != in_format)
                j++;
            format = preferred_conversions[j++][1];
            // switch to standard list
            if (!format)
                i = 0;
        }
        if (i >= 0)
            format = outfmt_list[i++];
        if (!format)
            break;

        ret = vf_next_query_format(vf, format);

        mp_msg(MSGT_VFILTER, MSGL_DBG2, "scale: query(%s) -> %d\n", vo_format_name(format), ret & 3);
        if (ret & VFCAP_CSP_SUPPORTED_BY_HW) {
            best = format; // no conversion -> bingo!
            break;
        }
        if (ret & VFCAP_CSP_SUPPORTED && !best)
            best = format; // best with conversion
    }
    return best;
}

Developer: WilliamRen, Project: mplayer-ww, Lines: 38


Example 12: config

static int config(struct vf_instance *vf,
                  int width, int height, int d_width, int d_height,
                  unsigned int flags, unsigned int outfmt)
{
    if ((width <= 0) || (height <= 0) || (d_width <= 0) || (d_height <= 0)) {
        mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n");
        return 0;
    }

    const vo_info_t *info = video_out->driver->info;
    mp_msg(MSGT_CPLAYER, MSGL_INFO, "VO: [%s] %dx%d => %dx%d %s %s%s%s%s\n",
           info->short_name,
           width, height,
           d_width, d_height,
           vo_format_name(outfmt),
           (flags & VOFLAG_FULLSCREEN) ? " [fs]" : "",
           (flags & VOFLAG_MODESWITCHING) ? " [vm]" : "",
           (flags & VOFLAG_SWSCALE) ? " [zoom]" : "",
           (flags & VOFLAG_FLIPPING) ? " [flip]" : "");
    mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Description: %s\n", info->name);
    mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Author: %s\n", info->author);
    if (info->comment && strlen(info->comment) > 0)
        mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Comment: %s\n", info->comment);

    // save vo's stride capability for the wanted colorspace:
    vf->default_caps = query_format(vf, outfmt);
    vf->draw_slice = (vf->default_caps & VOCAP_NOSLICES) ? NULL : draw_slice;

    if (vo_config(video_out, width, height, d_width, d_height, flags, outfmt))
        return 0;

    vf->priv->scale_ratio = (double) d_width / d_height * height / width;

    return 1;
}

Developer: divVerent, Project: mplayer2, Lines: 36


Example 13: mpcodecs_config_vo

int mpcodecs_config_vo(sh_video_t *sh, int w, int h,                       const unsigned int *outfmts,                       unsigned int preferred_outfmt){    struct MPOpts *opts = sh->opts;    int j;    unsigned int out_fmt = 0;    int screen_size_x = 0;    int screen_size_y = 0;    vf_instance_t *vf = sh->vfilter;    int vocfg_flags = 0;    if (w)        sh->disp_w = w;    if (h)        sh->disp_h = h;    mp_msg(MSGT_DECVIDEO, MSGL_V,           "VIDEO:  %dx%d  %5.3f fps  %5.1f kbps (%4.1f kB/s)/n",           sh->disp_w, sh->disp_h, sh->fps, sh->i_bps * 0.008,           sh->i_bps / 1000.0);    if (!sh->disp_w || !sh->disp_h)        return 0;    mp_msg(MSGT_DECVIDEO, MSGL_V,           "VDec: vo config request - %d x %d (preferred colorspace: %s)/n",           w, h, vo_format_name(preferred_outfmt));    if (get_video_quality_max(sh) <= 0 && divx_quality) {        // user wants postprocess but no pp filter yet:        sh->vfilter = vf = vf_open_filter(opts, vf, "pp", NULL);    }    if (!outfmts || sh->codec->outfmt[0] != 0xffffffff)        outfmts = sh->codec->outfmt;    // check if libvo and codec has common outfmt (no conversion):  csp_again:    if (mp_msg_test(MSGT_DECVIDEO, MSGL_V)) {        mp_msg(MSGT_DECVIDEO, MSGL_V, "Trying filter chain:");        for (vf_instance_t *f = vf; f; f = f->next)            mp_msg(MSGT_DECVIDEO, MSGL_V, " %s", f->info->name);        mp_msg(MSGT_DECVIDEO, MSGL_V, "/n");    }    j = -1;    for (int i = 0; i < CODECS_MAX_OUTFMT; i++) {        int flags;        out_fmt = outfmts[i];        if (out_fmt == (unsigned int) 0xFFFFFFFF)            break;        flags = vf->query_format(vf, out_fmt);        mp_msg(MSGT_CPLAYER, MSGL_DBG2,               "vo_debug: query(%s) returned 0x%X (i=%d) /n",               vo_format_name(out_fmt), flags, i);        if ((flags & VFCAP_CSP_SUPPORTED_BY_HW)            || (flags & VFCAP_CSP_SUPPORTED && j < 0)) {            // check (query) if codec really support this outfmt...            sh->outfmtidx = j; // pass index to the control() function this way            if (sh->vd_driver->control(sh, VDCTRL_QUERY_FORMAT, &out_fmt) ==                CONTROL_FALSE) {                mp_msg(MSGT_CPLAYER, MSGL_DBG2,                       "vo_debug: codec query_format(%s) returned FALSE/n",                       vo_format_name(out_fmt));                continue;            }            j = i;            sh->output_flags = flags;            if (flags & VFCAP_CSP_SUPPORTED_BY_HW)                break;        }    }    if (j < 0) {        // TODO: no match - we should use conversion...        if (strcmp(vf->info->name, "scale")) {            mp_tmsg(MSGT_DECVIDEO, MSGL_INFO, "Could not find matching colorspace - retrying with -vf scale.../n");            vf = vf_open_filter(opts, vf, "scale", NULL);            goto csp_again;        }        mp_tmsg(MSGT_CPLAYER, MSGL_WARN,            "The selected video_out device is incompatible with this codec./n"/            "Try appending the scale filter to your filter list,/n"/            "e.g. 
-vf spp,scale instead of -vf spp./n");        sh->vf_initialized = -1;        return 0;               // failed    }    out_fmt = outfmts[j];    sh->outfmt = out_fmt;    mp_msg(MSGT_CPLAYER, MSGL_V, "VDec: using %s as output csp (no %d)/n",           vo_format_name(out_fmt), j);    sh->outfmtidx = j;    sh->vfilter = vf;    // autodetect flipping    if (opts->flip == -1) {        opts->flip = 0;        if (sh->codec->outflags[j] & CODECS_FLAG_FLIP)            if (!(sh->codec->outflags[j] & CODECS_FLAG_NOFLIP))//.........这里部分代码省略.........
Developer: kax4, Project: mpv, Lines: 101


Example 14: config

//.........这里部分代码省略.........    ggiCheckMode(ggi_conf.vis, &mode);    if (ggiSetMode(ggi_conf.vis, &mode) < 0) {        mp_msg(MSGT_VO, MSGL_ERR, "[ggi] unable to set display mode/n");        return (-1);    }    if (ggiGetMode(ggi_conf.vis, &mode) < 0) {        mp_msg(MSGT_VO, MSGL_ERR, "[ggi] unable to get display mode/n");        return (-1);    }    if ((mode.graphtype == GT_INVALID)       || (mode.graphtype == GT_AUTO))    {        mp_msg(MSGT_VO, MSGL_ERR, "[ggi] not supported depth/bpp/n");        return (-1);    }#if 0    printf("[ggi] mode: ");    ggiPrintMode(&mode);    printf("/n");#endif#ifdef HAVE_GGIWMH    ggiWmhSetTitle(ggi_conf.vis, title);    if (vo_ontop) window_ontop();#endif    ggiSetFlags(ggi_conf.vis, GGIFLAG_ASYNC);    if (GT_SCHEME(mode.graphtype) == GT_PALETTE)        ggiSetColorfulPalette(ggi_conf.vis);    if (GT_SCHEME(mode.graphtype) != GT_TRUECOLOR) {        ggi_mode drawmode;        ggi_conf.drawvis = ggiOpen("display-memory", NULL);        if (ggi_conf.drawvis == NULL) {            mp_msg(MSGT_VO, MSGL_ERR,                   "[ggi] unable to get backbuffer for conversion/n");            return -1;        }        memcpy(&drawmode, &mode, sizeof(ggi_mode));        drawmode.graphtype = GT_32BIT;        drawmode.size.x = GGI_AUTO;        drawmode.size.y = GGI_AUTO;        ggiCheckMode(ggi_conf.drawvis, &drawmode);        if (ggiSetMode(ggi_conf.drawvis, &drawmode) < 0) {            mp_msg(MSGT_VO, MSGL_ERR,                   "[ggi] unable to set backbuffer mode/n");            return -1;        }        mode.graphtype = drawmode.graphtype;        ggiSetFlags(ggi_conf.drawvis, GGIFLAG_ASYNC);    }    vo_depthonscreen = GT_DEPTH(mode.graphtype);    vo_screenwidth = mode.virt.x;    vo_screenheight = mode.virt.y;    vo_dwidth = width;    vo_dheight = height;    vo_dbpp = GT_SIZE(mode.graphtype);    /* calculate top, left corner */    vo_dx = (vo_screenwidth - vo_dwidth) / 2;    vo_dy = (vo_screenheight - vo_dheight) / 2;    ggi_conf.srcwidth = width;    ggi_conf.srcheight = height;    ggi_conf.srcformat = format;    ggi_conf.voflags = flags;    if (IMGFMT_IS_RGB(ggi_conf.srcformat)) {        ggi_conf.srcdepth = IMGFMT_RGB_DEPTH(ggi_conf.srcformat);    } else if (IMGFMT_IS_BGR(ggi_conf.srcformat)) {        ggi_conf.srcdepth = IMGFMT_BGR_DEPTH(ggi_conf.srcformat);    } else {        mp_msg(MSGT_VO, MSGL_FATAL, "[ggi] Unknown image format: %s/n",               vo_format_name(ggi_conf.srcformat));        return (-1);    }    mp_msg(MSGT_VO, MSGL_INFO, "[ggi] input: %dx%dx%d, output: %dx%dx%d/n",           ggi_conf.srcwidth, ggi_conf.srcheight, ggi_conf.srcdepth,           mode.virt.x, mode.virt.y, vo_dbpp);    ggi_conf.srcbpp = (ggi_conf.srcdepth + 7) / 8;    ggi_conf.flushregion.x1 = vo_dx;    ggi_conf.flushregion.y1 = vo_dy;    ggi_conf.flushregion.x2 = vo_dwidth;    ggi_conf.flushregion.y2 = vo_dheight;    return (0);}
Developer: batman52, Project: dingux-code, Lines: 101


Example 15: config

//.........这里部分代码省略.........	    // downscale!	    if(vo_flags&VFCAP_HWSCALE_DOWN) x=0;	} else {	    // upscale:	    if(vo_flags&VFCAP_HWSCALE_UP) x=0;	}	if(x){	    // user wants sw scaling! (-zoom)	    vf->priv->w=d_width;	    vf->priv->h=d_height;	}    }    if(vf->priv->noup){        if((vf->priv->w > width) + (vf->priv->h > height) >= vf->priv->noup){            vf->priv->w= width;            vf->priv->h= height;        }    }    if (vf->priv->w <= -8) {      vf->priv->w += 8;      round_w = 1;    }    if (vf->priv->h <= -8) {      vf->priv->h += 8;      round_h = 1;    }    if (vf->priv->w < -3 || vf->priv->h < -3 ||         (vf->priv->w < -1 && vf->priv->h < -1)) {      // TODO: establish a direct connection to the user's brain      // and find out what the heck he thinks MPlayer should do      // with this nonsense.      mp_msg(MSGT_VFILTER, MSGL_ERR, "SwScale: EUSERBROKEN Check your parameters, they make no sense!/n");      return 0;    }    if (vf->priv->w == -1)      vf->priv->w = width;    if (vf->priv->w == 0)      vf->priv->w = d_width;    if (vf->priv->h == -1)      vf->priv->h = height;    if (vf->priv->h == 0)      vf->priv->h = d_height;    if (vf->priv->w == -3)      vf->priv->w = vf->priv->h * width / height;    if (vf->priv->w == -2)      vf->priv->w = vf->priv->h * d_width / d_height;    if (vf->priv->h == -3)      vf->priv->h = vf->priv->w * height / width;    if (vf->priv->h == -2)      vf->priv->h = vf->priv->w * d_height / d_width;    if (round_w)      vf->priv->w = ((vf->priv->w + 8) / 16) * 16;    if (round_h)      vf->priv->h = ((vf->priv->h + 8) / 16) * 16;    // calculate the missing parameters:    switch(best) {    case IMGFMT_YV12:		/* YV12 needs w & h rounded to 2 */    case IMGFMT_I420:    case IMGFMT_IYUV:    case IMGFMT_NV12:    case IMGFMT_NV21:      vf->priv->h = (vf->priv->h + 1) & ~1;    case IMGFMT_YUY2:		/* YUY2 needs w rounded to 2 */    case IMGFMT_UYVY:      vf->priv->w = (vf->priv->w + 1) & ~1;    }    mp_msg(MSGT_VFILTER,MSGL_DBG2,"SwScale: scaling %dx%d %s to %dx%d %s  /n",	width,height,vo_format_name(outfmt),	vf->priv->w,vf->priv->h,vo_format_name(best));    // free old ctx:    if(vf->priv->ctx) sws_freeContext(vf->priv->ctx);    if(vf->priv->ctx2)sws_freeContext(vf->priv->ctx2);    // new swscaler:    sws_getFlagsAndFilterFromCmdLine(&int_sws_flags, &srcFilter, &dstFilter);    int_sws_flags|= vf->priv->v_chr_drop << SWS_SRC_V_CHR_DROP_SHIFT;    int_sws_flags|= vf->priv->accurate_rnd * SWS_ACCURATE_RND;    vf->priv->ctx=sws_getContext(width, height >> vf->priv->interlaced,	    sfmt,		  vf->priv->w, vf->priv->h >> vf->priv->interlaced,	    dfmt,	    int_sws_flags | get_sws_cpuflags(), srcFilter, dstFilter, vf->priv->param);    if(vf->priv->interlaced){        vf->priv->ctx2=sws_getContext(width, height >> 1,	    sfmt,		  vf->priv->w, vf->priv->h >> 1,	    dfmt,	    int_sws_flags | get_sws_cpuflags(), srcFilter, dstFilter, vf->priv->param);    }
Developer: dr4g0nsr, Project: mplayer-skyviia-8860, Lines: 101


Example 16: draw_osd

static void draw_osd(struct vo *vo, struct osd_state *osd){    struct priv *p = vo->priv;    struct mp_image img = get_x_buffer(p);    struct mp_osd_res res = {        .w = img.w,        .h = img.h,        .display_par = vo->monitor_par,        .video_par = vo->aspdat.par,    };    osd_draw_on_image_bk(osd, res, osd->vo_pts, 0, p->osd_backup, &img);}static mp_image_t *get_screenshot(struct vo *vo){    struct priv *p = vo->priv;    struct mp_image img = get_x_buffer(p);    struct mp_image *res = alloc_mpi(img.w, img.h, img.imgfmt);    copy_mpi(res, &img);    mp_draw_sub_backup_restore(p->osd_backup, res);    return res;}static int redraw_frame(struct vo *vo){    struct priv *p = vo->priv;    struct mp_image img = get_x_buffer(p);    mp_draw_sub_backup_restore(p->osd_backup, &img);    return true;}static void flip_page(struct vo *vo){    struct priv *p = vo->priv;    Display_Image(p, p->myximage, p->ImageData);    XSync(vo->x11->display, False);}static int draw_slice(struct vo *vo, uint8_t *src[], int stride[], int w, int h,                      int x, int y){    struct priv *p = vo->priv;    uint8_t *dst[MP_MAX_PLANES] = {NULL};    int dstStride[MP_MAX_PLANES] = {0};    if ((p->old_vo_dwidth != vo->dwidth || p->old_vo_dheight != vo->dheight)        /*&& y==0 */ && p->zoomFlag)    {        int newW = vo->dwidth;        int newH = vo->dheight;        struct SwsContext *oldContext = p->swsContext;        p->old_vo_dwidth = vo->dwidth;        p->old_vo_dheight = vo->dheight;        if (vo_fs)            aspect(vo, &newW, &newH, A_ZOOM);        if (sws_flags == 0)            newW &= (~31);      // not needed but, if the user wants the FAST_BILINEAR SCALER, then its needed        p->swsContext            = sws_getContextFromCmdLine(p->srcW, p->srcH, p->in_format, newW,                                        newH, p->out_format);        if (p->swsContext) {            p->image_width = (newW + 7) & (~7);            p->image_height = newH;            freeMyXImage(p);            getMyXImage(p);            sws_freeContext(oldContext);        } else            p->swsContext = oldContext;        p->dst_width = newW;    }    dstStride[0] = p->image_width * ((p->bpp + 7) / 8);    dst[0] = p->ImageData;    if (p->Flip_Flag) {        dst[0] += dstStride[0] * (p->image_height - 1);        dstStride[0] = -dstStride[0];    }    sws_scale(p->swsContext, (const uint8_t **)src, stride, y, h, dst,              dstStride);    mp_draw_sub_backup_reset(p->osd_backup);    return 0;}static int query_format(struct vo *vo, uint32_t format){    mp_msg(MSGT_VO, MSGL_DBG2,           "vo_x11: query_format was called: %x (%s)/n", format,           vo_format_name(format));    if (IMGFMT_IS_BGR(format)) {//.........这里部分代码省略.........
Developer: kax4, Project: mpv, Lines: 101


Example 17: DisplayMediaType

void DisplayMediaType(const char * label,const AM_MEDIA_TYPE* pmt){    WAVEFORMATEX* pWF;    VIDEOINFOHEADER* Vhdr;    int i;    GUID* iid;           Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"=======================/n");    if(label){        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"AM_MEDIA_TYPE: %s/n",label);    }else        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"AM_MEDIA_TYPE:/n");    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"-(Ptr:%p)--------/n",pmt);    for(i=0;i<sizeof(AM_MEDIA_TYPE);i++){        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"%02x ",(BYTE)((BYTE*)pmt)[i]);        if((i+1)%8==0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"/n");    }    if((i)%8!=0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"/n");    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"-(Ptr:%p)--(%lu)--/n",pmt->pbFormat,pmt->cbFormat);    for(i=0;i<pmt->cbFormat;i++){        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"%02x ",(BYTE)pmt->pbFormat[i]);        if((i+1)%8==0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"/n");    }    if((i)%8!=0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"/n");    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"-----------------------/n");    iid=(GUID*)&(pmt->subtype);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Subtype:     %08x-%04x-%04x-%02x%02x-"		 "%02x%02x%02x%02x%02x%02x/n",		 iid->f1,  iid->f2,  iid->f3,		 (unsigned char)iid->f4[1], (unsigned char)iid->f4[0],		 (unsigned char)iid->f4[2], (unsigned char)iid->f4[3],		 (unsigned char)iid->f4[4], (unsigned char)iid->f4[5],		 (unsigned char)iid->f4[6], (unsigned char)iid->f4[7]);    iid=(GUID*)&(pmt->formattype);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Format type: %08x-%04x-%04x-%02x%02x-"		 "%02x%02x%02x%02x%02x%02x/n",		 iid->f1,  iid->f2,  iid->f3,		 (unsigned char)iid->f4[1], (unsigned char)iid->f4[0],		 (unsigned char)iid->f4[2], (unsigned char)iid->f4[3],		 (unsigned char)iid->f4[4], (unsigned char)iid->f4[5],		 (unsigned char)iid->f4[6], (unsigned char)iid->f4[7]);    if(pmt && memcmp(&pmt->formattype,&FORMAT_WaveFormatEx,16)==0 && pmt->pbFormat){    pWF=(WAVEFORMATEX*)pmt->pbFormat;    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nChannels %d/n",pWF->nChannels);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nSamplesPerSec %ld/n",pWF->nSamplesPerSec);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: wBitsPerSample %d/n",pWF->wBitsPerSample);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nBlockAlign %d/n",pWF->nBlockAlign);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nAvgBytesPerSec %ld/n",pWF->nAvgBytesPerSec);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: SampleSize %ld/n",pmt->lSampleSize);    }    if(pmt && memcmp(&pmt->formattype,&FORMAT_VideoInfo,16)==0 && pmt->pbFormat){    Vhdr=(VIDEOINFOHEADER*)pmt->pbFormat;    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: dwBitRate %ld/n",Vhdr->dwBitRate);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biWidth %ld/n",Vhdr->bmiHeader.biWidth);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biHeight %ld/n",Vhdr->bmiHeader.biHeight);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biSizeImage %ld/n",Vhdr->bmiHeader.biSizeImage);    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biBitCount %d/n",Vhdr->bmiHeader.biBitCount);    if(Vhdr->bmiHeader.biCompression){        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biComression 0x%08lx (%s)/n",Vhdr->bmiHeader.biCompression,vo_format_name(Vhdr->bmiHeader.biCompression));    }else        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biComression 0x00000000/n");    }    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"=======================/n");}
Developer: batman52, Project: dingux-code, Lines: 66


Example 18: config

//.........这里部分代码省略.........       /*|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))*/){        lavc_venc_context->flags |= CODEC_FLAG_GLOBAL_HEADER;    }    if(lavc_param_video_global_header&2){        lavc_venc_context->flags2 |= CODEC_FLAG2_LOCAL_HEADER;    }    lavc_venc_context->mv0_threshold = lavc_param_mv0_threshold;    lavc_venc_context->refs = lavc_param_refs;    lavc_venc_context->b_sensitivity = lavc_param_b_sensitivity;    lavc_venc_context->level = lavc_param_level;    mux_v->imgfmt = lavc_param_format;    switch(lavc_param_format)    {	case IMGFMT_YV12:	    lavc_venc_context->pix_fmt = PIX_FMT_YUV420P;	    break;	case IMGFMT_422P:	    lavc_venc_context->pix_fmt = PIX_FMT_YUV422P;	    break;	case IMGFMT_444P:	    lavc_venc_context->pix_fmt = PIX_FMT_YUV444P;	    break;	case IMGFMT_411P:	    lavc_venc_context->pix_fmt = PIX_FMT_YUV411P;	    break;	case IMGFMT_YVU9:	    lavc_venc_context->pix_fmt = PIX_FMT_YUV410P;	    break;	case IMGFMT_BGR32:	    lavc_venc_context->pix_fmt = PIX_FMT_RGB32;	    break;	default:    	    mp_msg(MSGT_MENCODER,MSGL_ERR,"%s is not a supported format/n", vo_format_name(lavc_param_format));    	    return 0;    }    if(!stats_file) {    /* lavc internal 2pass bitrate control */    switch(lavc_param_vpass){    case 2:    case 3:	lavc_venc_context->flags|= CODEC_FLAG_PASS2; 	stats_file= fopen(passtmpfile, "rb");	if(stats_file==NULL){	    mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: filename=%s/n", passtmpfile);            return 0;	}	fseek(stats_file, 0, SEEK_END);	size= ftell(stats_file);	fseek(stats_file, 0, SEEK_SET);		lavc_venc_context->stats_in= av_malloc(size + 1);	lavc_venc_context->stats_in[size]=0;	if(fread(lavc_venc_context->stats_in, size, 1, stats_file)<1){	    mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: reading from filename=%s/n", passtmpfile);            return 0;	}        	if(lavc_param_vpass == 2)	    break;	else	    fclose(stats_file);	    /* fall through */    case 1: 	lavc_venc_context->flags|= CODEC_FLAG_PASS1; 
Developer: batman52, Project: dingux-code, Lines: 67


Example 19: init

//.........这里部分代码省略.........    OutBufferRect.top=0;    OutBufferRect.left=0;    OutBufferRect.right=sh->disp_w;    OutBufferRect.bottom=sh->disp_h;    //Fill the imagedescription for our SVQ3 frame    //we can probably get this from Demuxer    if (!sh->ImageDesc && extradata_size >= sizeof(ImageDescription) &&        ((ImageDescription *)extradata)->idSize <= extradata_size)        sh->ImageDesc = extradata;    if (sh->ImageDesc) {        mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageDescription size: %d/n",((ImageDescription*)(sh->ImageDesc))->idSize);        framedescHandle=(ImageDescriptionHandle)NewHandleClear(((ImageDescription*)(sh->ImageDesc))->idSize);        memcpy(*framedescHandle,sh->ImageDesc,((ImageDescription*)(sh->ImageDesc))->idSize);    } else {        // assume extradata consists only of the atoms, build the other parts        ImageDescription *idesc;        int size = sizeof(*idesc) + extradata_size;        mp_msg(MSGT_DECVIDEO, MSGL_V, "Generating a ImageDescription/n");        framedescHandle=(ImageDescriptionHandle)NewHandleClear(size);        idesc = *framedescHandle;        memcpy(idesc + 1, extradata, extradata_size);        idesc->idSize = size;        idesc->width  = sh->disp_w;        idesc->height = sh->disp_h;    }    dump_ImageDescription(*framedescHandle);    (**framedescHandle).cType = bswap_32(sh->format);    sh->context = (void *)kYUVSPixelFormat;    {	int imgfmt = sh->codec->outfmt[sh->outfmtidx];	int qt_imgfmt;    switch(imgfmt)    {	case IMGFMT_YUY2:	    qt_imgfmt = kYUVSPixelFormat;	    break;	case IMGFMT_YVU9:	    qt_imgfmt = 0x73797639; //kYVU9PixelFormat;	    break;	case IMGFMT_YV12:	    qt_imgfmt = 0x79343230;	    break;	case IMGFMT_UYVY:	    qt_imgfmt = k2vuyPixelFormat;	    break;	case IMGFMT_YVYU:	    qt_imgfmt = kYVYU422PixelFormat;	    imgfmt = IMGFMT_YUY2;	    break;	case IMGFMT_RGB16:	    qt_imgfmt = k16LE555PixelFormat;	    break;	case IMGFMT_BGR24:	    qt_imgfmt = k24BGRPixelFormat;	    break;	case IMGFMT_BGR32:	    qt_imgfmt = k32BGRAPixelFormat;	    break;	case IMGFMT_RGB32:	    qt_imgfmt = k32RGBAPixelFormat;	    break;	default:	    mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Unknown requested csp/n");	    return 0;    }    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"imgfmt: %s qt_imgfmt: %.4s/n", vo_format_name(imgfmt), (char *)&qt_imgfmt);    sh->context = (void *)qt_imgfmt;    if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,imgfmt)) return 0;    }    mpi=mpcodecs_get_image(sh, MP_IMGTYPE_STATIC, MP_IMGFLAG_PRESERVE,	sh->disp_w, sh->disp_h);    if(!mpi) return 0;    result = QTNewGWorldFromPtr(        &OutBufferGWorld,	(OSType)sh->context,        &OutBufferRect,   //we should benchmark if yvu9 is faster for svq3, too        0,        0,        0,        mpi->planes[0],        mpi->stride[0]);    if (result) {        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"QTNewGWorldFromPtr result=%d/n",result);        return 0;    }    result = DecompressSequenceBegin(&imageSeq, framedescHandle, (CGrafPtr)OutBufferGWorld,                                     NULL, NULL, NULL, srcCopy,  NULL, 0,                                     codecNormalQuality, 0);    if(result) {        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"DecompressSequenceBegin result=%d/n",result);        return 0;    }    return 1;}
Developer: azuwis, Project: mplayer, Lines: 101


Example 20: video_reconfig_filters

int video_reconfig_filters(struct dec_video *d_video,
                           const struct mp_image_params *params)
{
    struct MPOpts *opts = d_video->opts;
    struct mp_image_params p = *params;
    struct sh_video *sh = d_video->header->video;

    MP_VERBOSE(d_video, "VIDEO:  %dx%d  %5.3f fps  %5.1f kbps (%4.1f kB/s)\n",
               p.w, p.h, sh->fps, sh->i_bps * 0.008,
               sh->i_bps / 1000.0);

    MP_VERBOSE(d_video, "VDec: vo config request - %d x %d (%s)\n",
               p.w, p.h, vo_format_name(p.imgfmt));

    float decoder_aspect = p.d_w / (float)p.d_h;
    if (d_video->initial_decoder_aspect == 0)
        d_video->initial_decoder_aspect = decoder_aspect;

    // We normally prefer the container aspect, unless the decoder aspect
    // changes at least once.
    if (d_video->initial_decoder_aspect == decoder_aspect) {
        if (sh->aspect > 0)
            vf_set_dar(&p.d_w, &p.d_h, p.w, p.h, sh->aspect);
    } else {
        // Even if the aspect switches back, don't use container aspect again.
        d_video->initial_decoder_aspect = -1;
    }

    float force_aspect = opts->movie_aspect;
    if (force_aspect > -1.0 && d_video->stream_aspect != 0.0)
        force_aspect = d_video->stream_aspect;

    if (force_aspect > 0)
        vf_set_dar(&p.d_w, &p.d_h, p.w, p.h, force_aspect);

    if (abs(p.d_w - p.w) >= 4 || abs(p.d_h - p.h) >= 4) {
        MP_VERBOSE(d_video, "Aspect ratio is %.2f:1 - "
                   "scaling to correct movie aspect.\n", sh->aspect);
        MP_SMODE(d_video, "ID_VIDEO_ASPECT=%1.4f\n", sh->aspect);
    } else {
        p.d_w = p.w;
        p.d_h = p.h;
    }

    // Apply user overrides
    if (opts->requested_colorspace != MP_CSP_AUTO)
        p.colorspace = opts->requested_colorspace;
    if (opts->requested_input_range != MP_CSP_LEVELS_AUTO)
        p.colorlevels = opts->requested_input_range;
    p.outputlevels = opts->requested_output_range;

    // Detect colorspace from resolution.
    // Make sure the user-overrides are consistent (no RGB csp for YUV, etc.).
    mp_image_params_guess_csp(&p);

    // Time to config libvo!
    MP_VERBOSE(d_video, "VO Config (%dx%d->%dx%d,0x%X)\n",
               p.w, p.h, p.d_w, p.d_h, p.imgfmt);

    if (vf_reconfig(d_video->vfilter, &p) < 0) {
        MP_WARN(d_video, "FATAL: Cannot initialize video driver.\n");
        return -1;
    }

    d_video->vf_input = p;

    return 0;
}

Developer: Aseeker, Project: mpv, Lines: 68


Example 21: vlvo_init

int      vlvo_init(unsigned src_width,unsigned src_height,                   unsigned x_org,unsigned y_org,unsigned dst_width,                   unsigned dst_height,unsigned format,unsigned dest_bpp){    size_t i,awidth;    mp_msg(MSGT_VO,MSGL_WARN, MSGTR_LIBVO_VESA_ThisBranchIsNoLongerSupported);    return -1;    if( mp_msg_test(MSGT_VO,MSGL_DBG2) ) {        mp_msg(MSGT_VO,MSGL_DBG2, "vesa_lvo: vlvo_init() was called/n");    }    image_width = src_width;    image_height = src_height;    mga_vid_config.version=MGA_VID_VERSION;    src_format = mga_vid_config.format=format;    awidth = (src_width + (WIDTH_ALIGN-1)) & ~(WIDTH_ALIGN-1);    switch(format) {    case IMGFMT_YV12:    case IMGFMT_I420:    case IMGFMT_IYUV:        image_bpp=16;        mga_vid_config.frame_size = awidth*src_height+(awidth*src_height)/2;        break;    case IMGFMT_YUY2:    case IMGFMT_UYVY:        image_bpp=16;        mga_vid_config.frame_size = awidth*src_height*2;        break;    case IMGFMT_RGB15:    case IMGFMT_BGR15:    case IMGFMT_RGB16:    case IMGFMT_BGR16:        image_bpp=16;        mga_vid_config.frame_size = awidth*src_height*2;        break;    case IMGFMT_RGB24:    case IMGFMT_BGR24:        image_bpp=24;        mga_vid_config.frame_size = awidth*src_height*3;        break;    case IMGFMT_RGB32:    case IMGFMT_BGR32:        image_bpp=32;        mga_vid_config.frame_size = awidth*src_height*4;        break;    default:        mp_msg(MSGT_VO,MSGL_WARN, MSGTR_LIBVO_VESA_InvalidOutputFormat,vo_format_name(format),format);        return -1;    }    mga_vid_config.colkey_on=0;    mga_vid_config.src_width = src_width;    mga_vid_config.src_height= src_height;    mga_vid_config.dest_width = dst_width;    mga_vid_config.dest_height= dst_height;    mga_vid_config.x_org=x_org;    mga_vid_config.y_org=y_org;    mga_vid_config.num_frames=NUM_FRAMES;    if (ioctl(lvo_handler,MGA_VID_CONFIG,&mga_vid_config))    {        perror("vesa_lvo: Error in mga_vid_config ioctl()");        mp_msg(MSGT_VO,MSGL_WARN, MSGTR_LIBVO_VESA_IncompatibleDriverVersion);        return -1;    }    ioctl(lvo_handler,MGA_VID_ON,0);    frames[0] = (char*)mmap(0,mga_vid_config.frame_size*mga_vid_config.num_frames,PROT_WRITE,MAP_SHARED,lvo_handler,0);    for(i=1; i<NUM_FRAMES; i++)        frames[i] = frames[i-1] + mga_vid_config.frame_size;    next_frame = 0;    lvo_mem = frames[next_frame];    /*clear the buffer*/    memset(frames[0],0x80,mga_vid_config.frame_size*mga_vid_config.num_frames);    return 0;}
Developer: w-spencer, Project: sagetv, Lines: 74


Example 22: MovDecoder_InitSubsystem

//.........这里部分代码省略.........  if(!handler)  {    //mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load qtmlClient.dll/n");    return 0;  }  result=InitializeQTML(6+16);  //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"InitializeQTML returned %li/n",result);  memset(&desc,0,sizeof(desc));  desc.componentType= (((unsigned char)'i')<<24)|(((unsigned char)'m')<<16)|(((unsigned char)'d')<<8)|(((unsigned char)'c'));  desc.componentSubType = bswap_32(sh->format);  desc.componentManufacturer=0;  desc.componentFlags=0;  desc.componentFlagsMask=0; // mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Count = %ld/n",CountComponents(&desc));  prev=FindNextComponent(NULL,&desc);  if(!prev)  {    //mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Cannot find requested component/n");    return(0);  }  //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Found it! ID = %p/n",prev);  ci=OpenComponent(prev);  //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ci=%p/n",ci);  memset(&icap,0,sizeof(icap));  cres=ImageCodecInitialize(ci,&icap);  //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageCodecInitialize->%#x  size=%d (%d)/n",cres,icap.recordSize,icap.decompressRecordSize);  memset(&cinfo,0,sizeof(cinfo));  cres=ImageCodecGetCodecInfo(ci,&cinfo);  //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Flags: compr: 0x%X  decomp: 0x%X format: 0x%X/n",  cinfo.compressFlags, cinfo.decompressFlags, cinfo.formatFlags);  //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Codec name: %.*s/n",((unsigned char*)&cinfo.typeName)[0], ((unsigned char*)&cinfo.typeName)+1);  //make a yuy2 gworld  OutBufferRect.top=0;  OutBufferRect.left=0;  OutBufferRect.right=sh->disp_w;  OutBufferRect.bottom=sh->disp_h;  //Fill the imagedescription for our SVQ3 frame  //we can probably get this from Demuxer  if(!sh->ImageDesc) sh->ImageDesc=(sh->bih+1); // hack for SVQ3-in-AVI  mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageDescription size: %d/n",((ImageDescription*)(sh->ImageDesc))->idSize);  framedescHandle=(ImageDescriptionHandle)NewHandleClear(((ImageDescription*)(sh->ImageDesc))->idSize);  memcpy(*framedescHandle,sh->ImageDesc,((ImageDescription*)(sh->ImageDesc))->idSize);  dump_ImageDescription(*framedescHandle);  //Find codecscomponent for video decompression  //    result = FindCodec ('SVQ1',anyCodec,&compressor,&decompressor );                   //    printf("FindCodec SVQ1 returned:%i compressor: 0x%X decompressor: 0x%X/n",result,compressor,decompressor);  sh->context = (void *)kYUVSPixelFormat;  #if 1  {    int imgfmt = sh->codec->outfmt[sh->outfmtidx];    int qt_imgfmt;    switch(imgfmt)    {    case IMGFMT_YUY2:      qt_imgfmt = kYUVSPixelFormat;      break;    case IMGFMT_YVU9:      qt_imgfmt = 0x73797639; //kYVU9PixelFormat;      break;    case IMGFMT_YV12:      qt_imgfmt = 0x79343230;      break;    case IMGFMT_UYVY:      qt_imgfmt = kUYVY422PixelFormat;      break;    case IMGFMT_YVYU:      qt_imgfmt = kYVYU422PixelFormat;      imgfmt = IMGFMT_YUY2;      break;    case IMGFMT_RGB16:      qt_imgfmt = k16LE555PixelFormat;      break;    case IMGFMT_BGR24:      qt_imgfmt = k24BGRPixelFormat;      break;    case IMGFMT_BGR32:      qt_imgfmt = k32BGRAPixelFormat;      break;    case IMGFMT_RGB32:      qt_imgfmt = k32RGBAPixelFormat;      break;    default:      mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Unknown requested csp/n");      return(0);        }    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"imgfmt: %s qt_imgfmt: %.4s/n", vo_format_name(imgfmt), (char *)&qt_imgfmt);    sh->context = (void *)qt_imgfmt;    if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,imgfmt)) return 0;  }  return 1;}
Developer: EQ4, Project: neonv2, Lines: 101


Example 23: mpcodecs_config_vo

int mpcodecs_config_vo(sh_video_t *sh, int w, int h,                       unsigned int preferred_outfmt){    int i, j;    int only_preferred = 1;    unsigned int out_fmt = 0;    int screen_size_x = 0;      //SCREEN_SIZE_X;    int screen_size_y = 0;      //SCREEN_SIZE_Y;    vf_instance_t *vf = sh->vfilter, *sc = NULL;    int palette = 0;    int vocfg_flags = 0;    if (w)        sh->disp_w = w;    if (h)        sh->disp_h = h;    if (!sh->disp_w || !sh->disp_h)        return 0;    mp_msg(MSGT_DECVIDEO, MSGL_V,           "VDec: vo config request - %d x %d (preferred colorspace: %s)/n", w,           h, vo_format_name(preferred_outfmt));//    if(!vf) return 1; // temp hack    if (get_video_quality_max(sh) <= 0 && divx_quality) {        // user wants postprocess but no pp filter yet:        sh->vfilter = vf = vf_open_filter(vf, "pp", NULL);    }    // check if libvo and codec has common outfmt (no conversion):  csp_again:    if (mp_msg_test(MSGT_DECVIDEO, MSGL_V)) {        vf_instance_t *f = vf;        mp_msg(MSGT_DECVIDEO, MSGL_V, "Trying filter chain:");        for (f = vf; f; f = f->next)            mp_msg(MSGT_DECVIDEO, MSGL_V, " %s", f->info->name);        mp_msg(MSGT_DECVIDEO, MSGL_V, "/n");    }    j = -1;    for (i = 0; only_preferred || i < CODECS_MAX_OUTFMT; i++) {        int flags;        if (i == CODECS_MAX_OUTFMT) {            i = 0;            only_preferred = 0;        }        out_fmt = sh->codec->outfmt[i];        if (only_preferred && out_fmt != preferred_outfmt)            continue;        if (out_fmt == (unsigned int) 0xFFFFFFFF)            continue;        // check (query) if codec really support this outfmt...        sh->outfmtidx = i;  // pass index to the control() function this way        if (mpvdec->control(sh, VDCTRL_QUERY_FORMAT, &out_fmt) ==            CONTROL_FALSE) {            mp_msg(MSGT_CPLAYER, MSGL_DBG2,                   "vo_debug: codec query_format(%s) returned FALSE/n",                   vo_format_name(out_fmt));            continue;        }        flags = vf->query_format(vf, out_fmt);        mp_msg(MSGT_CPLAYER, MSGL_DBG2,               "vo_debug: query(%s) returned 0x%X (i=%d) /n",               vo_format_name(out_fmt), flags, i);        if ((flags & VFCAP_CSP_SUPPORTED_BY_HW)            || (flags & VFCAP_CSP_SUPPORTED && j < 0)) {            j = i;            vo_flags = flags;            if (flags & VFCAP_CSP_SUPPORTED_BY_HW)                break;        } else if (!palette                   && !(flags &                        (VFCAP_CSP_SUPPORTED_BY_HW | VFCAP_CSP_SUPPORTED))                   && (out_fmt == IMGFMT_RGB8 || out_fmt == IMGFMT_BGR8)) {            palette = 1;        }    }    if (j < 0 && !IMGFMT_IS_HWACCEL(preferred_outfmt)) {        // TODO: no match - we should use conversion...        
if (strcmp(vf->info->name, "scale") && palette != -1) {            mp_msg(MSGT_DECVIDEO, MSGL_INFO, MSGTR_CouldNotFindColorspace);            sc = vf = vf_open_filter(vf, "scale", NULL);            goto csp_again;        } else if (palette == 1) {            mp_msg(MSGT_DECVIDEO, MSGL_V, "vd: Trying -vf palette.../n");            palette = -1;            vf = vf_open_filter(vf, "palette", NULL);            goto csp_again;        } else {                // sws failed, if the last filter (vf_vo) support MPEGPES try to append vf_lavc            vf_instance_t *vo, *vp = NULL, *ve, *vpp = NULL;            // Remove the scale filter if we added it ourselves            if (vf == sc) {                ve = vf;                vf = vf->next;                vf_uninit_filter(ve);            }            // Find the last filter (vf_vo)            for (vo = vf; vo->next; vo = vo->next) {//.........这里部分代码省略.........
Developer: sherpya, Project: MPlayer, Lines: 101


Example 24: reconfig

static int reconfig(struct vf_instance *vf, struct mp_image_params *in,                    struct mp_image_params *out){    int width = in->w, height = in->h, d_width = in->d_w, d_height = in->d_h;    unsigned int outfmt = in->imgfmt;    unsigned int best = find_best_out(vf, outfmt);    int round_w = 0, round_h = 0;    if (!best) {        MP_WARN(vf, "SwScale: no supported outfmt found :(/n");        return -1;    }    vf->next->query_format(vf->next, best);    vf->priv->w = vf->priv->cfg_w;    vf->priv->h = vf->priv->cfg_h;    if (vf->priv->w <= -8) {        vf->priv->w += 8;        round_w = 1;    }    if (vf->priv->h <= -8) {        vf->priv->h += 8;        round_h = 1;    }    if (vf->priv->w < -3 || vf->priv->h < -3 ||        (vf->priv->w < -1 && vf->priv->h < -1))    {        // TODO: establish a direct connection to the user's brain        // and find out what the heck he thinks MPlayer should do        // with this nonsense.        MP_ERR(vf, "SwScale: EUSERBROKEN Check your parameters, they make no sense!/n");        return -1;    }    if (vf->priv->w == -1)        vf->priv->w = width;    if (vf->priv->w == 0)        vf->priv->w = d_width;    if (vf->priv->h == -1)        vf->priv->h = height;    if (vf->priv->h == 0)        vf->priv->h = d_height;    if (vf->priv->w == -3)        vf->priv->w = vf->priv->h * width / height;    if (vf->priv->w == -2)        vf->priv->w = vf->priv->h * d_width / d_height;    if (vf->priv->h == -3)        vf->priv->h = vf->priv->w * height / width;    if (vf->priv->h == -2)        vf->priv->h = vf->priv->w * d_height / d_width;    if (round_w)        vf->priv->w = ((vf->priv->w + 8) / 16) * 16;    if (round_h)        vf->priv->h = ((vf->priv->h + 8) / 16) * 16;    // check for upscaling, now that all parameters had been applied    if (vf->priv->noup) {        if ((vf->priv->w > width) + (vf->priv->h > height) >= vf->priv->noup) {            vf->priv->w = width;            vf->priv->h = height;        }    }    MP_DBG(vf, "SwScale: scaling %dx%d %s to %dx%d %s  /n",           width, height, vo_format_name(outfmt), vf->priv->w, vf->priv->h,           vo_format_name(best));    // Compute new d_width and d_height, preserving aspect    // while ensuring that both are >= output size in pixels.    if (vf->priv->h * d_width > vf->priv->w * d_height) {        d_width = vf->priv->h * d_width / d_height;        d_height = vf->priv->h;    } else {        d_height = vf->priv->w * d_height / d_width;        d_width = vf->priv->w;    }    *out = *in;    out->w = vf->priv->w;    out->h = vf->priv->h;    out->d_w = d_width;    out->d_h = d_height;    out->imgfmt = best;    // Second-guess what libswscale is going to output and what not.    // It depends what libswscale supports for in/output, and what makes sense.    struct mp_imgfmt_desc s_fmt = mp_imgfmt_get_desc(in->imgfmt);    struct mp_imgfmt_desc d_fmt = mp_imgfmt_get_desc(out->imgfmt);    // keep colorspace settings if the data stays in yuv    if (!(s_fmt.flags & MP_IMGFLAG_YUV) || !(d_fmt.flags & MP_IMGFLAG_YUV)) {        out->colorspace = MP_CSP_AUTO;        out->colorlevels = MP_CSP_LEVELS_AUTO;    }//.........这里部分代码省略.........
Developer: amosonn, Project: mpv, Lines: 101


Example 25: vidix_query_fourcc

uint32_t vidix_query_fourcc(uint32_t format)
{
    if (mp_msg_test(MSGT_VO, MSGL_DBG2)) {
        mp_msg(MSGT_VO, MSGL_DBG2, "vosub_vidix: query_format was called: %x (%s)\n", format, vo_format_name(format));
    }
    vidix_fourcc.fourcc = format;
    vdlQueryFourcc(vidix_handler, &vidix_fourcc);
    if (vidix_fourcc.depth == VID_DEPTH_NONE)
        return 0;
    return VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW | VFCAP_HWSCALE_UP | VFCAP_HWSCALE_DOWN | VFCAP_OSD | VFCAP_ACCEPT_STRIDE;
}

Developer: lidongliangfly, Project: Intrisit201202, Lines: 10


Example 26: write_video

void write_video(struct MPContext *mpctx, double endpts){    struct MPOpts *opts = mpctx->opts;    struct vo *vo = mpctx->video_out;    if (!mpctx->d_video)        return;    // Actual playback starts when both audio and video are ready.    if (mpctx->video_status == STATUS_READY)        return;    if (mpctx->paused && mpctx->video_status >= STATUS_READY)        return;    int r = video_output_image(mpctx, endpts);    MP_TRACE(mpctx, "video_output_image: %d/n", r);    if (r < 0)        goto error;    if (r == VD_WAIT) // Demuxer will wake us up for more packets to decode.        return;    if (r == VD_EOF) {        mpctx->video_status =            vo_still_displaying(vo) ? STATUS_DRAINING : STATUS_EOF;        mpctx->delay = 0;        mpctx->last_av_difference = 0;        MP_DBG(mpctx, "video EOF (status=%d)/n", mpctx->video_status);        return;    }    if (mpctx->video_status > STATUS_PLAYING)        mpctx->video_status = STATUS_PLAYING;    if (r != VD_NEW_FRAME) {        mpctx->sleeptime = 0; // Decode more in next iteration.        return;    }    // Filter output is different from VO input?    struct mp_image_params p = mpctx->next_frames[0]->params;    if (!vo->params || !mp_image_params_equal(&p, vo->params)) {        // Changing config deletes the current frame; wait until it's finished.        if (vo_still_displaying(vo))            return;        const struct vo_driver *info = mpctx->video_out->driver;        char extra[20] = {0};        if (p.w != p.d_w || p.h != p.d_h)            snprintf(extra, sizeof(extra), " => %dx%d", p.d_w, p.d_h);        MP_INFO(mpctx, "VO: [%s] %dx%d%s %s/n",                info->name, p.w, p.h, extra, vo_format_name(p.imgfmt));        MP_VERBOSE(mpctx, "VO: Description: %s/n", info->description);        int vo_r = vo_reconfig(vo, &p, 0);        if (vo_r < 0) {            mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED;            goto error;        }        init_vo(mpctx);    }    mpctx->time_frame -= get_relative_time(mpctx);    update_avsync_before_frame(mpctx);    double time_frame = MPMAX(mpctx->time_frame, -1);    int64_t pts = mp_time_us() + (int64_t)(time_frame * 1e6);    // wait until VO wakes us up to get more frames    if (!vo_is_ready_for_frame(vo, pts)) {        if (video_feed_async_filter(mpctx) < 0)            goto error;        return;    }    assert(mpctx->num_next_frames >= 1);    struct vo_frame dummy = {        .pts = pts,        .duration = -1,        .num_frames = mpctx->num_next_frames,    };    for (int n = 0; n < dummy.num_frames; n++)        dummy.frames[n] = mpctx->next_frames[n];    struct vo_frame *frame = vo_frame_ref(&dummy);    double diff = -1;    double vpts0 = mpctx->next_frames[0]->pts;    double vpts1 = MP_NOPTS_VALUE;    if (mpctx->num_next_frames >= 2)        vpts1 = mpctx->next_frames[1]->pts;    if (vpts0 != MP_NOPTS_VALUE && vpts1 != MP_NOPTS_VALUE)        diff = vpts1 - vpts0;    if (diff < 0 && mpctx->d_video->fps > 0)        diff = 1.0 / mpctx->d_video->fps; // fallback to demuxer-reported fps    if (opts->untimed || vo->driver->untimed)        diff = -1; // disable frame dropping and aspects of frame timing    if (diff >= 0) {        // expected A/V sync correction is ignored//.........这里部分代码省略.........
Developer: Jim-Duke, Project: mpv, Lines: 101


Example 27: vidix_init

int      vidix_init(unsigned src_width,unsigned src_height,		   unsigned x_org,unsigned y_org,unsigned dst_width,		   unsigned dst_height,unsigned format,unsigned dest_bpp,		   unsigned vid_w,unsigned vid_h){  void *tmp, *tmpa;  size_t i;  int err;  uint32_t sstride,apitch;  if( mp_msg_test(MSGT_VO,MSGL_DBG2) )     mp_msg(MSGT_VO,MSGL_DBG2, "vosub_vidix: vidix_init() was called/n"    	    "src_w=%u src_h=%u dest_x_y_w_h = %u %u %u %u/n"	    "format=%s dest_bpp=%u vid_w=%u vid_h=%u/n"	    ,src_width,src_height,x_org,y_org,dst_width,dst_height	    ,vo_format_name(format),dest_bpp,vid_w,vid_h);	if(vidix_query_fourcc(format) == 0)	{	  mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_UnsupportedFourccForThisVidixDriver,	    format,vo_format_name(format));	  return -1;	} 	if(((vidix_cap.maxwidth != -1) && (vid_w > vidix_cap.maxwidth)) ||	    ((vidix_cap.minwidth != -1) && (vid_w < vidix_cap.minwidth)) ||	    ((vidix_cap.maxheight != -1) && (vid_h > vidix_cap.maxheight)) ||	    ((vidix_cap.minwidth != -1 ) && (vid_h < vidix_cap.minheight)))	{	  mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedResolution,	    vid_w, vid_h, vidix_cap.minwidth, vidix_cap.minheight,	    vidix_cap.maxwidth, vidix_cap.maxheight);	  return -1;	}	err = 0;	switch(dest_bpp)	{	  case 1: err = ((vidix_fourcc.depth & VID_DEPTH_1BPP) != VID_DEPTH_1BPP); break;	  case 2: err = ((vidix_fourcc.depth & VID_DEPTH_2BPP) != VID_DEPTH_2BPP); break;	  case 4: err = ((vidix_fourcc.depth & VID_DEPTH_4BPP) != VID_DEPTH_4BPP); break;	  case 8: err = ((vidix_fourcc.depth & VID_DEPTH_8BPP) != VID_DEPTH_8BPP); break;	  case 12:err = ((vidix_fourcc.depth & VID_DEPTH_12BPP) != VID_DEPTH_12BPP); break;	  case 15:err = ((vidix_fourcc.depth & VID_DEPTH_15BPP) != VID_DEPTH_15BPP); break;	  case 16:err = ((vidix_fourcc.depth & VID_DEPTH_16BPP) != VID_DEPTH_16BPP); break;	  case 24:err = ((vidix_fourcc.depth & VID_DEPTH_24BPP) != VID_DEPTH_24BPP); break;	  case 32:err = ((vidix_fourcc.depth & VID_DEPTH_32BPP) != VID_DEPTH_32BPP); break;	  default: err=1; break;	}	if(err)	{	  mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedColorDepth	  ,vidix_fourcc.depth);	  return -1;	}	if((dst_width > src_width || dst_height > src_height) && (vidix_cap.flags & FLAG_UPSCALER) != FLAG_UPSCALER)	{	  mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_DriverCantUpscaleImage,	  src_width, src_height, dst_width, dst_height);	  return -1;	}	if((dst_width > src_width || dst_height > src_height) && (vidix_cap.flags & FLAG_DOWNSCALER) != FLAG_DOWNSCALER)	{	  mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_DriverCantDownscaleImage,	  src_width, src_height, dst_width, dst_height);	  return -1;	}	image_width = src_width;	image_height = src_height;	src_format = format;	if(forced_fourcc) format = forced_fourcc;	memset(&vidix_play,0,sizeof(vidix_playback_t));	vidix_play.fourcc = format;	vidix_play.capability = vidix_cap.flags; /* every ;) */	vidix_play.blend_factor = 0; /* for now */	/* display the full picture.	   
Nick: we could implement here zooming to a specified area -- alex */	vidix_play.src.x = vidix_play.src.y = 0;	vidix_play.src.w = src_width;	vidix_play.src.h = src_height;	vidix_play.dest.x = x_org;	vidix_play.dest.y = y_org;	vidix_play.dest.w = dst_width;	vidix_play.dest.h = dst_height;//	vidix_play.num_frames=vo_doublebuffering?NUM_FRAMES-1:1;	/* we aren't mad...3 buffers are more than enough */	vidix_play.num_frames=vo_doublebuffering?3:1;	vidix_play.src.pitch.y = vidix_play.src.pitch.u = vidix_play.src.pitch.v = 0;	if((err=vdlConfigPlayback(vidix_handler,&vidix_play))!=0)	{ 		mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_CantConfigurePlayback,strerror(err));		return -1;	}	if ( mp_msg_test(MSGT_VO,MSGL_V) ) {		mp_msg(MSGT_VO,MSGL_V, "vosub_vidix: using %d buffer(s)/n", vidix_play.num_frames); }	vidix_mem = vidix_play.dga_addr;	tmp = calloc(image_width, image_height);	tmpa = malloc(image_width * image_height);//.........这里部分代码省略.........
Developer: lidongliangfly, Project: Intrisit201202, Lines: 101



Note: The vo_format_name examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code belongs to the original authors. Refer to the corresponding project's License before distributing or using the code, and do not republish without permission.

