This tutorial presents practical C++ code examples for the GST_VIDEO_FRAME_COMP_DATA function; we hope you find them useful.

This article compiles typical usages of GST_VIDEO_FRAME_COMP_DATA in C++. If you have been wondering what exactly GST_VIDEO_FRAME_COMP_DATA does, how to call it, or what real-world uses look like, the curated examples below should help. In total, 29 code examples of GST_VIDEO_FRAME_COMP_DATA are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better C++ code examples.
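Before the numbered examples, here is a minimal sketch of the pattern they all build on: map a GstBuffer as a GstVideoFrame, read the per-component pixel pointer and row stride with GST_VIDEO_FRAME_COMP_DATA and GST_VIDEO_FRAME_COMP_STRIDE, and unmap when done. This sketch is ours rather than one of the collected examples; the dump_luma_plane helper is hypothetical, and it assumes `info` and `buffer` are a valid, matching GstVideoInfo and GstBuffer.

#include <gst/video/video.h>

static void
dump_luma_plane (GstVideoInfo * info, GstBuffer * buffer)
{
  GstVideoFrame frame;
  guint8 *ydata;
  gint ystride, width, height;

  /* mapping fails if the buffer does not match the video info */
  if (!gst_video_frame_map (&frame, info, buffer, GST_MAP_READ))
    return;

  /* component 0 is luma (Y) for YUV formats; rows may be padded, hence the stride */
  ydata = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  ystride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
  width = GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0);
  height = GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0);

  g_print ("luma plane: %dx%d, stride %d bytes, first byte %u\n",
      width, height, ystride, ydata[0]);

  gst_video_frame_unmap (&frame);
}

Note how the examples below never index a plane as width * height bytes; they always advance by the component stride, because hardware and pooled buffers commonly pad each row.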
Example 1: gst_pixbufscale_transform_frame

static GstFlowReturn
gst_pixbufscale_transform_frame (GstVideoFilter * filter,
    GstVideoFrame * in, GstVideoFrame * out)
{
  GstPixbufScale *scale;
  GdkPixbuf *src_pixbuf, *dest_pixbuf;

  scale = GST_PIXBUFSCALE (filter);

  src_pixbuf =
      gdk_pixbuf_new_from_data (GST_VIDEO_FRAME_COMP_DATA (in, 0),
      GDK_COLORSPACE_RGB, FALSE, 8, GST_VIDEO_FRAME_WIDTH (in),
      GST_VIDEO_FRAME_HEIGHT (in), GST_VIDEO_FRAME_COMP_STRIDE (in, 0),
      NULL, NULL);
  dest_pixbuf =
      gdk_pixbuf_new_from_data (GST_VIDEO_FRAME_COMP_DATA (out, 0),
      GDK_COLORSPACE_RGB, FALSE, 8, GST_VIDEO_FRAME_WIDTH (out),
      GST_VIDEO_FRAME_HEIGHT (out), GST_VIDEO_FRAME_COMP_STRIDE (out, 0),
      NULL, NULL);

  gdk_pixbuf_scale (src_pixbuf, dest_pixbuf, 0, 0,
      GST_VIDEO_FRAME_WIDTH (out), GST_VIDEO_FRAME_HEIGHT (out), 0, 0,
      (double) GST_VIDEO_FRAME_WIDTH (out) / GST_VIDEO_FRAME_WIDTH (in),
      (double) GST_VIDEO_FRAME_HEIGHT (out) / GST_VIDEO_FRAME_HEIGHT (in),
      scale->gdk_method);

  g_object_unref (src_pixbuf);
  g_object_unref (dest_pixbuf);

  return GST_FLOW_OK;
}
Developer ID: Lachann, Project: gst-plugins-good, Lines of code: 33
Example 2: NS_ASSERTION

void GStreamerReader::ImageDataFromVideoFrame(GstVideoFrame *aFrame,
                                              PlanarYCbCrImage::Data *aData)
{
  NS_ASSERTION(GST_VIDEO_INFO_IS_YUV(&mVideoInfo),
               "Non-YUV video frame formats not supported");
  NS_ASSERTION(GST_VIDEO_FRAME_N_COMPONENTS(aFrame) == 3,
               "Unsupported number of components in video frame");

  aData->mPicX = aData->mPicY = 0;
  aData->mPicSize = gfx::IntSize(mPicture.width, mPicture.height);
  aData->mStereoMode = StereoMode::MONO;

  aData->mYChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 0);
  aData->mYStride = GST_VIDEO_FRAME_COMP_STRIDE(aFrame, 0);
  aData->mYSize = gfx::IntSize(GST_VIDEO_FRAME_COMP_WIDTH(aFrame, 0),
                               GST_VIDEO_FRAME_COMP_HEIGHT(aFrame, 0));
  aData->mYSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 0) - 1;
  aData->mCbCrStride = GST_VIDEO_FRAME_COMP_STRIDE(aFrame, 1);
  aData->mCbCrSize = gfx::IntSize(GST_VIDEO_FRAME_COMP_WIDTH(aFrame, 1),
                                  GST_VIDEO_FRAME_COMP_HEIGHT(aFrame, 1));
  aData->mCbChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 1);
  aData->mCrChannel = GST_VIDEO_FRAME_COMP_DATA(aFrame, 2);
  aData->mCbSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 1) - 1;
  aData->mCrSkip = GST_VIDEO_FRAME_COMP_PSTRIDE(aFrame, 2) - 1;
}
Developer ID: msliu, Project: gecko-dev, Lines of code: 25
Example 3: gst_video_balance_packed_yuv

static void
gst_video_balance_packed_yuv (GstVideoBalance * videobalance,
    GstVideoFrame * frame)
{
  gint x, y, stride;
  guint8 *ydata, *udata, *vdata;
  gint yoff, uoff, voff;
  gint width, height;
  gint width2, height2;
  guint8 *tabley = videobalance->tabley;
  guint8 **tableu = videobalance->tableu;
  guint8 **tablev = videobalance->tablev;

  width = GST_VIDEO_FRAME_WIDTH (frame);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
  ydata = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  yoff = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);

  for (y = 0; y < height; y++) {
    guint8 *yptr;

    yptr = ydata + y * stride;
    for (x = 0; x < width; x++) {
      *yptr = tabley[*yptr];
      yptr += yoff;
    }
  }

  width2 = GST_VIDEO_FRAME_COMP_WIDTH (frame, 1);
  height2 = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 1);

  udata = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
  vdata = GST_VIDEO_FRAME_COMP_DATA (frame, 2);
  uoff = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 1);
  voff = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 2);

  for (y = 0; y < height2; y++) {
    guint8 *uptr, *vptr;
    guint8 u1, v1;

    uptr = udata + y * stride;
    vptr = vdata + y * stride;

    for (x = 0; x < width2; x++) {
      u1 = *uptr;
      v1 = *vptr;

      *uptr = tableu[u1][v1];
      *vptr = tablev[u1][v1];

      uptr += uoff;
      vptr += voff;
    }
  }
}
Developer ID: PeterXu, Project: gst-mobile, Lines of code: 57
Example 4: gst_smpte_blend_i420

static void
gst_smpte_blend_i420 (GstVideoFrame * frame1, GstVideoFrame * frame2,
    GstVideoFrame * oframe, GstMask * mask, gint border, gint pos)
{
  guint32 *maskp;
  gint value;
  gint i, j;
  gint min, max;
  guint8 *in1, *in2, *out, *in1u, *in1v, *in2u, *in2v, *outu, *outv;
  gint width, height;

  if (border == 0)
    border++;

  min = pos - border;
  max = pos;

  width = GST_VIDEO_FRAME_WIDTH (frame1);
  height = GST_VIDEO_FRAME_HEIGHT (frame1);

  in1 = GST_VIDEO_FRAME_COMP_DATA (frame1, 0);
  in2 = GST_VIDEO_FRAME_COMP_DATA (frame2, 0);
  out = GST_VIDEO_FRAME_COMP_DATA (oframe, 0);

  in1u = GST_VIDEO_FRAME_COMP_DATA (frame1, 1);
  in1v = GST_VIDEO_FRAME_COMP_DATA (frame1, 2);
  in2u = GST_VIDEO_FRAME_COMP_DATA (frame2, 1);
  in2v = GST_VIDEO_FRAME_COMP_DATA (frame2, 2);
  outu = GST_VIDEO_FRAME_COMP_DATA (oframe, 1);
  outv = GST_VIDEO_FRAME_COMP_DATA (oframe, 2);

  maskp = mask->data;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      value = *maskp++;
      value = ((CLAMP (value, min, max) - min) << 8) / border;

      out[j] = ((in1[j] * value) + (in2[j] * (256 - value))) >> 8;
      if (!(i & 1) && !(j & 1)) {
        outu[j / 2] =
            ((in1u[j / 2] * value) + (in2u[j / 2] * (256 - value))) >> 8;
        outv[j / 2] =
            ((in1v[j / 2] * value) + (in2v[j / 2] * (256 - value))) >> 8;
      }
    }

    in1 += GST_VIDEO_FRAME_COMP_STRIDE (frame1, 0);
    in2 += GST_VIDEO_FRAME_COMP_STRIDE (frame2, 0);
    out += GST_VIDEO_FRAME_COMP_STRIDE (oframe, 0);

    if (!(i & 1)) {
      in1u += GST_VIDEO_FRAME_COMP_STRIDE (frame1, 1);
      in2u += GST_VIDEO_FRAME_COMP_STRIDE (frame2, 1);
      in1v += GST_VIDEO_FRAME_COMP_STRIDE (frame1, 2);
      in2v += GST_VIDEO_FRAME_COMP_STRIDE (frame2, 2);  /* fixed: was frame1 */
      outu += GST_VIDEO_FRAME_COMP_STRIDE (oframe, 1);
      outv += GST_VIDEO_FRAME_COMP_STRIDE (oframe, 2);
    }
  }
}
Developer ID: PeterXu, Project: gst-mobile, Lines of code: 60
Example 5: gst_av1_enc_fill_image

static void
gst_av1_enc_fill_image (GstAV1Enc * enc, GstVideoFrame * frame,
    aom_image_t * image)
{
  image->planes[AOM_PLANE_Y] = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  image->planes[AOM_PLANE_U] = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
  image->planes[AOM_PLANE_V] = GST_VIDEO_FRAME_COMP_DATA (frame, 2);

  image->stride[AOM_PLANE_Y] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  image->stride[AOM_PLANE_U] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
  image->stride[AOM_PLANE_V] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);
}
Developer ID: 0p1pp1, Project: gst-plugins-bad, Lines of code: 12
Example 6: gst_gaussianblur_transform_frame

static GstFlowReturn
gst_gaussianblur_transform_frame (GstVideoFilter * vfilter,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame)
{
  GstGaussianBlur *filter = GST_GAUSSIANBLUR (vfilter);
  GstClockTime timestamp;
  gint64 stream_time;
  gfloat sigma;
  guint8 *src, *dest;

  /* GstController: update the properties */
  timestamp = GST_BUFFER_TIMESTAMP (in_frame->buffer);
  stream_time =
      gst_segment_to_stream_time (&GST_BASE_TRANSFORM (filter)->segment,
      GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (filter, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (stream_time))
    gst_object_sync_values (GST_OBJECT (filter), stream_time);

  GST_OBJECT_LOCK (filter);
  sigma = filter->sigma;
  GST_OBJECT_UNLOCK (filter);

  if (filter->cur_sigma != sigma) {
    g_free (filter->kernel);
    filter->kernel = NULL;
    g_free (filter->kernel_sum);
    filter->kernel_sum = NULL;
    filter->cur_sigma = sigma;
  }
  if (filter->kernel == NULL &&
      !make_gaussian_kernel (filter, filter->cur_sigma)) {
    GST_ELEMENT_ERROR (filter, RESOURCE, NO_SPACE_LEFT, ("Out of memory"),
        ("Failed to allocate gaussian kernel"));
    return GST_FLOW_ERROR;
  }

  /*
   * Perform gaussian smoothing on the image using the input standard
   * deviation.
   */
  src = GST_VIDEO_FRAME_COMP_DATA (in_frame, 0);
  dest = GST_VIDEO_FRAME_COMP_DATA (out_frame, 0);
  gst_video_frame_copy (out_frame, in_frame);
  gaussian_smooth (filter, src, dest);

  return GST_FLOW_OK;
}
Developer ID: ylatuya, Project: gst-plugins-bad, Lines of code: 51
Example 7: yadif_filter

void
yadif_filter (GstYadif * yadif, int parity, int tff)
{
  int y, i;
  const GstVideoInfo *vi = &yadif->video_info;
  const GstVideoFormatInfo *vfi = vi->finfo;

  for (i = 0; i < GST_VIDEO_FORMAT_INFO_N_COMPONENTS (vfi); i++) {
    int w = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (vfi, i, vi->width);
    int h = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (vfi, i, vi->height);
    int refs = GST_VIDEO_INFO_COMP_STRIDE (vi, i);
    int df = GST_VIDEO_INFO_COMP_PSTRIDE (vi, i);
    guint8 *prev_data = GST_VIDEO_FRAME_COMP_DATA (&yadif->prev_frame, i);
    guint8 *cur_data = GST_VIDEO_FRAME_COMP_DATA (&yadif->cur_frame, i);
    guint8 *next_data = GST_VIDEO_FRAME_COMP_DATA (&yadif->next_frame, i);
    guint8 *dest_data = GST_VIDEO_FRAME_COMP_DATA (&yadif->dest_frame, i);

    for (y = 0; y < h; y++) {
      if ((y ^ parity) & 1) {
        guint8 *prev = prev_data + y * refs;
        guint8 *cur = cur_data + y * refs;
        guint8 *next = next_data + y * refs;
        guint8 *dst = dest_data + y * refs;
        int mode = ((y == 1) || (y + 2 == h)) ? 2 : yadif->mode;
#if HAVE_CPU_X86_64
        if (0) {
          filter_line_c (dst, prev, cur, next, w,
              y + 1 < h ? refs : -refs, y ? -refs : refs, parity ^ tff, mode);
        } else {
          filter_line_x86_64 (dst, prev, cur, next, w,
              y + 1 < h ? refs : -refs, y ? -refs : refs, parity ^ tff, mode);
        }
#else
        filter_line_c (dst, prev, cur, next, w,
            y + 1 < h ? refs : -refs, y ? -refs : refs, parity ^ tff, mode);
#endif
      } else {
        guint8 *dst = dest_data + y * refs;
        guint8 *cur = cur_data + y * refs;

        memcpy (dst, cur, w * df);
      }
    }
  }

#if 0
  emms_c ();
#endif
}
Developer ID: 0p1pp1, Project: gst-plugins-bad, Lines of code: 49
Example 8: fill_image_planar8_3

static void
fill_image_planar8_3 (opj_image_t * image, GstVideoFrame * frame)
{
  gint c, x, y, w, h;
  const guint8 *data_in, *tmp;
  gint *data_out;
  gint sstride;

  for (c = 0; c < 3; c++) {
    w = GST_VIDEO_FRAME_COMP_WIDTH (frame, c);
    h = GST_VIDEO_FRAME_COMP_HEIGHT (frame, c);
    data_in = GST_VIDEO_FRAME_COMP_DATA (frame, c);
    sstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, c);
    data_out = image->comps[c].data;

    for (y = 0; y < h; y++) {
      tmp = data_in;

      for (x = 0; x < w; x++) {
        *data_out = *tmp;
        data_out++;
        tmp++;
      }
      data_in += sstride;
    }
  }
}
Developer ID: ndufresne, Project: gst-plugins-bad, Lines of code: 26
Example 9: gst_gdk_pixbuf_sink_get_pixbuf_from_buffer

static GdkPixbuf *
gst_gdk_pixbuf_sink_get_pixbuf_from_buffer (GstGdkPixbufSink * sink,
    GstBuffer * buf)
{
  GdkPixbuf *pix = NULL;
  GstVideoFrame *frame;
  gint minsize, bytes_per_pixel;

  g_return_val_if_fail (GST_VIDEO_SINK_WIDTH (sink) > 0, NULL);
  g_return_val_if_fail (GST_VIDEO_SINK_HEIGHT (sink) > 0, NULL);

  frame = g_slice_new0 (GstVideoFrame);
  gst_video_frame_map (frame, &sink->info, buf, GST_MAP_READ);

  bytes_per_pixel = (sink->has_alpha) ? 4 : 3;

  /* last row needn't have row padding */
  minsize = (GST_VIDEO_FRAME_COMP_STRIDE (frame, 0) *
      (GST_VIDEO_SINK_HEIGHT (sink) - 1)) +
      (bytes_per_pixel * GST_VIDEO_SINK_WIDTH (sink));

  g_return_val_if_fail (gst_buffer_get_size (buf) >= minsize, NULL);

  gst_buffer_ref (buf);
  pix = gdk_pixbuf_new_from_data (GST_VIDEO_FRAME_COMP_DATA (frame, 0),
      GDK_COLORSPACE_RGB, sink->has_alpha, 8, GST_VIDEO_SINK_WIDTH (sink),
      GST_VIDEO_SINK_HEIGHT (sink), GST_VIDEO_FRAME_COMP_STRIDE (frame, 0),
      (GdkPixbufDestroyNotify) gst_gdk_pixbuf_sink_pixbuf_destroy_notify,
      frame);

  return pix;
}
Developer ID: BigBrother-International, Project: gst-plugins-good, Lines of code: 32
Example 10: user_endrow_callback

static void
user_endrow_callback (png_structp png_ptr, png_bytep new_row,
    png_uint_32 row_num, int pass)
{
  GstPngDec *pngdec = NULL;

  pngdec = GST_PNGDEC (png_get_io_ptr (png_ptr));

  /* FIXME: implement interlaced pictures */

  /* If buffer_out doesn't exist, it means buffer_alloc failed, which
   * will already have set the return code */
  if (GST_IS_BUFFER (pngdec->current_frame->output_buffer)) {
    GstVideoFrame frame;
    GstBuffer *buffer = pngdec->current_frame->output_buffer;
    size_t offset;
    gint width;
    guint8 *data;

    if (!gst_video_frame_map (&frame, &pngdec->output_state->info, buffer,
            GST_MAP_WRITE)) {
      pngdec->ret = GST_FLOW_ERROR;
      return;
    }

    data = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
    offset = row_num * GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
    GST_LOG ("got row %u, copying in buffer %p at offset %" G_GSIZE_FORMAT,
        (guint) row_num, pngdec->current_frame->output_buffer, offset);
    width = GST_ROUND_UP_4 (png_get_rowbytes (pngdec->png, pngdec->info));
    memcpy (data + offset, new_row, width);
    gst_video_frame_unmap (&frame);
    pngdec->ret = GST_FLOW_OK;
  }
}
Developer ID: lubing521, Project: gst-embedded-builder, Lines of code: 35
Example 11: theora_enc_init_buffer

static void
theora_enc_init_buffer (th_ycbcr_buffer buf, GstVideoFrame * frame)
{
  GstVideoInfo vinfo;
  guint i;

  /* According to Theora developer Timothy Terriberry, the Theora
   * encoder will not use memory outside of pic_width/height, even when
   * the frame size is bigger. The values outside this region will be encoded
   * to default values.
   * Due to this, setting the frame's width/height as the buffer width/height
   * is perfectly ok, even though it does not strictly look ok.
   */
  gst_video_info_init (&vinfo);
  gst_video_info_set_format (&vinfo, GST_VIDEO_FRAME_FORMAT (frame),
      GST_ROUND_UP_16 (GST_VIDEO_FRAME_WIDTH (frame)),
      GST_ROUND_UP_16 (GST_VIDEO_FRAME_HEIGHT (frame)));

  for (i = 0; i < 3; i++) {
    buf[i].width = GST_VIDEO_INFO_COMP_WIDTH (&vinfo, i);
    buf[i].height = GST_VIDEO_INFO_COMP_HEIGHT (&vinfo, i);
    buf[i].data = GST_VIDEO_FRAME_COMP_DATA (frame, i);
    buf[i].stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, i);
  }
}
Developer ID: rawoul, Project: gst-plugins-base, Lines of code: 26
Example 12: fill_frame_planar16_3

static void
fill_frame_planar16_3 (GstVideoFrame * frame, opj_image_t * image)
{
  gint c, x, y, w, h;
  guint16 *data_out, *tmp;
  const gint *data_in;
  gint dstride;
  gint shift;

  for (c = 0; c < 3; c++) {
    w = GST_VIDEO_FRAME_COMP_WIDTH (frame, c);
    h = GST_VIDEO_FRAME_COMP_HEIGHT (frame, c);
    dstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, c) / 2;
    data_out = (guint16 *) GST_VIDEO_FRAME_COMP_DATA (frame, c);
    data_in = image->comps[c].data;
    shift = 16 - image->comps[c].prec;

    for (y = 0; y < h; y++) {
      tmp = data_out;
      for (x = 0; x < w; x++) {
        *tmp = *data_in << shift;
        tmp++;
        data_in++;
      }
      data_out += dstride;
    }
  }
}
Developer ID: cbetz421, Project: gst-plugins-bad, Lines of code: 29
Example 13: user_endrow_callback

static void
user_endrow_callback (png_structp png_ptr, png_bytep new_row,
    png_uint_32 row_num, int pass)
{
  GstPngDec *pngdec = NULL;

  pngdec = GST_PNGDEC (png_get_io_ptr (png_ptr));

  /* If buffer_out doesn't exist, it means buffer_alloc failed, which
   * will already have set the return code */
  if (new_row && GST_IS_BUFFER (pngdec->current_frame->output_buffer)) {
    GstVideoFrame frame;
    GstBuffer *buffer = pngdec->current_frame->output_buffer;
    size_t offset;
    guint8 *data;

    if (!gst_video_frame_map (&frame, &pngdec->output_state->info, buffer,
            GST_MAP_WRITE)) {
      pngdec->ret = GST_FLOW_ERROR;
      return;
    }

    data = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
    offset = row_num * GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
    GST_LOG ("got row %u at pass %d, copying in buffer %p at offset %"
        G_GSIZE_FORMAT, (guint) row_num, pass,
        pngdec->current_frame->output_buffer, offset);
    png_progressive_combine_row (pngdec->png, data + offset, new_row);
    gst_video_frame_unmap (&frame);
    pngdec->ret = GST_FLOW_OK;
  } else
    pngdec->ret = GST_FLOW_OK;
}
Developer ID: DylanZA, Project: gst-plugins-good, Lines of code: 33
Example 14: gst_jpeg_dec_decode_rgb

static void
gst_jpeg_dec_decode_rgb (GstJpegDec * dec, GstVideoFrame * frame)
{
  guchar *r_rows[16], *g_rows[16], *b_rows[16];
  guchar **scanarray[3] = { r_rows, g_rows, b_rows };
  gint i, j, k;
  gint lines;
  guint8 *base[3];
  guint pstride, rstride;
  gint width, height;

  GST_DEBUG_OBJECT (dec, "indirect decoding of RGB");

  width = GST_VIDEO_FRAME_WIDTH (frame);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width))))
    return;

  for (i = 0; i < 3; i++)
    base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i);

  pstride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
  rstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);

  memcpy (r_rows, dec->idr_y, 16 * sizeof (gpointer));
  memcpy (g_rows, dec->idr_u, 16 * sizeof (gpointer));
  memcpy (b_rows, dec->idr_v, 16 * sizeof (gpointer));

  i = 0;
  while (i < height) {
    lines = jpeg_read_raw_data (&dec->cinfo, scanarray, DCTSIZE);
    if (G_LIKELY (lines > 0)) {
      for (j = 0; (j < DCTSIZE) && (i < height); j++, i++) {
        gint p;

        p = 0;
        for (k = 0; k < width; k++) {
          base[0][p] = r_rows[j][k];
          base[1][p] = g_rows[j][k];
          base[2][p] = b_rows[j][k];
          p += pstride;
        }
        base[0] += rstride;
        base[1] += rstride;
        base[2] += rstride;
      }
    } else {
      GST_INFO_OBJECT (dec, "jpeg_read_raw_data() returned 0");
    }
  }
}
Developer ID: an146, Project: gst-plugins-good, Lines of code: 52
Example 15: daala_handle_image

/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
daala_handle_image (GstDaalaDec * dec, od_img * img, GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (dec);
  gint width, height, stride;
  GstFlowReturn result;
  gint i, comp;
  guint8 *dest, *src;
  GstVideoFrame vframe;

  result = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  /* if only libdaala would allow us to give it a destination frame */
  GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, dec,
      "doing unavoidable video frame copy");

  if (G_UNLIKELY (!gst_video_frame_map (&vframe, &dec->output_state->info,
              frame->output_buffer, GST_MAP_WRITE)))
    goto invalid_frame;

  for (comp = 0; comp < 3; comp++) {
    width = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, comp);
    height = GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, comp);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&vframe, comp);
    dest = GST_VIDEO_FRAME_COMP_DATA (&vframe, comp);
    src = img->planes[comp].data;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);

      dest += stride;
      src += img->planes[comp].ystride;
    }
  }
  gst_video_frame_unmap (&vframe);

  return GST_FLOW_OK;

invalid_frame:
  {
    GST_DEBUG_OBJECT (dec, "could not map video frame");
    return GST_FLOW_ERROR;
  }
}
Developer ID: Distrotech, Project: gst-plugins-bad, Lines of code: 51
Example 16: gst_vp9_dec_image_to_buffer

static void
gst_vp9_dec_image_to_buffer (GstVP9Dec * dec, const vpx_image_t * img,
    GstBuffer * buffer)
{
  int deststride, srcstride, height, width, line, comp;
  guint8 *dest, *src;
  GstVideoFrame frame;
  GstVideoInfo *info = &dec->output_state->info;

  if (!gst_video_frame_map (&frame, info, buffer, GST_MAP_WRITE)) {
    GST_ERROR_OBJECT (dec, "Could not map video buffer");
    return;
  }

  for (comp = 0; comp < 3; comp++) {
    dest = GST_VIDEO_FRAME_COMP_DATA (&frame, comp);
    src = img->planes[comp];
    width = GST_VIDEO_FRAME_COMP_WIDTH (&frame, comp)
        * GST_VIDEO_FRAME_COMP_PSTRIDE (&frame, comp);
    height = GST_VIDEO_FRAME_COMP_HEIGHT (&frame, comp);
    deststride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, comp);
    srcstride = img->stride[comp];

    if (srcstride == deststride) {
      GST_TRACE_OBJECT (dec, "Stride matches. Comp %d: %d, copying full plane",
          comp, srcstride);
      memcpy (dest, src, srcstride * height);
    } else {
      GST_TRACE_OBJECT (dec, "Stride mismatch. Comp %d: %d != %d, copying "
          "line by line.", comp, srcstride, deststride);
      for (line = 0; line < height; line++) {
        memcpy (dest, src, width);
        dest += deststride;
        src += srcstride;
      }
    }
  }

  gst_video_frame_unmap (&frame);
}
Developer ID: ndufresne, Project: gst-plugins-good, Lines of code: 40
Example 17: gst_gamma_planar_yuv_ip

static void
gst_gamma_planar_yuv_ip (GstGamma * gamma, GstVideoFrame * frame)
{
  gint i, j, height;
  gint width, stride, row_wrap;
  const guint8 *table = gamma->gamma_table;
  guint8 *data;

  data = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  width = GST_VIDEO_FRAME_COMP_WIDTH (frame, 0);
  height = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 0);
  row_wrap = stride - width;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      *data = table[*data];
      data++;
    }
    data += row_wrap;
  }
}
Developer ID: adesurya, Project: gst-mobile, Lines of code: 22
Example 18: sink_handoff_cb_I420

static void
sink_handoff_cb_I420 (GstElement * object, GstBuffer * buffer, GstPad * pad,
    gpointer user_data)
{
  guint *sink_pos = (guint *) user_data;
  gboolean contains_text = (*sink_pos == 1 || *sink_pos == 2);
  guint c, i, j;
  gboolean all_red = TRUE;
  guint8 *comp;
  gint comp_stride, comp_width, comp_height;
  const guint8 color[] = { 81, 90, 240 };
  GstVideoInfo info;
  GstVideoFrame frame;

  gst_video_info_init (&info);
  gst_video_info_set_format (&info, GST_VIDEO_FORMAT_I420, 640, 480);

  gst_video_frame_map (&frame, &info, buffer, GST_MAP_READ);

  for (c = 0; c < 3; c++) {
    comp = GST_VIDEO_FRAME_COMP_DATA (&frame, c);
    comp_stride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, c);
    comp_width = GST_VIDEO_FRAME_COMP_WIDTH (&frame, c);
    comp_height = GST_VIDEO_FRAME_COMP_HEIGHT (&frame, c);

    for (i = 0; i < comp_height; i++) {
      for (j = 0; j < comp_width; j++) {
        all_red = all_red && (comp[i * comp_stride + j] == color[c]);
      }
    }
  }
  gst_video_frame_unmap (&frame);

  fail_unless (contains_text != all_red,
      "Frame %d is incorrect (all red %d, contains text %d)", *sink_pos,
      all_red, contains_text);
  *sink_pos = *sink_pos + 1;
}
Developer ID: lubing521, Project: gst-embedded-builder, Lines of code: 38
Example 19: fill_image_planar16_1

static void
fill_image_planar16_1 (opj_image_t * image, GstVideoFrame * frame)
{
  gint x, y, w, h;
  const guint16 *data_in, *tmp;
  gint *data_out;
  gint sstride;

  w = GST_VIDEO_FRAME_COMP_WIDTH (frame, 0);
  h = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 0);
  data_in = (guint16 *) GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  sstride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0) / 2;
  data_out = image->comps[0].data;

  for (y = 0; y < h; y++) {
    tmp = data_in;
    for (x = 0; x < w; x++) {
      *data_out = *tmp;
      data_out++;
      tmp++;
    }
    data_in += sstride;
  }
}
Developer ID: ndufresne, Project: gst-plugins-bad, Lines of code: 24
Example 20: gst_smooth_transform_frame

static GstFlowReturn
gst_smooth_transform_frame (GstVideoFilter * vfilter, GstVideoFrame * in_frame,
    GstVideoFrame * out_frame)
{
  GstSmooth *smooth;

  smooth = GST_SMOOTH (vfilter);

  if (!smooth->active) {
    gst_video_frame_copy (out_frame, in_frame);
    return GST_FLOW_OK;
  }

  smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 0),
      GST_VIDEO_FRAME_COMP_DATA (in_frame, 0),
      GST_VIDEO_FRAME_COMP_WIDTH (in_frame, 0),
      GST_VIDEO_FRAME_COMP_HEIGHT (in_frame, 0),
      GST_VIDEO_FRAME_COMP_STRIDE (in_frame, 0),
      GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 0),
      smooth->tolerance, smooth->filtersize);
  if (!smooth->luma_only) {
    smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 1),
        GST_VIDEO_FRAME_COMP_DATA (in_frame, 1),
        GST_VIDEO_FRAME_COMP_WIDTH (in_frame, 1),
        GST_VIDEO_FRAME_COMP_HEIGHT (in_frame, 1),
        GST_VIDEO_FRAME_COMP_STRIDE (in_frame, 1),
        GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 1),
        smooth->tolerance, smooth->filtersize);
    smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 2),
        GST_VIDEO_FRAME_COMP_DATA (in_frame, 2),
        GST_VIDEO_FRAME_COMP_WIDTH (in_frame, 2),
        GST_VIDEO_FRAME_COMP_HEIGHT (in_frame, 2),
        GST_VIDEO_FRAME_COMP_STRIDE (in_frame, 2),
        GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 2),
        smooth->tolerance, smooth->filtersize);
  } else {
    gst_video_frame_copy_plane (out_frame, in_frame, 1);
    gst_video_frame_copy_plane (out_frame, in_frame, 2);
  }

  return GST_FLOW_OK;
}
Developer ID: lubing521, Project: gst-embedded-builder, Lines of code: 42
Example 21: pgs_composition_object_render

static void
pgs_composition_object_render (PgsCompositionObject * obj, SpuState * state,
    GstVideoFrame * frame)
{
  SpuColour *colour;
  guint8 *planes[3];            /* YUV frame pointers */
  gint strides[3];
  guint8 *data, *end;
  guint16 obj_w;
  guint16 obj_h G_GNUC_UNUSED;
  guint x, y, i, min_x, max_x;

  if (G_UNLIKELY (obj->rle_data == NULL || obj->rle_data_size == 0
          || obj->rle_data_used != obj->rle_data_size))
    return;

  data = obj->rle_data;
  end = data + obj->rle_data_used;

  if (data + 4 > end)
    return;

  /* FIXME: Calculate and use the cropping window for the output, as the
   * intersection of the crop rectangle for this object (if any) and the
   * window specified by the object's window_id */

  /* Store the start of each plane */
  planes[0] = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  planes[1] = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
  planes[2] = GST_VIDEO_FRAME_COMP_DATA (frame, 2);

  strides[0] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  strides[1] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
  strides[2] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);

  y = MIN (obj->y, state->info.height);
  planes[0] += strides[0] * y;
  planes[1] += strides[1] * (y / 2);
  planes[2] += strides[2] * (y / 2);

  /* RLE data: */
  obj_w = GST_READ_UINT16_BE (data);
  obj_h = GST_READ_UINT16_BE (data + 2);
  data += 4;

  min_x = MIN (obj->x, strides[0]);
  max_x = MIN (obj->x + obj_w, strides[0]);

  state->comp_left = x = min_x;
  state->comp_right = max_x;

  gstspu_clear_comp_buffers (state);

  while (data < end) {
    guint8 pal_id;
    guint16 run_len;

    pal_id = *data++;
    if (pal_id != 0) {
      run_len = 1;
    } else {
      if (data + 1 > end)
        return;
      switch (data[0] & 0xC0) {
        case 0x00:
          run_len = (data[0] & 0x3f);
          data++;
          break;
        case 0x40:
          if (data + 2 > end)
            return;
          run_len = ((data[0] << 8) | data[1]) & 0x3fff;
          data += 2;
          break;
        case 0x80:
          if (data + 2 > end)
            return;
          run_len = (data[0] & 0x3f);
          pal_id = data[1];
          data += 2;
          break;
        case 0xC0:
          if (data + 3 > end)
            return;
          run_len = ((data[0] << 8) | data[1]) & 0x3fff;
          pal_id = data[2];
          data += 3;
          break;
        default:
          run_len = 0;
          break;
      }
    }

    colour = &state->pgs.palette[pal_id];
    if (colour->A) {
      guint32 inv_A = 0xff - colour->A;

      if (G_UNLIKELY (x + run_len > max_x))
        run_len = (max_x - x);
//......... (rest of the code omitted here) .........
Developer ID: drothlis, Project: gst-plugins-bad, Lines of code: 101
Example 22: gst_video_detect_yuv

static void
gst_video_detect_yuv (GstSimpleVideoMarkDetect * simplevideomarkdetect,
    GstVideoFrame * frame)
{
  gdouble brightness;
  gint i, pw, ph, row_stride, pixel_stride;
  gint width, height, offset_calc, x, y;
  guint8 *d;
  guint64 pattern_data;
  gint total_pattern;

  width = frame->info.width;
  height = frame->info.height;

  pw = simplevideomarkdetect->pattern_width;
  ph = simplevideomarkdetect->pattern_height;
  row_stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);

  d = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  /* move to start of bottom left, adjust for offsets */
  offset_calc =
      row_stride * (height - ph - simplevideomarkdetect->bottom_offset) +
      pixel_stride * simplevideomarkdetect->left_offset;
  x = simplevideomarkdetect->left_offset;
  y = height - ph - simplevideomarkdetect->bottom_offset;

  total_pattern =
      simplevideomarkdetect->pattern_count +
      simplevideomarkdetect->pattern_data_count;
  /* If x and y offset values are outside the video, no need to analyze */
  if ((x + (pw * total_pattern)) < 0 || x > width || (y + height) < 0
      || y > height) {
    GST_ERROR_OBJECT (simplevideomarkdetect,
        "simplevideomarkdetect pattern is outside the video. Not Analyzing.");
    return;
  }

  /* Offset calculation less than 0, then reset to 0 */
  if (offset_calc < 0)
    offset_calc = 0;
  /* Y position of mark is negative or pattern exceeds the video height,
   * then recalculate pattern height for partial display */
  if (y < 0)
    ph += y;
  else if ((y + ph) > height)
    ph = height - y;
  /* If pattern height is less than 0, need not analyze anything */
  if (ph < 0)
    return;

  /* move to start of bottom left */
  d += offset_calc;

  /* analyze the bottom left pixels */
  for (i = 0; i < simplevideomarkdetect->pattern_count; i++) {
    gint draw_pw;
    /* calc brightness of width * height box */
    brightness =
        gst_video_detect_calc_brightness (simplevideomarkdetect, d, pw, ph,
        row_stride, pixel_stride);

    GST_DEBUG_OBJECT (simplevideomarkdetect, "brightness %f", brightness);

    if (i & 1) {
      /* odd pixels must be white, all pixels darker than the center +
       * sensitivity are considered wrong. */
      if (brightness <
          (simplevideomarkdetect->pattern_center +
              simplevideomarkdetect->pattern_sensitivity))
        goto no_pattern;
    } else {
      /* even pixels must be black, pixels lighter than the center -
       * sensitivity are considered wrong. */
      if (brightness >
          (simplevideomarkdetect->pattern_center -
              simplevideomarkdetect->pattern_sensitivity))
        goto no_pattern;
    }

    /* X position of mark is negative or pattern exceeds the video width,
     * then recalculate pattern width for partial display */
    draw_pw = calculate_pw (pw, x, width);
    /* If pattern width is less than 0, continue with the next pattern */
    if (draw_pw < 0)
      continue;

    /* move to i-th pattern */
    d += pixel_stride * draw_pw;
    x += draw_pw;

    if ((x + (pw * (total_pattern - i - 1))) < 0 || x >= width)
      break;
  }
  GST_DEBUG_OBJECT (simplevideomarkdetect, "found pattern");

  pattern_data = 0;

  /* get the data of the pattern */
  for (i = 0; i < simplevideomarkdetect->pattern_data_count; i++) {
//......... (rest of the code omitted here) .........
Developer ID: 0p1pp1, Project: gst-plugins-bad, Lines of code: 101
Example 23: gst_rtp_vraw_pay_handle_buffer

static GstFlowReturn
gst_rtp_vraw_pay_handle_buffer (GstRTPBasePayload * payload, GstBuffer * buffer)
{
  GstRtpVRawPay *rtpvrawpay;
  GstFlowReturn ret = GST_FLOW_OK;
  gfloat packets_per_packline;
  guint pgroups_per_packet;
  guint packlines_per_list, buffers_per_list;
  guint lines_delay;            /* after how many packed lines we push out a buffer list */
  guint last_line;              /* last pack line number we pushed out a buffer list */
  guint line, offset;
  guint8 *p0, *yp, *up, *vp;
  guint ystride, uvstride;
  guint xinc, yinc;
  guint pgroup;
  guint mtu;
  guint width, height;
  gint field, fields;
  GstVideoFormat format;
  GstVideoFrame frame;
  gint interlaced;
  gboolean use_buffer_lists;
  GstBufferList *list = NULL;
  GstRTPBuffer rtp = { NULL, };

  rtpvrawpay = GST_RTP_VRAW_PAY (payload);

  gst_video_frame_map (&frame, &rtpvrawpay->vinfo, buffer, GST_MAP_READ);

  GST_LOG_OBJECT (rtpvrawpay, "new frame of %" G_GSIZE_FORMAT " bytes",
      gst_buffer_get_size (buffer));

  /* get pointer and strides of the planes */
  p0 = GST_VIDEO_FRAME_PLANE_DATA (&frame, 0);
  yp = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  up = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
  vp = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

  ystride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
  uvstride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);

  mtu = GST_RTP_BASE_PAYLOAD_MTU (payload);

  /* amount of bytes for one pixel */
  pgroup = rtpvrawpay->pgroup;
  width = GST_VIDEO_INFO_WIDTH (&rtpvrawpay->vinfo);
  height = GST_VIDEO_INFO_HEIGHT (&rtpvrawpay->vinfo);

  interlaced = GST_VIDEO_INFO_IS_INTERLACED (&rtpvrawpay->vinfo);

  format = GST_VIDEO_INFO_FORMAT (&rtpvrawpay->vinfo);

  yinc = rtpvrawpay->yinc;
  xinc = rtpvrawpay->xinc;

  /* after how many packed lines we push out a buffer list */
  lines_delay = GST_ROUND_UP_4 (height / rtpvrawpay->chunks_per_frame);

  /* calculate how many buffers we expect to store in a single buffer list */
  pgroups_per_packet = (mtu - (12 + 14)) / pgroup;
  packets_per_packline = width / (xinc * pgroups_per_packet * 1.0);
  packlines_per_list = height / (yinc * rtpvrawpay->chunks_per_frame);
  buffers_per_list = packlines_per_list * packets_per_packline;
  buffers_per_list = GST_ROUND_UP_8 (buffers_per_list);

  use_buffer_lists = (rtpvrawpay->chunks_per_frame < (height / yinc));

  fields = 1 + interlaced;

  /* start with line 0, offset 0 */
  for (field = 0; field < fields; field++) {
    line = field;
    offset = 0;
    last_line = 0;

    if (use_buffer_lists)
      list = gst_buffer_list_new_sized (buffers_per_list);

    /* write all lines */
    while (line < height) {
      guint left, pack_line;
      GstBuffer *out;
      guint8 *outdata, *headers;
      gboolean next_line, complete = FALSE;
      guint length, cont, pixels;

      /* get the max allowed payload length size, we try to fill the complete MTU */
      left = gst_rtp_buffer_calc_payload_len (mtu, 0, 0);
      out = gst_rtp_buffer_new_allocate (left, 0, 0);

      if (field == 0) {
        GST_BUFFER_PTS (out) = GST_BUFFER_PTS (buffer);
      } else {
        GST_BUFFER_PTS (out) = GST_BUFFER_PTS (buffer) +
            GST_BUFFER_DURATION (buffer) / 2;
      }

      gst_rtp_buffer_map (out, GST_MAP_WRITE, &rtp);
      outdata = gst_rtp_buffer_get_payload (&rtp);
//......... (rest of the code omitted here) .........
Developer ID: Distrotech, Project: gst-plugins-good, Lines of code: 101
Example 24: deinterlace_frame_di_greedyh_packed

static void
deinterlace_frame_di_greedyh_packed (GstDeinterlaceMethod * method,
    const GstDeinterlaceField * history, guint history_count,
    GstVideoFrame * outframe, int cur_field_idx)
{
  GstDeinterlaceMethodGreedyH *self = GST_DEINTERLACE_METHOD_GREEDY_H (method);
  GstDeinterlaceMethodGreedyHClass *klass =
      GST_DEINTERLACE_METHOD_GREEDY_H_GET_CLASS (self);
  gint InfoIsOdd = 0;
  gint Line;
  gint RowStride = GST_VIDEO_FRAME_COMP_STRIDE (outframe, 0);
  gint FieldHeight = GST_VIDEO_INFO_HEIGHT (method->vinfo) / 2;
  gint Pitch = RowStride * 2;
  const guint8 *L1;             // ptr to Line1, of 3
  const guint8 *L2;             // ptr to Line2, the weave line
  const guint8 *L3;             // ptr to Line3
  const guint8 *L2P;            // ptr to prev Line2
  guint8 *Dest = GST_VIDEO_FRAME_COMP_DATA (outframe, 0);
  ScanlineFunction scanline;

  if (cur_field_idx + 2 > history_count || cur_field_idx < 1) {
    GstDeinterlaceMethod *backup_method;

    backup_method = g_object_new (gst_deinterlace_method_linear_get_type (),
        NULL);

    gst_deinterlace_method_setup (backup_method, method->vinfo);
    gst_deinterlace_method_deinterlace_frame (backup_method,
        history, history_count, outframe, cur_field_idx);

    g_object_unref (backup_method);
    return;
  }

  cur_field_idx += 2;

  switch (GST_VIDEO_INFO_FORMAT (method->vinfo)) {
    case GST_VIDEO_FORMAT_YUY2:
    case GST_VIDEO_FORMAT_YVYU:
      scanline = klass->scanline_yuy2;
      break;
    case GST_VIDEO_FORMAT_UYVY:
      scanline = klass->scanline_uyvy;
      break;
    case GST_VIDEO_FORMAT_AYUV:
      scanline = klass->scanline_ayuv;
      break;
    default:
      g_assert_not_reached ();
      return;
  }

  // copy first even line no matter what, and the first odd line if we're
  // processing an EVEN field. (note diff from other deint rtns.)
  if (history[cur_field_idx - 1].flags == PICTURE_INTERLACED_BOTTOM) {
    InfoIsOdd = 1;

    L1 = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 2].frame, 0);
    if (history[cur_field_idx - 2].flags & PICTURE_INTERLACED_BOTTOM)
      L1 += RowStride;

    L2 = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 1].frame, 0);
    if (history[cur_field_idx - 1].flags & PICTURE_INTERLACED_BOTTOM)
      L2 += RowStride;

    L3 = L1 + Pitch;
    L2P = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 3].frame, 0);
    if (history[cur_field_idx - 3].flags & PICTURE_INTERLACED_BOTTOM)
      L2P += RowStride;

    // copy first even line
    memcpy (Dest, L1, RowStride);
    Dest += RowStride;
  } else {
    InfoIsOdd = 0;

    L1 = GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx - 2].frame, 0);
    if (history[cur_field_idx - 2].flags & PICTURE_INTERLACED_BOTTOM)
      L1 += RowStride;

    L2 = (guint8 *) GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx -
            1].frame, 0) + Pitch;
    if (history[cur_field_idx - 1].flags & PICTURE_INTERLACED_BOTTOM)
      L2 += RowStride;

    L3 = L1 + Pitch;

    L2P = (guint8 *) GST_VIDEO_FRAME_COMP_DATA (history[cur_field_idx -
            3].frame, 0) + Pitch;
    if (history[cur_field_idx - 3].flags & PICTURE_INTERLACED_BOTTOM)
      L2P += RowStride;

    // copy first even line
    memcpy (Dest, L1, RowStride);
    Dest += RowStride;
    // then first odd line
    memcpy (Dest, L1, RowStride);
    Dest += RowStride;
  }
//......... (rest of the code omitted here) .........
Developer ID: collects, Project: gst-plugins-good, Lines of code: 101
Example 25: _gst_libde265_return_image

static GstFlowReturn
_gst_libde265_return_image (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame, const struct de265_image *img)
{
  GstLibde265Dec *dec = GST_LIBDE265_DEC (decoder);
  struct GstLibde265FrameRef *ref;
  GstFlowReturn result;
  GstVideoFrame outframe;
  GstVideoCodecFrame *out_frame;
  int frame_number;
  int plane;

  ref = (struct GstLibde265FrameRef *) de265_get_image_plane_user_data (img,
      0);
  if (ref != NULL) {
    /* decoder is using direct rendering */
    out_frame = gst_video_codec_frame_ref (ref->frame);
    if (frame != NULL) {
      gst_video_codec_frame_unref (frame);
    }
    gst_buffer_replace (&out_frame->output_buffer, ref->buffer);
    gst_buffer_replace (&ref->buffer, NULL);
    return gst_video_decoder_finish_frame (decoder, out_frame);
  }

  result =
      _gst_libde265_image_available (decoder, de265_get_image_width (img, 0),
      de265_get_image_height (img, 0));
  if (result != GST_FLOW_OK) {
    GST_ERROR_OBJECT (dec, "Failed to notify about available image");
    return result;
  }

  frame_number = (uintptr_t) de265_get_image_user_data (img) - 1;
  if (frame_number != -1) {
    out_frame = gst_video_decoder_get_frame (decoder, frame_number);
  } else {
    out_frame = NULL;
  }
  if (frame != NULL) {
    gst_video_codec_frame_unref (frame);
  }

  if (out_frame == NULL) {
    GST_ERROR_OBJECT (dec, "No frame available to return");
    return GST_FLOW_ERROR;
  }

  result = gst_video_decoder_allocate_output_frame (decoder, out_frame);
  if (result != GST_FLOW_OK) {
    GST_ERROR_OBJECT (dec, "Failed to allocate output frame");
    return result;
  }

  g_assert (dec->output_state != NULL);
  if (!gst_video_frame_map (&outframe, &dec->output_state->info,
          out_frame->output_buffer, GST_MAP_WRITE)) {
    GST_ERROR_OBJECT (dec, "Failed to map output buffer");
    return GST_FLOW_ERROR;
  }

  for (plane = 0; plane < 3; plane++) {
    int width = de265_get_image_width (img, plane);
    int height = de265_get_image_height (img, plane);
    int srcstride = width;
    int dststride = GST_VIDEO_FRAME_COMP_STRIDE (&outframe, plane);
    const uint8_t *src = de265_get_image_plane (img, plane, &srcstride);
    uint8_t *dest = GST_VIDEO_FRAME_COMP_DATA (&outframe, plane);

    if (srcstride == width && dststride == width) {
      memcpy (dest, src, height * width);
    } else {
      while (height--) {
        memcpy (dest, src, width);
        src += srcstride;
        dest += dststride;
      }
    }
  }
  gst_video_frame_unmap (&outframe);
  return gst_video_decoder_finish_frame (decoder, out_frame);
}
Developer ID: 0p1pp1, Project: gst-plugins-bad, Lines of code: 80
Example 26: gst_rtp_vraw_pay_handle_buffer

static GstFlowReturn
gst_rtp_vraw_pay_handle_buffer (GstRTPBasePayload * payload, GstBuffer * buffer)
{
  GstRtpVRawPay *rtpvrawpay;
  GstFlowReturn ret = GST_FLOW_OK;
  guint line, offset;
  guint8 *yp, *up, *vp;
  guint ystride, uvstride;
  guint pgroup;
  guint mtu;
  guint width, height;
  gint field;
  GstVideoFrame frame;
  gint interlaced;
  GstRTPBuffer rtp = { NULL, };

  rtpvrawpay = GST_RTP_VRAW_PAY (payload);

  gst_video_frame_map (&frame, &rtpvrawpay->vinfo, buffer, GST_MAP_READ);

  GST_LOG_OBJECT (rtpvrawpay, "new frame of %" G_GSIZE_FORMAT " bytes",
      gst_buffer_get_size (buffer));

  /* get pointer and strides of the planes */
  yp = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  up = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
  vp = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

  ystride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);
  uvstride = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);

  mtu = GST_RTP_BASE_PAYLOAD_MTU (payload);

  /* amount of bytes for one pixel */
  pgroup = rtpvrawpay->pgroup;
  width = GST_VIDEO_INFO_WIDTH (&rtpvrawpay->vinfo);
  height = GST_VIDEO_INFO_HEIGHT (&rtpvrawpay->vinfo);

  interlaced = GST_VIDEO_INFO_IS_INTERLACED (&rtpvrawpay->vinfo);

  /* start with line 0, offset 0 */
  for (field = 0; field < 1 + interlaced; field++) {
    line = field;
    offset = 0;

    /* write all lines */
    while (line < height) {
      guint left;
      GstBuffer *out;
      guint8 *outdata, *headers;
      gboolean next_line;
      guint length, cont, pixels;

      /* get the max allowed payload length size, we try to fill the complete MTU */
      left = gst_rtp_buffer_calc_payload_len (mtu, 0, 0);
      out = gst_rtp_buffer_new_allocate (left, 0, 0);

      if (field == 0) {
        GST_BUFFER_TIMESTAMP (out) = GST_BUFFER_TIMESTAMP (buffer);
      } else {
        GST_BUFFER_TIMESTAMP (out) = GST_BUFFER_TIMESTAMP (buffer) +
            GST_BUFFER_DURATION (buffer) / 2;
      }

      gst_rtp_buffer_map (out, GST_MAP_WRITE, &rtp);
      outdata = gst_rtp_buffer_get_payload (&rtp);

      GST_LOG_OBJECT (rtpvrawpay, "created buffer of size %u for MTU %u", left,
          mtu);

      /*
       *   0                   1                   2                   3
       *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |   Extended Sequence Number    |            Length             |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |F|          Line No            |C|           Offset            |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |            Length             |F|          Line No            |
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       *  |C|           Offset            |                               .
       *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               .
       *  .                                                               .
       *  .                 Two (partial) lines of video data             .
       *  .                                                               .
       *  +---------------------------------------------------------------+
       */

      /* need 2 bytes for the extended sequence number */
      *outdata++ = 0;
      *outdata++ = 0;
      left -= 2;

      /* the headers start here */
      headers = outdata;

      /* while we can fit at least one header and one pixel */
      while (left > (6 + pgroup)) {
        /* we need a 6 bytes header */
        left -= 6;
//......... (rest of the code omitted here) .........
Developer ID: felipemogollon, Project: gst-plugins-good, Lines of code: 101
Example 27: gst_compare_ssim

static gdouble
gst_compare_ssim (GstCompare * comp, GstBuffer * buf1, GstCaps * caps1,
    GstBuffer * buf2, GstCaps * caps2)
{
  GstVideoInfo info1, info2;
  GstVideoFrame frame1, frame2;
  gint i, comps;
  gdouble cssim[4], ssim, c[4] = { 1.0, 0.0, 0.0, 0.0 };

  if (!caps1)
    goto invalid_input;
  if (!gst_video_info_from_caps (&info1, caps1))
    goto invalid_input;
  if (!caps2)
    goto invalid_input;
  if (!gst_video_info_from_caps (&info2, caps2))  /* fixed: was caps1 */
    goto invalid_input;

  if (GST_VIDEO_INFO_FORMAT (&info1) != GST_VIDEO_INFO_FORMAT (&info2) ||
      GST_VIDEO_INFO_WIDTH (&info1) != GST_VIDEO_INFO_WIDTH (&info2) ||
      GST_VIDEO_INFO_HEIGHT (&info1) != GST_VIDEO_INFO_HEIGHT (&info2))
    return comp->threshold + 1;

  comps = GST_VIDEO_INFO_N_COMPONENTS (&info1);
  /* note that some are reported both yuv and gray */
  for (i = 0; i < comps; ++i)
    c[i] = 1.0;
  /* increase luma weight if yuv */
  if (GST_VIDEO_INFO_IS_YUV (&info1) && (comps > 1))
    c[0] = comps - 1;
  for (i = 0; i < comps; ++i)
    c[i] /= (GST_VIDEO_INFO_IS_YUV (&info1) && (comps > 1)) ?
        2 * (comps - 1) : comps;

  gst_video_frame_map (&frame1, &info1, buf1, GST_MAP_READ);
  gst_video_frame_map (&frame2, &info2, buf2, GST_MAP_READ);

  for (i = 0; i < comps; i++) {
    gint cw, ch, step, stride;

    /* only support most common formats */
    if (GST_VIDEO_INFO_COMP_DEPTH (&info1, i) != 8)
      goto unsupported_input;
    cw = GST_VIDEO_FRAME_COMP_WIDTH (&frame1, i);
    ch = GST_VIDEO_FRAME_COMP_HEIGHT (&frame1, i);
    step = GST_VIDEO_FRAME_COMP_PSTRIDE (&frame1, i);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&frame1, i);

    GST_LOG_OBJECT (comp, "component %d", i);
    cssim[i] = gst_compare_ssim_component (comp,
        GST_VIDEO_FRAME_COMP_DATA (&frame1, i),
        GST_VIDEO_FRAME_COMP_DATA (&frame2, i), cw, ch, step, stride);
    GST_LOG_OBJECT (comp, "ssim[%d] = %f", i, cssim[i]);
  }

  gst_video_frame_unmap (&frame1);
  gst_video_frame_unmap (&frame2);

#ifndef GST_DISABLE_GST_DEBUG
  for (i = 0; i < 4; i++) {
    GST_DEBUG_OBJECT (comp, "ssim[%d] = %f, c[%d] = %f", i, cssim[i], i, c[i]);
  }
#endif

  ssim = cssim[0] * c[0] + cssim[1] * c[1] + cssim[2] * c[2] + cssim[3] * c[3];

  return ssim;

  /* ERRORS */
invalid_input:
  {
    GST_ERROR_OBJECT (comp, "ssim method needs raw video input");
    return 0;
  }
unsupported_input:
  {
    GST_ERROR_OBJECT (comp, "raw video format not supported %" GST_PTR_FORMAT,
        caps1);
    return 0;
  }
}
Developer ID: 0p1pp1, Project: gst-plugins-bad, Lines of code: 84
Example 28: theora_handle_image

/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (dec);
  gint width, height, stride;
  GstFlowReturn result;
  gint i, comp;
  guint8 *dest, *src;
  GstVideoFrame vframe;
  gint pic_width, pic_height;
  gint offset_x, offset_y;

  result = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  if (!dec->can_crop) {
    /* we need to crop the hard way */
    offset_x = dec->info.pic_x;
    offset_y = dec->info.pic_y;
    pic_width = dec->info.pic_width;
    pic_height = dec->info.pic_height;
    /* Ensure correct offsets in chroma for formats that need it
     * by rounding the offset. libtheora will add proper pixels,
     * so no need to handle them ourselves. */
    if (offset_x & 1 && dec->info.pixel_fmt != TH_PF_444)
      offset_x--;
    if (offset_y & 1 && dec->info.pixel_fmt == TH_PF_420)
      offset_y--;
  } else {
    /* copy the whole frame */
    offset_x = 0;
    offset_y = 0;
    pic_width = dec->info.frame_width;
    pic_height = dec->info.frame_height;

    if (dec->info.pic_width != dec->info.frame_width ||
        dec->info.pic_height != dec->info.frame_height ||
        dec->info.pic_x != 0 || dec->info.pic_y != 0) {
      GstVideoMeta *vmeta;
      GstVideoCropMeta *cmeta;

      vmeta = gst_buffer_get_video_meta (frame->output_buffer);
      /* If the buffer pool didn't add the meta already
       * we add it ourselves here */
      if (!vmeta)
        vmeta = gst_buffer_add_video_meta (frame->output_buffer,
            GST_VIDEO_FRAME_FLAG_NONE,
            dec->output_state->info.finfo->format,
            dec->info.frame_width, dec->info.frame_height);

      /* Just to be sure that the buffer pool doesn't do something
       * completely weird and we would crash later */
      g_assert (vmeta->format == dec->output_state->info.finfo->format);
      g_assert (vmeta->width == dec->info.frame_width);
      g_assert (vmeta->height == dec->info.frame_height);

      cmeta = gst_buffer_add_video_crop_meta (frame->output_buffer);

      /* we can do things slightly more efficient when we know that
       * downstream understands clipping */
      cmeta->x = dec->info.pic_x;
      cmeta->y = dec->info.pic_y;
      cmeta->width = dec->info.pic_width;
      cmeta->height = dec->info.pic_height;
    }
  }

  /* if only libtheora would allow us to give it a destination frame */
  GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, dec,
      "doing unavoidable video frame copy");

  if (G_UNLIKELY (!gst_video_frame_map (&vframe, &dec->output_state->info,
              frame->output_buffer, GST_MAP_WRITE)))
    goto invalid_frame;

  for (comp = 0; comp < 3; comp++) {
    width =
        GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (vframe.info.finfo, comp, pic_width);
    height =
        GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (vframe.info.finfo, comp,
        pic_height);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&vframe, comp);
    dest = GST_VIDEO_FRAME_COMP_DATA (&vframe, comp);

    src = buf[comp].data;
    src += ((height == pic_height) ? offset_y : offset_y / 2)
        * buf[comp].stride;
    src += (width == pic_width) ? offset_x : offset_x / 2;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);
//......... (rest of the code omitted here) .........
Developer ID: lubing521, Project: gst-embedded-builder, Lines of code: 101
Example 29: gst_dvdec_chain

static GstFlowReturn
gst_dvdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstDVDec *dvdec;
  guint8 *inframe;
  guint8 *outframe_ptrs[3];
  gint outframe_pitches[3];
  GstMapInfo map;
  GstVideoFrame frame;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  guint length;
  guint64 cstart = GST_CLOCK_TIME_NONE, cstop = GST_CLOCK_TIME_NONE;
  gboolean PAL, wide;

  dvdec = GST_DVDEC (parent);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  inframe = map.data;

  /* buffer should be at least the size of one NTSC frame, this should
   * be enough to decode the header. */
  if (G_UNLIKELY (map.size < NTSC_BUFFER))
    goto wrong_size;

  /* preliminary dropping. unref and return if outside of configured segment */
  if ((dvdec->segment.format == GST_FORMAT_TIME) &&
      (!(gst_segment_clip (&dvdec->segment, GST_FORMAT_TIME,
                  GST_BUFFER_TIMESTAMP (buf),
                  GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf),
                  &cstart, &cstop))))
    goto dropping;

  if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0))
    goto parse_header_error;

  /* get size */
  PAL = dv_system_50_fields (dvdec->decoder);
  wide = dv_format_wide (dvdec->decoder);

  /* check the buffer is of right size after we know if we are
   * dealing with PAL or NTSC */
  length = (PAL ? PAL_BUFFER : NTSC_BUFFER);
  if (G_UNLIKELY (map.size < length))
    goto wrong_size;

  dv_parse_packs (dvdec->decoder, inframe);

  if (dvdec->video_offset % dvdec->drop_factor != 0)
    goto skip;

  /* renegotiate on change */
  if (PAL != dvdec->PAL || wide != dvdec->wide) {
    dvdec->src_negotiated = FALSE;
    dvdec->PAL = PAL;
    dvdec->wide = wide;
  }

  dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT);

  dvdec->interlaced = !dv_is_progressive (dvdec->decoder);

  /* negotiate if not done yet */
  if (!dvdec->src_negotiated) {
    if (!gst_dvdec_src_negotiate (dvdec))
      goto not_negotiated;
  }

  if (gst_pad_check_reconfigure (dvdec->srcpad)) {
    GstCaps *caps;

    caps = gst_pad_get_current_caps (dvdec->srcpad);
    if (!caps)
      goto not_negotiated;

    gst_dvdec_negotiate_pool (dvdec, caps, &dvdec->vinfo);
    gst_caps_unref (caps);
  }

  if (dvdec->need_segment) {
    gst_pad_push_event (dvdec->srcpad,
        gst_event_new_segment (&dvdec->segment));
    dvdec->need_segment = FALSE;
  }

  ret = gst_buffer_pool_acquire_buffer (dvdec->pool, &outbuf, NULL);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto no_buffer;

  gst_video_frame_map (&frame, &dvdec->vinfo, outbuf, GST_MAP_WRITE);

  outframe_ptrs[0] = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  outframe_pitches[0] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);

  /* the rest only matters for YUY2 */
  if (dvdec->bpp < 3) {
    outframe_ptrs[1] = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
    outframe_ptrs[2] = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

    outframe_pitches[1] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);
    outframe_pitches[2] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 2);
//......... (rest of the code omitted here) .........
Developer ID: slkwyy, Project: gst-plugins-good, Lines of code: 101
Note: The GST_VIDEO_FRAME_COMP_DATA examples in this article are compiled from GitHub, MSDocs, and other source-code hosting platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Consult each project's license before redistributing or reusing the code; please do not reproduce without permission.