
Self-study tutorial: C++ GST_TIME_ARGS code examples

51自学网 2021-06-01 20:57:04

This article collects typical usage examples of the GST_TIME_ARGS function-style macro in C++, gathered from open-source projects. If you are wondering what exactly GST_TIME_ARGS does, how to call it, or what real code that uses it looks like, the curated examples below should help. GST_TIME_ARGS is defined by GStreamer and is always paired with the GST_TIME_FORMAT printf specifier to log a GstClockTime.

The following presents 26 code examples of GST_TIME_ARGS drawn from real projects, sorted by popularity by default.
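As a quick orientation before the project code, here is a minimal, self-contained sketch of the macro pair in isolation. It assumes nothing beyond a standard GStreamer installation; the timestamp values are made up for illustration.

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstClockTime ts;

  gst_init (&argc, &argv);

  /* GST_TIME_FORMAT is a printf format specifier for a clock time, and
   * GST_TIME_ARGS expands a GstClockTime (in nanoseconds) into the
   * hours/minutes/seconds/nanoseconds arguments that specifier expects;
   * the two macros are always used together. */
  ts = 90 * GST_SECOND + 500 * GST_MSECOND;
  g_print ("timestamp: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (ts));

  /* An invalid time (GST_CLOCK_TIME_NONE) prints as 99:99:99.999999999,
   * which is why the examples below guard values with
   * GST_CLOCK_TIME_IS_VALID () before trusting them. */
  g_print ("invalid:   %" GST_TIME_FORMAT "\n",
      GST_TIME_ARGS (GST_CLOCK_TIME_NONE));

  return 0;
}

Every example below follows the same pattern: GST_TIME_FORMAT sits in the format string and the matching GST_TIME_ARGS call sits in the argument list, usually inside GST_DEBUG / GST_LOG logging macros.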

Example 1: gst_base_rtp_audio_payload_flush

/**
 * gst_base_rtp_audio_payload_flush:
 * @baseaudiopayload: a #GstBaseRTPPayload
 * @payload_len: length of payload
 * @timestamp: a #GstClockTime
 *
 * Create an RTP buffer and store @payload_len bytes of the adapter as the
 * payload. Set the timestamp on the new buffer to @timestamp before pushing
 * the buffer downstream.
 *
 * If @payload_len is -1, all pending bytes will be flushed. If @timestamp is
 * -1, the timestamp will be calculated automatically.
 *
 * Returns: a #GstFlowReturn
 *
 * Since: 0.10.25
 */
GstFlowReturn
gst_base_rtp_audio_payload_flush (GstBaseRTPAudioPayload * baseaudiopayload,
    guint payload_len, GstClockTime timestamp)
{
  GstBaseRTPPayload *basepayload;
  GstBaseRTPAudioPayloadPrivate *priv;
  GstBuffer *outbuf;
  guint8 *payload;
  GstFlowReturn ret;
  GstAdapter *adapter;
  guint64 distance;

  priv = baseaudiopayload->priv;
  adapter = priv->adapter;

  basepayload = GST_BASE_RTP_PAYLOAD (baseaudiopayload);

  if (payload_len == -1)
    payload_len = gst_adapter_available (adapter);

  /* nothing to do, just return */
  if (payload_len == 0)
    return GST_FLOW_OK;

  if (timestamp == -1) {
    /* calculate the timestamp */
    timestamp = gst_adapter_prev_timestamp (adapter, &distance);

    GST_LOG_OBJECT (baseaudiopayload,
        "last timestamp %" GST_TIME_FORMAT ", distance %" G_GUINT64_FORMAT,
        GST_TIME_ARGS (timestamp), distance);

    if (GST_CLOCK_TIME_IS_VALID (timestamp) && distance > 0) {
      /* convert the number of bytes since the last timestamp to time and add to
       * the last seen timestamp */
      timestamp += priv->bytes_to_time (baseaudiopayload, distance);
    }
  }

  GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
      payload_len, GST_TIME_ARGS (timestamp));

  if (priv->buffer_list && gst_adapter_available_fast (adapter) >= payload_len) {
    GstBuffer *buffer;
    /* we can quickly take a buffer out of the adapter without having to copy
     * anything. */
    buffer = gst_adapter_take_buffer (adapter, payload_len);
    ret = gst_base_rtp_audio_payload_push_buffer (baseaudiopayload, buffer);
  } else {
    /* create buffer to hold the payload */
    outbuf = gst_rtp_buffer_new_allocate (payload_len, 0, 0);

    /* copy payload */
    payload = gst_rtp_buffer_get_payload (outbuf);
    gst_adapter_copy (adapter, payload, 0, payload_len);
    gst_adapter_flush (adapter, payload_len);

    /* set metadata */
    gst_base_rtp_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
        timestamp);

    ret = gst_basertppayload_push (basepayload, outbuf);
  }

  return ret;
}
Developer: genesi | Project: gst-base-plugins | Lines: 84


Example 2: gst_shape_wipe_video_sink_chain

static GstFlowReturn
gst_shape_wipe_video_sink_chain (GstPad * pad, GstBuffer * buffer)
{
  GstShapeWipe *self = GST_SHAPE_WIPE (GST_PAD_PARENT (pad));
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *mask = NULL, *outbuf = NULL;
  GstClockTime timestamp;
  gboolean new_outbuf = FALSE;

  if (G_UNLIKELY (self->fmt == GST_VIDEO_FORMAT_UNKNOWN))
    return GST_FLOW_NOT_NEGOTIATED;

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  timestamp =
      gst_segment_to_stream_time (&self->segment, GST_FORMAT_TIME, timestamp);

  if (GST_CLOCK_TIME_IS_VALID (timestamp))
    gst_object_sync_values (G_OBJECT (self), timestamp);

  GST_DEBUG_OBJECT (self,
      "Blending buffer with timestamp %" GST_TIME_FORMAT " at position %lf",
      GST_TIME_ARGS (timestamp), self->mask_position);

  g_mutex_lock (self->mask_mutex);
  if (!self->mask)
    g_cond_wait (self->mask_cond, self->mask_mutex);

  if (self->mask == NULL) {
    g_mutex_unlock (self->mask_mutex);
    gst_buffer_unref (buffer);
    return GST_FLOW_UNEXPECTED;
  } else {
    mask = gst_buffer_ref (self->mask);
  }
  g_mutex_unlock (self->mask_mutex);

  if (!gst_shape_wipe_do_qos (self, GST_BUFFER_TIMESTAMP (buffer))) {
    gst_buffer_unref (buffer);
    gst_buffer_unref (mask);
    return GST_FLOW_OK;
  }

  /* Try to blend inplace, if it's not possible
   * get a new buffer from downstream. */
  if (!gst_buffer_is_writable (buffer)) {
    ret =
        gst_pad_alloc_buffer_and_set_caps (self->srcpad, GST_BUFFER_OFFSET_NONE,
        GST_BUFFER_SIZE (buffer), GST_PAD_CAPS (self->srcpad), &outbuf);
    if (G_UNLIKELY (ret != GST_FLOW_OK)) {
      gst_buffer_unref (buffer);
      gst_buffer_unref (mask);
      return ret;
    }
    gst_buffer_copy_metadata (outbuf, buffer, GST_BUFFER_COPY_ALL);
    new_outbuf = TRUE;
  } else {
    outbuf = buffer;
  }

  if (self->fmt == GST_VIDEO_FORMAT_AYUV && self->mask_bpp == 16)
    ret = gst_shape_wipe_blend_ayuv_16 (self, buffer, mask, outbuf);
  else if (self->fmt == GST_VIDEO_FORMAT_AYUV)
    ret = gst_shape_wipe_blend_ayuv_8 (self, buffer, mask, outbuf);
  else if (self->fmt == GST_VIDEO_FORMAT_ARGB && self->mask_bpp == 16)
    ret = gst_shape_wipe_blend_argb_16 (self, buffer, mask, outbuf);
  else if (self->fmt == GST_VIDEO_FORMAT_ARGB)
    ret = gst_shape_wipe_blend_argb_8 (self, buffer, mask, outbuf);
  else if (self->fmt == GST_VIDEO_FORMAT_BGRA && self->mask_bpp == 16)
    ret = gst_shape_wipe_blend_bgra_16 (self, buffer, mask, outbuf);
  else if (self->fmt == GST_VIDEO_FORMAT_BGRA)
    ret = gst_shape_wipe_blend_bgra_8 (self, buffer, mask, outbuf);
  else
    g_assert_not_reached ();

  gst_buffer_unref (mask);
  if (new_outbuf)
    gst_buffer_unref (buffer);

  if (ret != GST_FLOW_OK) {
    gst_buffer_unref (outbuf);
    return ret;
  }

  ret = gst_pad_push (self->srcpad, outbuf);

  return ret;
}
Developer: bilboed | Project: gst-plugins-bad | Lines: 87


Example 3: gst_videoframe_audiolevel_asink_chain

//......... some code omitted here .........
      if (g_queue_get_length (&self->vtimeq) < 2) {
        vtemp = self->vsegment.position;
      } else if (self->vsegment.position == GST_CLOCK_TIME_NONE) {
        /* g_queue_get_length is surely >= 2 at this point
         * so the adapter isn't empty */
        buf =
            gst_adapter_take_buffer (self->adapter,
            gst_adapter_available (self->adapter));
        if (buf != NULL) {
          GstMessage *msg;

          msg = update_rms_from_buffer (self, buf);
          g_mutex_unlock (&self->mutex);
          gst_element_post_message (GST_ELEMENT (self), msg);
          gst_buffer_unref (buf);
          g_mutex_lock (&self->mutex);  /* we unlock again later */
        }
        break;
      }
    } else if (g_queue_get_length (&self->vtimeq) < 2) {
      continue;
    }

    vt0 = g_queue_pop_head (&self->vtimeq);
    if (vtemp == GST_CLOCK_TIME_NONE)
      vt1 = g_queue_peek_head (&self->vtimeq);
    else
      vt1 = &vtemp;

    cur_time =
        self->first_time + gst_util_uint64_scale (self->total_frames,
        GST_SECOND, rate);
    GST_DEBUG_OBJECT (self,
        "Processing: current time is %" GST_TIME_FORMAT,
        GST_TIME_ARGS (cur_time));
    GST_DEBUG_OBJECT (self, "Total frames is %i with a rate of %d",
        self->total_frames, rate);
    GST_DEBUG_OBJECT (self, "Start time is %" GST_TIME_FORMAT,
        GST_TIME_ARGS (self->first_time));
    GST_DEBUG_OBJECT (self, "Time on top is %" GST_TIME_FORMAT,
        GST_TIME_ARGS (*vt0));

    if (cur_time < *vt0) {
      guint num_frames =
          gst_util_uint64_scale (*vt0 - cur_time, rate, GST_SECOND);
      bytes = num_frames * GST_AUDIO_INFO_BPF (&self->ainfo);
      available_bytes = gst_adapter_available (self->adapter);
      if (available_bytes == 0) {
        g_queue_push_head (&self->vtimeq, vt0);
        break;
      }
      if (bytes == 0) {
        cur_time = *vt0;
      } else {
        GST_DEBUG_OBJECT (self,
            "Flushed %" G_GSIZE_FORMAT " out of %" G_GSIZE_FORMAT " bytes",
            bytes, available_bytes);
        gst_adapter_flush (self->adapter, MIN (bytes, available_bytes));
        self->total_frames += num_frames;
        if (available_bytes <= bytes) {
          g_queue_push_head (&self->vtimeq, vt0);
          break;
        }
        cur_time =
            self->first_time + gst_util_uint64_scale (self->total_frames,
            GST_SECOND, rate);
      }
Developer: 0p1pp1 | Project: gst-plugins-bad | Lines: 67


Example 4: discoverer_collect

/* Called when pipeline is pre-rolled */
static void
discoverer_collect (GstDiscoverer * dc)
{
  GST_DEBUG ("Collecting information");

  /* Stop the timeout handler if present */
  if (dc->priv->timeoutid) {
    g_source_remove (dc->priv->timeoutid);
    dc->priv->timeoutid = 0;
  }

  if (dc->priv->streams) {
    /* FIXME : Make this querying optional */
    if (TRUE) {
      GstElement *pipeline = (GstElement *) dc->priv->pipeline;
      GstFormat format = GST_FORMAT_TIME;
      gint64 dur;

      GST_DEBUG ("Attempting to query duration");

      if (gst_element_query_duration (pipeline, &format, &dur)) {
        if (format == GST_FORMAT_TIME) {
          GST_DEBUG ("Got duration %" GST_TIME_FORMAT, GST_TIME_ARGS (dur));
          dc->priv->current_info->duration = (guint64) dur;
        }
      }

      if (dc->priv->seeking_query) {
        if (gst_element_query (pipeline, dc->priv->seeking_query)) {
          gboolean seekable;

          gst_query_parse_seeking (dc->priv->seeking_query, &format,
              &seekable, NULL, NULL);
          if (format == GST_FORMAT_TIME) {
            GST_DEBUG ("Got seekable %d", seekable);
            dc->priv->current_info->seekable = seekable;
          }
        }
      }
    }

    if (dc->priv->current_topology)
      dc->priv->current_info->stream_info = parse_stream_topology (dc,
          dc->priv->current_topology, NULL);

    /*
     * Images need some special handling. They do not have a duration, have
     * caps named image/<foo> (the exception being MJPEG video which is also
     * type image/jpeg), and should consist of precisely one stream (actually
     * initially there are 2, the image and raw stream, but we squash these
     * while parsing the stream topology). At some point, if we find that these
     * conditions are not sufficient, we can count the number of decoders and
     * parsers in the chain, and if there's more than one decoder, or any
     * parser at all, we should not mark this as an image.
     */
    if (dc->priv->current_info->duration == 0 &&
        dc->priv->current_info->stream_info != NULL &&
        dc->priv->current_info->stream_info->next == NULL) {
      GstStructure *st =
          gst_caps_get_structure (dc->priv->current_info->stream_info->caps, 0);

      if (g_str_has_prefix (gst_structure_get_name (st), "image/"))
        ((GstDiscovererVideoInfo *) dc->priv->current_info->
            stream_info)->is_image = TRUE;
    }
  }

  if (dc->priv->async) {
    GST_DEBUG ("Emitting 'discoverered'");
    g_signal_emit (dc, gst_discoverer_signals[SIGNAL_DISCOVERED], 0,
        dc->priv->current_info, dc->priv->current_error);
    /* Clients get a copy of current_info since it is a boxed type */
    gst_discoverer_info_unref (dc->priv->current_info);
  }
}
Developer: 166MMX | Project: openjdk.java.net-openjfx-8u40-rt | Lines: 76


Example 5: gst_isoff_sidx_parser_add_buffer

//......... some code omitted here .........
      }

      if (parser->size == 1) {
        if (gst_byte_reader_get_remaining (&reader) < 12) {
          gst_byte_reader_set_pos (&reader, 0);
          break;
        }
        parser->size = gst_byte_reader_get_uint64_be_unchecked (&reader);
      }

      if (parser->size == 0) {
        res = GST_ISOFF_PARSER_ERROR;
        gst_byte_reader_set_pos (&reader, 0);
        break;
      }

      parser->sidx.version = gst_byte_reader_get_uint8_unchecked (&reader);
      parser->sidx.flags = gst_byte_reader_get_uint24_le_unchecked (&reader);

      parser->status = GST_ISOFF_SIDX_PARSER_HEADER;

    case GST_ISOFF_SIDX_PARSER_HEADER:
      remaining = gst_byte_reader_get_remaining (&reader);
      if (remaining < 12 + (parser->sidx.version == 0 ? 8 : 16)) {
        break;
      }

      parser->sidx.ref_id = gst_byte_reader_get_uint32_be_unchecked (&reader);
      parser->sidx.timescale =
          gst_byte_reader_get_uint32_be_unchecked (&reader);
      if (parser->sidx.version == 0) {
        parser->sidx.earliest_pts =
            gst_byte_reader_get_uint32_be_unchecked (&reader);
        parser->sidx.first_offset =
            gst_byte_reader_get_uint32_be_unchecked (&reader);
      } else {
        parser->sidx.earliest_pts =
            gst_byte_reader_get_uint64_be_unchecked (&reader);
        parser->sidx.first_offset =
            gst_byte_reader_get_uint64_be_unchecked (&reader);
      }

      /* skip 2 reserved bytes */
      gst_byte_reader_skip_unchecked (&reader, 2);
      parser->sidx.entries_count =
          gst_byte_reader_get_uint16_be_unchecked (&reader);

      GST_LOG ("Timescale: %" G_GUINT32_FORMAT, parser->sidx.timescale);
      GST_LOG ("Earliest pts: %" G_GUINT64_FORMAT, parser->sidx.earliest_pts);
      GST_LOG ("First offset: %" G_GUINT64_FORMAT, parser->sidx.first_offset);

      parser->cumulative_pts =
          gst_util_uint64_scale_int_round (parser->sidx.earliest_pts,
          GST_SECOND, parser->sidx.timescale);

      if (parser->sidx.entries_count) {
        parser->sidx.entries =
            g_malloc (sizeof (GstSidxBoxEntry) * parser->sidx.entries_count);
      }
      parser->sidx.entry_index = 0;

      parser->status = GST_ISOFF_SIDX_PARSER_DATA;

    case GST_ISOFF_SIDX_PARSER_DATA:
      while (parser->sidx.entry_index < parser->sidx.entries_count) {
        GstSidxBoxEntry *entry =
            &parser->sidx.entries[parser->sidx.entry_index];

        remaining = gst_byte_reader_get_remaining (&reader);
        if (remaining < 12)
          break;

        entry->offset = parser->cumulative_entry_size;
        entry->pts = parser->cumulative_pts;
        gst_isoff_parse_sidx_entry (entry, &reader);
        entry->duration = gst_util_uint64_scale_int_round (entry->duration,
            GST_SECOND, parser->sidx.timescale);
        parser->cumulative_entry_size += entry->size;
        parser->cumulative_pts += entry->duration;

        GST_LOG ("Sidx entry %d) offset: %" G_GUINT64_FORMAT ", pts: %"
            GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT " - size %"
            G_GUINT32_FORMAT, parser->sidx.entry_index, entry->offset,
            GST_TIME_ARGS (entry->pts), GST_TIME_ARGS (entry->duration),
            entry->size);

        parser->sidx.entry_index++;
      }

      if (parser->sidx.entry_index == parser->sidx.entries_count)
        parser->status = GST_ISOFF_SIDX_PARSER_FINISHED;
      else
        break;

    case GST_ISOFF_SIDX_PARSER_FINISHED:
      parser->sidx.entry_index = 0;
      res = GST_ISOFF_PARSER_DONE;
      break;
  }

  *consumed = gst_byte_reader_get_pos (&reader);
  gst_buffer_unmap (buffer, &info);
  return res;
}
Developer: Haifen | Project: gst-plugins-bad | Lines: 101


Example 6: delayed_seek_cb

/* Delayed seek callback. This gets called by the timer setup in the above function. */
static gboolean
delayed_seek_cb (CustomData * data)
{
  GST_DEBUG ("Doing delayed seek to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (data->desired_position));
  execute_seek (data->desired_position, data);
  return FALSE;
}
Developer: 0x8BADFOOD | Project: GstreamerCodeSnippets | Lines: 6


Example 7: gst_base_video_decoder_sink_event

static gboolean
gst_base_video_decoder_sink_event (GstPad * pad, GstEvent * event)
{
  GstBaseVideoDecoder *base_video_decoder;
  gboolean res = FALSE;

  base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_EOS:
      if (!base_video_decoder->packetized)
        gst_base_video_decoder_drain (base_video_decoder, TRUE);

      res =
          gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD
          (base_video_decoder), event);
      break;
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      double rate;
      double applied_rate;
      GstFormat format;
      gint64 start;
      gint64 stop;
      gint64 position;
      GstSegment *segment = &base_video_decoder->segment;

      gst_event_parse_new_segment_full (event, &update, &rate,
          &applied_rate, &format, &start, &stop, &position);

      if (format != GST_FORMAT_TIME)
        goto newseg_wrong_format;

      if (!update) {
        gst_base_video_decoder_flush (base_video_decoder);
      }

      base_video_decoder->timestamp_offset = start;

      gst_segment_set_newsegment_full (segment,
          update, rate, applied_rate, format, start, stop, position);
      base_video_decoder->have_segment = TRUE;

      GST_WARNING ("new segment: format %d rate %g start %" GST_TIME_FORMAT
          " stop %" GST_TIME_FORMAT
          " position %" GST_TIME_FORMAT
          " update %d",
          format, rate,
          GST_TIME_ARGS (segment->start),
          GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time), update);

      res =
          gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD
          (base_video_decoder), event);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
      gst_base_video_decoder_flush (base_video_decoder);
      gst_segment_init (&base_video_decoder->segment, GST_FORMAT_TIME);

      res =
          gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD
          (base_video_decoder), event);
      break;
    default:
      res = gst_pad_event_default (pad, event);
      break;
  }

done:
  gst_object_unref (base_video_decoder);
  return res;

newseg_wrong_format:
  GST_DEBUG_OBJECT (base_video_decoder, "received non TIME newsegment");
  gst_event_unref (event);
  goto done;
}
Developer: collects | Project: gst-plugins-bad | Lines: 82


Example 8: gst_base_video_decoder_src_event

static gboolean
gst_base_video_decoder_src_event (GstPad * pad, GstEvent * event)
{
  GstBaseVideoDecoder *base_video_decoder;
  gboolean res = FALSE;

  base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEEK:
      /* FIXME: do seek using bitrate in case upstream doesn't handle it */
      res =
          gst_pad_push_event (GST_BASE_VIDEO_DECODER_SINK_PAD
          (base_video_decoder), event);
      break;
    case GST_EVENT_QOS:
    {
      gdouble proportion;
      GstClockTimeDiff diff;
      GstClockTime timestamp;
      GstClockTime duration;

      gst_event_parse_qos (event, &proportion, &diff, &timestamp);

      GST_OBJECT_LOCK (base_video_decoder);
      base_video_decoder->proportion = proportion;
      if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
        if (G_UNLIKELY (diff > 0)) {
          if (base_video_decoder->state.fps_n > 0)
            duration =
                gst_util_uint64_scale (GST_SECOND,
                base_video_decoder->state.fps_d,
                base_video_decoder->state.fps_n);
          else
            duration = 0;
          base_video_decoder->earliest_time = timestamp + 2 * diff + duration;
        } else {
          base_video_decoder->earliest_time = timestamp + diff;
        }
      } else {
        base_video_decoder->earliest_time = GST_CLOCK_TIME_NONE;
      }
      GST_OBJECT_UNLOCK (base_video_decoder);

      GST_DEBUG_OBJECT (base_video_decoder,
          "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT ", %g",
          GST_TIME_ARGS (timestamp), diff, proportion);

      res =
          gst_pad_push_event (GST_BASE_VIDEO_DECODER_SINK_PAD
          (base_video_decoder), event);
      break;
    }
    default:
      res =
          gst_pad_push_event (GST_BASE_VIDEO_DECODER_SINK_PAD
          (base_video_decoder), event);
      break;
  }

  gst_object_unref (base_video_decoder);
  return res;
}
Developer: collects | Project: gst-plugins-bad | Lines: 67


Example 9: gst_mve_demux_chain

static GstFlowReturn
gst_mve_demux_chain (GstPad * sinkpad, GstBuffer * inbuf)
{
  GstMveDemux *mve = GST_MVE_DEMUX (GST_PAD_PARENT (sinkpad));
  GstFlowReturn ret = GST_FLOW_OK;

  gst_adapter_push (mve->adapter, inbuf);

  GST_DEBUG_OBJECT (mve, "queuing buffer, needed:%d, available:%u",
      mve->needed_bytes, gst_adapter_available (mve->adapter));

  while ((gst_adapter_available (mve->adapter) >= mve->needed_bytes) &&
      (ret == GST_FLOW_OK)) {
    GstMveDemuxStream *stream = NULL;
    GstBuffer *outbuf = NULL;

    switch (mve->state) {
      case MVEDEMUX_STATE_INITIAL:
        gst_adapter_flush (mve->adapter, mve->needed_bytes);

        mve->chunk_offset += mve->needed_bytes;
        mve->needed_bytes = 4;
        mve->state = MVEDEMUX_STATE_NEXT_CHUNK;
        break;

      case MVEDEMUX_STATE_NEXT_CHUNK:{
        const guint8 *data;
        guint16 size;

        data = gst_adapter_peek (mve->adapter, mve->needed_bytes);
        size = GST_MVE_SEGMENT_SIZE (data);

        if (mve->chunk_offset >= mve->chunk_size) {
          /* new chunk, flush buffer and proceed with next segment */
          guint16 chunk_type = GST_READ_UINT16_LE (data + 2);

          gst_adapter_flush (mve->adapter, mve->needed_bytes);
          mve->chunk_size = size;
          mve->chunk_offset = 0;

          if (chunk_type > MVE_CHUNK_END) {
            GST_WARNING_OBJECT (mve,
                "skipping unknown chunk type 0x%02x of size:%u", chunk_type,
                size);
            mve->needed_bytes += size;
            mve->state = MVEDEMUX_STATE_SKIP;
          } else {
            GST_DEBUG_OBJECT (mve, "found new chunk type 0x%02x of size:%u",
                chunk_type, size);
          }
        } else if (mve->chunk_offset <= mve->chunk_size) {
          /* new segment */
          GST_DEBUG_OBJECT (mve, "found segment type 0x%02x of size:%u",
              GST_MVE_SEGMENT_TYPE (data), size);

          mve->needed_bytes += size;
          mve->state = MVEDEMUX_STATE_MOVIE;
        }
      }
        break;

      case MVEDEMUX_STATE_MOVIE:
        ret = gst_mve_parse_segment (mve, &stream, &outbuf);

        if ((ret == GST_FLOW_OK) && (outbuf != NULL)) {
          /* send buffer */
          GST_DEBUG_OBJECT (mve,
              "pushing buffer with time %" GST_TIME_FORMAT
              " (%u bytes) on pad %s",
              GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
              GST_BUFFER_SIZE (outbuf), GST_PAD_NAME (stream->pad));

          ret = gst_pad_push (stream->pad, outbuf);
          stream->last_flow = ret;
        }

        if (ret == GST_FLOW_NOT_LINKED) {
          if (mve->audio_stream
              && mve->audio_stream->last_flow != GST_FLOW_NOT_LINKED)
            ret = GST_FLOW_OK;
          if (mve->video_stream
              && mve->video_stream->last_flow != GST_FLOW_NOT_LINKED)
            ret = GST_FLOW_OK;
        }

        /* update current offset */
        mve->chunk_offset += mve->needed_bytes;

        mve->state = MVEDEMUX_STATE_NEXT_CHUNK;
        mve->needed_bytes = 4;
        break;

      case MVEDEMUX_STATE_SKIP:
        mve->chunk_offset += mve->needed_bytes;
        gst_adapter_flush (mve->adapter, mve->needed_bytes);
        mve->state = MVEDEMUX_STATE_NEXT_CHUNK;
        mve->needed_bytes = 4;
        break;

      default:
//......... some code omitted here .........
Developer: drothlis | Project: gst-plugins-bad | Lines: 101


Example 10: theora_parse_src_query

static gboolean
theora_parse_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  GstTheoraParse *parse;
  gboolean res = FALSE;

  parse = GST_THEORA_PARSE (parent);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      gint64 frame, value;
      GstFormat my_format, format;
      gint64 time;

      frame = parse->prev_frame;

      GST_LOG_OBJECT (parse,
          "query %p: we have current frame: %" G_GINT64_FORMAT, query, frame);

      /* parse format */
      gst_query_parse_position (query, &format, NULL);

      /* and convert to the final format in two steps with time as the
       * intermediate step */
      my_format = GST_FORMAT_TIME;
      if (!(res =
              theora_parse_src_convert (parse->sinkpad, GST_FORMAT_DEFAULT,
                  frame, &my_format, &time)))
        goto error;

      /* fixme: handle segments
         time = (time - parse->segment.start) + parse->segment.time;
       */

      GST_LOG_OBJECT (parse,
          "query %p: our time: %" GST_TIME_FORMAT " (conv to %s)",
          query, GST_TIME_ARGS (time), gst_format_get_name (format));

      if (!(res =
              theora_parse_src_convert (pad, my_format, time, &format, &value)))
        goto error;

      gst_query_set_position (query, format, value);

      GST_LOG_OBJECT (parse,
          "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
          format);
      break;
    }
    case GST_QUERY_DURATION:
      /* forward to peer for total */
      if (!(res = gst_pad_query (GST_PAD_PEER (parse->sinkpad), query)))
        goto error;
      break;
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      if (!(res =
              theora_parse_src_convert (pad, src_fmt, src_val, &dest_fmt,
                  &dest_val)))
        goto error;

      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    default:
      res = gst_pad_query_default (pad, parent, query);
      break;
  }
done:
  return res;

  /* ERRORS */
error:
  {
    GST_DEBUG_OBJECT (parse, "query failed");
    goto done;
  }
}
Developer: reynaldo-samsung | Project: gst-plugins-base | Lines: 85


Example 11: main

gint
main (gint argc, gchar * argv[])
{
  GstElement *pipeline, *filesrc, *decodebin;
  GstStateChangeReturn res;
  GstIterator *it;
  GstBus *bus;
  GValue data = { 0, };

  gst_init (&argc, &argv);

  pipeline = gst_pipeline_new ("pipeline");

  filesrc = gst_element_factory_make ("filesrc", "filesrc");
  g_assert (filesrc);

  decodebin = gst_element_factory_make ("decodebin", "decodebin");
  g_assert (decodebin);

  gst_bin_add_many (GST_BIN (pipeline), filesrc, decodebin, NULL);
  gst_element_link (filesrc, decodebin);

  if (argc < 2) {
    g_print ("usage: %s <filenames>\n", argv[0]);
    exit (-1);
  }

  if (!g_str_has_prefix (argv[1], "file://")) {
    g_object_set (G_OBJECT (filesrc), "location", argv[1], NULL);
  } else {
    g_object_set (G_OBJECT (filesrc), "location", argv[1] + 7, NULL);
  }

  /* we've got to connect fakesinks to newly decoded pads to make sure
   * buffers have actually been flowing over those pads and caps have
   * been set on them. decodebin might insert internal queues and
   * without fakesinks it's pot-luck what caps we get from the pad, because
   * it depends on whether the queues have started pushing buffers yet or not.
   * With fakesinks we make sure that the pipeline doesn't go to PAUSED state
   * before each fakesink has a buffer queued. */
  g_signal_connect (decodebin, "new-decoded-pad",
      G_CALLBACK (new_decoded_pad_cb), pipeline);

  bus = gst_element_get_bus (pipeline);

  g_print ("pause..\n");
  res = gst_element_set_state (pipeline, GST_STATE_PAUSED);
  if (res == GST_STATE_CHANGE_FAILURE) {
    show_error ("Could not go to PAUSED state", bus);
    exit (-1);
  }

  g_print ("waiting..\n");
  res = gst_element_get_state (pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
  if (res != GST_STATE_CHANGE_SUCCESS) {
    show_error ("Failed to complete state change to PAUSED", bus);
    exit (-1);
  }

  g_print ("stats..\n");
  it = gst_element_iterate_src_pads (decodebin);
  while (gst_iterator_next (it, &data) == GST_ITERATOR_OK) {
    GstPad *pad = g_value_get_object (&data);
    GstCaps *caps;
    gchar *str;
    GstQuery *query;

    g_print ("stream %s:\n", GST_OBJECT_NAME (pad));

    caps = gst_pad_query_caps (pad, NULL);
    str = gst_caps_to_string (caps);
    g_print (" caps: %s\n", str);
    g_free (str);
    gst_caps_unref (caps);

    query = gst_query_new_duration (GST_FORMAT_TIME);
    if (gst_pad_query (pad, query)) {
      gint64 duration;

      gst_query_parse_duration (query, NULL, &duration);
      g_print (" duration: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (duration));
    }
    gst_query_unref (query);

    g_value_reset (&data);
  }
  g_value_unset (&data);
  gst_iterator_free (it);

  return 0;
}
Developer: lubing521 | Project: gst-embedded-builder | Lines: 91


Example 12: gst_identity_transform_ip

static GstFlowReturn
gst_identity_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstIdentity *identity = GST_IDENTITY (trans);
  GstClockTime runtimestamp = G_GINT64_CONSTANT (0);

  if (identity->check_perfect)
    gst_identity_check_perfect (identity, buf);
  if (identity->check_imperfect_timestamp)
    gst_identity_check_imperfect_timestamp (identity, buf);
  if (identity->check_imperfect_offset)
    gst_identity_check_imperfect_offset (identity, buf);

  /* update prev values */
  identity->prev_timestamp = GST_BUFFER_TIMESTAMP (buf);
  identity->prev_duration = GST_BUFFER_DURATION (buf);
  identity->prev_offset_end = GST_BUFFER_OFFSET_END (buf);
  identity->prev_offset = GST_BUFFER_OFFSET (buf);

  if (identity->error_after >= 0) {
    identity->error_after--;
    if (identity->error_after == 0) {
      GST_ELEMENT_ERROR (identity, CORE, FAILED,
          (_("Failed after iterations as requested.")), (NULL));
      return GST_FLOW_ERROR;
    }
  }

  if (identity->drop_probability > 0.0) {
    if ((gfloat) (1.0 * rand () / (RAND_MAX)) < identity->drop_probability) {
      if (!identity->silent) {
        GST_OBJECT_LOCK (identity);
        g_free (identity->last_message);
        identity->last_message =
            g_strdup_printf
            ("dropping   ******* (%s:%s)i (%d bytes, timestamp: %"
            GST_TIME_FORMAT ", duration: %" GST_TIME_FORMAT ", offset: %"
            G_GINT64_FORMAT ", offset_end: % " G_GINT64_FORMAT
            ", flags: %d) %p", GST_DEBUG_PAD_NAME (trans->sinkpad),
            GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
            GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), GST_BUFFER_OFFSET (buf),
            GST_BUFFER_OFFSET_END (buf), GST_BUFFER_FLAGS (buf), buf);
        GST_OBJECT_UNLOCK (identity);
        gst_identity_notify_last_message (identity);
      }
      /* return DROPPED to basetransform. */
      return GST_BASE_TRANSFORM_FLOW_DROPPED;
    }
  }

  if (identity->dump) {
    gst_util_dump_mem (GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf));
  }

  if (!identity->silent) {
    GST_OBJECT_LOCK (identity);
    g_free (identity->last_message);
    identity->last_message =
        g_strdup_printf ("chain   ******* (%s:%s)i (%d bytes, timestamp: %"
        GST_TIME_FORMAT ", duration: %" GST_TIME_FORMAT ", offset: %"
        G_GINT64_FORMAT ", offset_end: % " G_GINT64_FORMAT ", flags: %d) %p",
        GST_DEBUG_PAD_NAME (trans->sinkpad), GST_BUFFER_SIZE (buf),
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
        GST_BUFFER_OFFSET (buf), GST_BUFFER_OFFSET_END (buf),
        GST_BUFFER_FLAGS (buf), buf);
    GST_OBJECT_UNLOCK (identity);
    gst_identity_notify_last_message (identity);
  }

  if (identity->datarate > 0) {
    GstClockTime time = gst_util_uint64_scale_int (identity->offset,
        GST_SECOND, identity->datarate);

    GST_BUFFER_TIMESTAMP (buf) = time;
    GST_BUFFER_DURATION (buf) =
        GST_BUFFER_SIZE (buf) * GST_SECOND / identity->datarate;
  }

  if (identity->signal_handoffs)
    g_signal_emit (identity, gst_identity_signals[SIGNAL_HANDOFF], 0, buf);

  if (trans->segment.format == GST_FORMAT_TIME)
    runtimestamp = gst_segment_to_running_time (&trans->segment,
        GST_FORMAT_TIME, GST_BUFFER_TIMESTAMP (buf));

  if ((identity->sync) && (trans->segment.format == GST_FORMAT_TIME)) {
    GstClock *clock;

    GST_OBJECT_LOCK (identity);
    if ((clock = GST_ELEMENT (identity)->clock)) {
      GstClockReturn cret;
      GstClockTime timestamp;

      timestamp = runtimestamp + GST_ELEMENT (identity)->base_time;

      /* save id if we need to unlock */
      /* FIXME: actually unlock this somewhere in the state changes */
      identity->clock_id = gst_clock_new_single_shot_id (clock, timestamp);
//......... some code omitted here .........
Developer: spunktsch | Project: svtplayer | Lines: 101


Example 13: gst_base_rtp_audio_payload_handle_buffer

static GstFlowReturn
gst_base_rtp_audio_payload_handle_buffer (GstBaseRTPPayload *
    basepayload, GstBuffer * buffer)
{
  GstBaseRTPAudioPayload *payload;
  GstBaseRTPAudioPayloadPrivate *priv;
  guint payload_len;
  GstFlowReturn ret;
  guint available;
  guint min_payload_len;
  guint max_payload_len;
  guint align;
  guint size;
  gboolean discont;

  ret = GST_FLOW_OK;

  payload = GST_BASE_RTP_AUDIO_PAYLOAD_CAST (basepayload);
  priv = payload->priv;

  discont = GST_BUFFER_IS_DISCONT (buffer);
  if (discont) {
    GstClockTime timestamp;

    GST_DEBUG_OBJECT (payload, "Got DISCONT");
    /* flush everything out of the adapter, mark DISCONT */
    ret = gst_base_rtp_audio_payload_flush (payload, -1, -1);
    priv->discont = TRUE;

    timestamp = GST_BUFFER_TIMESTAMP (buffer);

    /* get the distance between the timestamp gap and produce the same gap in
     * the RTP timestamps */
    if (priv->last_timestamp != -1 && timestamp != -1) {
      /* we had a last timestamp, compare it to the new timestamp and update the
       * offset counter for RTP timestamps. The effect is that we will produce
       * output buffers containing the same RTP timestamp gap as the gap
       * between the GST timestamps. */
      if (timestamp > priv->last_timestamp) {
        GstClockTime diff;
        guint64 bytes;
        /* we're only going to apply a positive gap, otherwise we let the marker
         * bit do its thing. simply convert to bytes and add to the current
         * offset */
        diff = timestamp - priv->last_timestamp;
        bytes = priv->time_to_bytes (payload, diff);
        priv->offset += bytes;

        GST_DEBUG_OBJECT (payload,
            "elapsed time %" GST_TIME_FORMAT ", bytes %" G_GUINT64_FORMAT
            ", new offset %" G_GUINT64_FORMAT, GST_TIME_ARGS (diff), bytes,
            priv->offset);
      }
    }
  }

  if (!gst_base_rtp_audio_payload_get_lengths (basepayload, &min_payload_len,
          &max_payload_len, &align))
    goto config_error;

  GST_DEBUG_OBJECT (payload,
      "Calculated min_payload_len %u and max_payload_len %u",
      min_payload_len, max_payload_len);

  size = GST_BUFFER_SIZE (buffer);

  /* shortcut, we don't need to use the adapter when the packet can be pushed
   * through directly. */
  available = gst_adapter_available (priv->adapter);

  GST_DEBUG_OBJECT (payload, "got buffer size %u, available %u",
      size, available);

  if (available == 0 && (size >= min_payload_len && size <= max_payload_len) &&
      (size % align == 0)) {
    /* If buffer fits on an RTP packet, let's just push it through
     * this will check against max_ptime and max_mtu */
    GST_DEBUG_OBJECT (payload, "Fast packet push");
    ret = gst_base_rtp_audio_payload_push_buffer (payload, buffer);
  } else {
    /* push the buffer in the adapter */
    gst_adapter_push (priv->adapter, buffer);
    available += size;

    GST_DEBUG_OBJECT (payload, "available now %u", available);

    /* as long as we have full frames */
    while (available >= min_payload_len) {
      /* get multiple of alignment */
      payload_len = MIN (max_payload_len, available);
      payload_len = ALIGN_DOWN (payload_len, align);

      /* and flush out the bytes from the adapter, automatically set the
       * timestamp. */
      ret = gst_base_rtp_audio_payload_flush (payload, payload_len, -1);

      available -= payload_len;
      GST_DEBUG_OBJECT (payload, "available after push %u", available);
    }
  }
//......... some code omitted here .........
Developer: genesi | Project: gst-base-plugins | Lines: 101


Example 14: gst_ogg_parse_chain

/* Reads in buffers, parses them, reframes into one-buffer-per-ogg-page, submits
 * pages to output pad.
 */
static GstFlowReturn
gst_ogg_parse_chain (GstPad * pad, GstBuffer * buffer)
{
  GstOggParse *ogg;
  GstFlowReturn result = GST_FLOW_OK;
  gint ret = -1;
  guint32 serialno;
  GstBuffer *pagebuffer;
  GstClockTime buffertimestamp = GST_BUFFER_TIMESTAMP (buffer);

  ogg = GST_OGG_PARSE (GST_OBJECT_PARENT (pad));

  GST_LOG_OBJECT (ogg, "Chain function received buffer of size %d",
      GST_BUFFER_SIZE (buffer));

  gst_ogg_parse_submit_buffer (ogg, buffer);

  while (ret != 0 && result == GST_FLOW_OK) {
    ogg_page page;

    /* We use ogg_sync_pageseek() rather than ogg_sync_pageout() so that we can
     * track how many bytes the ogg layer discarded (in the case of sync errors,
     * etc.); this allows us to accurately track the current stream offset
     */
    ret = ogg_sync_pageseek (&ogg->sync, &page);
    if (ret == 0) {
      /* need more data, that's fine... */
      break;
    } else if (ret < 0) {
      /* discontinuity; track how many bytes we skipped (-ret) */
      ogg->offset -= ret;
    } else {
      gint64 granule = ogg_page_granulepos (&page);
#ifndef GST_DISABLE_GST_DEBUG
      int bos = ogg_page_bos (&page);
#endif
      guint64 startoffset = ogg->offset;
      GstOggStream *stream;
      gboolean keyframe;

      serialno = ogg_page_serialno (&page);
      stream = gst_ogg_parse_find_stream (ogg, serialno);

      GST_LOG_OBJECT (ogg, "Timestamping outgoing buffer as %" GST_TIME_FORMAT,
          GST_TIME_ARGS (buffertimestamp));

      if (stream) {
        buffertimestamp = gst_ogg_stream_get_end_time_for_granulepos (stream,
            granule);
        if (ogg->video_stream) {
          if (stream == ogg->video_stream) {
            keyframe = gst_ogg_stream_granulepos_is_key_frame (stream, granule);
          } else {
            keyframe = FALSE;
          }
        } else {
          keyframe = TRUE;
        }
      } else {
        buffertimestamp = GST_CLOCK_TIME_NONE;
        keyframe = TRUE;
      }
      pagebuffer = gst_ogg_parse_buffer_from_page (&page, startoffset,
          buffertimestamp);

      /* We read out 'ret' bytes, so we set the next offset appropriately */
      ogg->offset += ret;

      GST_LOG_OBJECT (ogg,
          "processing ogg page (serial %08x, pageno %ld, "
          "granule pos %" G_GUINT64_FORMAT ", bos %d, offset %"
          G_GUINT64_FORMAT "-%" G_GUINT64_FORMAT ") keyframe=%d",
          serialno, ogg_page_pageno (&page),
          granule, bos, startoffset, ogg->offset, keyframe);

      if (ogg_page_bos (&page)) {
        /* If we've seen this serialno before, this is technically an error,
         * we log this case but accept it - this one replaces the previous
         * stream with this serialno. We can do this since we're streaming, and
         * not supporting seeking...
         */
        GstOggStream *stream = gst_ogg_parse_find_stream (ogg, serialno);

        if (stream != NULL) {
          GST_LOG_OBJECT (ogg, "Incorrect stream; repeats serial number %u "
              "at offset %" G_GINT64_FORMAT, serialno, ogg->offset);
        }

        if (ogg->last_page_not_bos) {
          GST_LOG_OBJECT (ogg, "Deleting all referenced streams, found a new "
              "chain starting with serial %u", serialno);
          gst_ogg_parse_delete_all_streams (ogg);
        }

        stream = gst_ogg_parse_new_stream (ogg, &page);

        ogg->last_page_not_bos = FALSE;
//......... some code omitted here .........
Developer: ChinnaSuhas | Project: ossbuild | Lines: 101


Example 15: gst_base_video_decoder_chain

static GstFlowReturn
gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf)
{
  GstBaseVideoDecoder *base_video_decoder;
  GstFlowReturn ret;

  GST_DEBUG ("chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

#if 0
  /* requiring the pad to be negotiated makes it impossible to use
   * oggdemux or filesrc ! decoder */
  if (!gst_pad_is_negotiated (pad)) {
    GST_DEBUG ("not negotiated");
    return GST_FLOW_NOT_NEGOTIATED;
  }
#endif

  base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));

  GST_DEBUG_OBJECT (base_video_decoder, "chain");

  if (!base_video_decoder->have_segment) {
    GstEvent *event;
    GstFlowReturn ret;

    GST_WARNING
        ("Received buffer without a new-segment. Assuming timestamps start from 0.");

    gst_segment_set_newsegment_full (&base_video_decoder->segment,
        FALSE, 1.0, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0);
    base_video_decoder->have_segment = TRUE;

    event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0,
        GST_CLOCK_TIME_NONE, 0);

    ret =
        gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder),
        event);
    if (!ret) {
      GST_ERROR ("new segment event ret=%d", ret);
      return GST_FLOW_ERROR;
    }
  }

  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) {
    GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer");
    gst_base_video_decoder_flush (base_video_decoder);
  }

  base_video_decoder->input_offset += GST_BUFFER_SIZE (buf);

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    gst_base_video_decoder_add_timestamp (base_video_decoder, buf);
  }

  if (!base_video_decoder->current_frame)
    base_video_decoder->current_frame =
        gst_base_video_decoder_new_frame (base_video_decoder);

  if (base_video_decoder->packetized) {
    base_video_decoder->current_frame->sink_buffer = buf;

    ret = gst_base_video_decoder_have_frame (base_video_decoder, TRUE, NULL);
  } else {
    gst_adapter_push (base_video_decoder->input_adapter, buf);

    ret = gst_base_video_decoder_drain (base_video_decoder, FALSE);
  }

  gst_object_unref (base_video_decoder);
  return ret;
}
Developer: collects | Project: gst-plugins-bad | Lines: 74


Example 16: gst_rtp_dtmf_depay_process

//......... some code omitted here .........
  /* clip to whole units of unit_time */
  if (rtpdtmfdepay->unit_time) {
    guint unit_time_clock =
        (rtpdtmfdepay->unit_time * depayload->clock_rate) / 1000;
    if (dtmf_payload.duration % unit_time_clock) {
      /* Make sure we don't overflow the duration */
      if (dtmf_payload.duration < G_MAXUINT16 - unit_time_clock)
        dtmf_payload.duration += unit_time_clock -
            (dtmf_payload.duration % unit_time_clock);
      else
        dtmf_payload.duration -= dtmf_payload.duration % unit_time_clock;
    }
  }

  /* clip to max duration */
  if (rtpdtmfdepay->max_duration) {
    guint max_duration_clock =
        (rtpdtmfdepay->max_duration * depayload->clock_rate) / 1000;

    if (max_duration_clock < G_MAXUINT16 &&
        dtmf_payload.duration > max_duration_clock)
      dtmf_payload.duration = max_duration_clock;
  }

  GST_DEBUG_OBJECT (depayload, "Received new RTP DTMF packet : "
      "marker=%d - timestamp=%u - event=%d - duration=%d",
      marker, timestamp, dtmf_payload.event, dtmf_payload.duration);

  GST_DEBUG_OBJECT (depayload,
      "Previous information : timestamp=%u - duration=%d",
      rtpdtmfdepay->previous_ts, rtpdtmfdepay->previous_duration);

  /* First packet */
  if (marker || rtpdtmfdepay->previous_ts != timestamp) {
    rtpdtmfdepay->sample = 0;
    rtpdtmfdepay->previous_ts = timestamp;
    rtpdtmfdepay->previous_duration = dtmf_payload.duration;
    rtpdtmfdepay->first_gst_ts = GST_BUFFER_TIMESTAMP (buf);

    structure = gst_structure_new ("dtmf-event",
        "number", G_TYPE_INT, dtmf_payload.event,
        "volume", G_TYPE_INT, dtmf_payload.volume,
        "type", G_TYPE_INT, 1, "method", G_TYPE_INT, 1, NULL);
    if (structure) {
      dtmf_message =
          gst_message_new_element (GST_OBJECT (depayload), structure);
      if (dtmf_message) {
        if (!gst_element_post_message (GST_ELEMENT (depayload), dtmf_message)) {
          GST_ERROR_OBJECT (depayload,
              "Unable to send dtmf-event message to bus");
        }
      } else {
        GST_ERROR_OBJECT (depayload, "Unable to create dtmf-event message");
      }
    } else {
      GST_ERROR_OBJECT (depayload, "Unable to create dtmf-event structure");
    }
  } else {
    guint16 duration = dtmf_payload.duration;

    dtmf_payload.duration -= rtpdtmfdepay->previous_duration;
    /* If late buffer, ignore */
    if (duration > rtpdtmfdepay->previous_duration)
      rtpdtmfdepay->previous_duration = duration;
  }

  GST_DEBUG_OBJECT (depayload, "new previous duration : %d - new duration : %d"
      " - diff  : %d - clock rate : %d - timestamp : %" G_GUINT64_FORMAT,
      rtpdtmfdepay->previous_duration, dtmf_payload.duration,
      (rtpdtmfdepay->previous_duration - dtmf_payload.duration),
      depayload->clock_rate, GST_BUFFER_TIMESTAMP (buf));

  /* If late or duplicate packet (like the redundant end packet). Ignore */
  if (dtmf_payload.duration > 0) {
    outbuf = gst_buffer_new ();
    gst_dtmf_src_generate_tone (rtpdtmfdepay, dtmf_payload, outbuf);

    GST_BUFFER_TIMESTAMP (outbuf) = rtpdtmfdepay->first_gst_ts +
        (rtpdtmfdepay->previous_duration - dtmf_payload.duration) *
        GST_SECOND / depayload->clock_rate;
    GST_BUFFER_OFFSET (outbuf) =
        (rtpdtmfdepay->previous_duration - dtmf_payload.duration) *
        GST_SECOND / depayload->clock_rate;
    GST_BUFFER_OFFSET_END (outbuf) = rtpdtmfdepay->previous_duration *
        GST_SECOND / depayload->clock_rate;

    GST_DEBUG_OBJECT (depayload,
        "timestamp : %" G_GUINT64_FORMAT " - time %" GST_TIME_FORMAT,
        GST_BUFFER_TIMESTAMP (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
  }

  return outbuf;

bad_packet:
  GST_ELEMENT_WARNING (rtpdtmfdepay, STREAM, DECODE,
      ("Packet did not validate"), (NULL));
  return NULL;
}
Developer: spunktsch | Project: svtplayer | Lines: 101


Example 17: gst_base_video_decoder_finish_frame

GstFlowReturn
gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder,
    GstVideoFrame * frame)
{
  GstBaseVideoDecoderClass *base_video_decoder_class;
  GstClockTime presentation_timestamp;
  GstClockTime presentation_duration;
  GstBuffer *src_buffer;

  GST_DEBUG ("finish frame");

  base_video_decoder_class =
      GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);

  if (!gst_base_video_decoder_set_src_caps (base_video_decoder))
    return GST_FLOW_NOT_NEGOTIATED;

  gst_base_video_decoder_calculate_timestamps (base_video_decoder, frame,
      &presentation_timestamp, &presentation_duration);

  src_buffer = frame->src_buffer;

  GST_BUFFER_FLAG_UNSET (src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  if (base_video_decoder->state.interlaced) {
#ifndef GST_VIDEO_BUFFER_TFF
#define GST_VIDEO_BUFFER_TFF (GST_MINI_OBJECT_FLAG_LAST << 5)
#endif
#ifndef GST_VIDEO_BUFFER_RFF
#define GST_VIDEO_BUFFER_RFF (GST_MINI_OBJECT_FLAG_LAST << 6)
#endif
#ifndef GST_VIDEO_BUFFER_ONEFIELD
#define GST_VIDEO_BUFFER_ONEFIELD (GST_MINI_OBJECT_FLAG_LAST << 7)
#endif

    if (GST_VIDEO_FRAME_FLAG_IS_SET (frame, GST_VIDEO_FRAME_FLAG_TFF)) {
      GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_TFF);
    } else {
      GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_TFF);
    }
    GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_RFF);
    GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD);
    if (frame->n_fields == 3) {
      GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_RFF);
    } else if (frame->n_fields == 1) {
      GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD);
    }
  }

  if (base_video_decoder->discont) {
    GST_BUFFER_FLAG_UNSET (src_buffer, GST_BUFFER_FLAG_DISCONT);
    base_video_decoder->discont = FALSE;
  }

  GST_BUFFER_TIMESTAMP (src_buffer) = presentation_timestamp;
  GST_BUFFER_DURATION (src_buffer) = presentation_duration;
  GST_BUFFER_OFFSET (src_buffer) = GST_BUFFER_OFFSET_NONE;
  GST_BUFFER_OFFSET_END (src_buffer) = GST_BUFFER_OFFSET_NONE;

  GST_DEBUG ("pushing frame %" GST_TIME_FORMAT,
      GST_TIME_ARGS (presentation_timestamp));

  if (base_video_decoder->sink_clipping) {
    gint64 start = GST_BUFFER_TIMESTAMP (src_buffer);
    gint64 stop = GST_BUFFER_TIMESTAMP (src_buffer) +
        GST_BUFFER_DURATION (src_buffer);

    if (gst_segment_clip (&base_video_decoder->segment, GST_FORMAT_TIME,
            start, stop, &start, &stop)) {
      GST_BUFFER_TIMESTAMP (src_buffer) = start;
      GST_BUFFER_DURATION (src_buffer) = stop - start;
      GST_DEBUG ("accepting buffer inside segment: %" GST_TIME_FORMAT
          " %" GST_TIME_FORMAT
          " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
          " time %" GST_TIME_FORMAT,
          GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)),
          GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer) +
              GST_BUFFER_DURATION (src_buffer)),
          GST_TIME_ARGS (base_video_decoder->segment.start),
          GST_TIME_ARGS (base_video_decoder->segment.stop),
          GST_TIME_ARGS (base_video_decoder->segment.time));
    } else {
      GST_DEBUG ("dropping buffer outside segment: %" GST_TIME_FORMAT
          " %" GST_TIME_FORMAT
          " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
          " time %" GST_TIME_FORMAT,
          GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)),
          GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer) +
              GST_BUFFER_DURATION (src_buffer)),
          GST_TIME_ARGS (base_video_decoder->segment.start),
          GST_TIME_ARGS (base_video_decoder->segment.stop),
          GST_TIME_ARGS (base_video_decoder->segment.time));
      gst_video_frame_unref (frame);
      return GST_FLOW_OK;
    }
  }

  gst_buffer_ref (src_buffer);
  gst_video_frame_unref (frame);
//......... some code omitted here .........
Developer: collects | Project: gst-plugins-bad | Lines: 101


Example 18: gst_segment_clip

HRESULT AudioFakeSink::DoRenderSample (IMediaSample * pMediaSample)
{
  GstBuffer *out_buf = NULL;
  gboolean in_seg = FALSE;
  GstClockTime buf_start, buf_stop;
  gint64 clip_start = 0, clip_stop = 0;
  guint start_offset = 0, stop_offset;
  GstClockTime duration;

  if (pMediaSample) {
    BYTE *pBuffer = NULL;
    LONGLONG lStart = 0, lStop = 0;
    long size = pMediaSample->GetActualDataLength ();

    pMediaSample->GetPointer (&pBuffer);
    pMediaSample->GetTime (&lStart, &lStop);

    if (!GST_CLOCK_TIME_IS_VALID (mDec->timestamp)) {
      // Convert REFERENCE_TIME to GST_CLOCK_TIME
      mDec->timestamp = (GstClockTime) lStart * 100;
    }
    duration = (lStop - lStart) * 100;

    buf_start = mDec->timestamp;
    buf_stop = mDec->timestamp + duration;

    /* save stop position to start next buffer with it */
    mDec->timestamp = buf_stop;

    /* check if this buffer is in our current segment */
    in_seg = gst_segment_clip (mDec->segment, GST_FORMAT_TIME,
        buf_start, buf_stop, &clip_start, &clip_stop);

    /* if the buffer is out of segment do not push it downstream */
    if (!in_seg) {
      GST_DEBUG_OBJECT (mDec,
          "buffer is out of segment, start %" GST_TIME_FORMAT " stop %"
          GST_TIME_FORMAT, GST_TIME_ARGS (buf_start), GST_TIME_ARGS (buf_stop));
      goto done;
    }

    /* buffer is entirely or partially in-segment, so allocate a
     * GstBuffer for output, and clip if required */

    /* allocate a new buffer for raw audio */
    mDec->last_ret = gst_pad_alloc_buffer (mDec->srcpad,
        GST_BUFFER_OFFSET_NONE,
        size,
        GST_PAD_CAPS (mDec->srcpad), &out_buf);
    if (!out_buf) {
      GST_WARNING_OBJECT (mDec, "cannot allocate a new GstBuffer");
      goto done;
    }

    /* set buffer properties */
    GST_BUFFER_TIMESTAMP (out_buf) = buf_start;
    GST_BUFFER_DURATION (out_buf) = duration;
    memcpy (GST_BUFFER_DATA (out_buf), pBuffer,
        MIN ((unsigned int) size, GST_BUFFER_SIZE (out_buf)));

    /* we have to remove some heading samples */
    if ((GstClockTime) clip_start > buf_start) {
      start_offset = (guint) gst_util_uint64_scale_int (clip_start - buf_start,
          mDec->rate, GST_SECOND) * mDec->depth / 8 * mDec->channels;
    } else
      start_offset = 0;
    /* we have to remove some trailing samples */
    if ((GstClockTime) clip_stop < buf_stop) {
      stop_offset = (guint) gst_util_uint64_scale_int (buf_stop - clip_stop,
          mDec->rate, GST_SECOND) * mDec->depth / 8 * mDec->channels;
    } else
      stop_offset = size;

    /* truncating */
    if ((start_offset != 0) || (stop_offset != (size_t) size)) {
      GstBuffer *subbuf = gst_buffer_create_sub (out_buf, start_offset,
          stop_offset - start_offset);

      if (subbuf) {
        gst_buffer_set_caps (subbuf, GST_PAD_CAPS (mDec->srcpad));
        gst_buffer_unref (out_buf);
        out_buf = subbuf;
      }
    }

    GST_BUFFER_TIMESTAMP (out_buf) = clip_start;
    GST_BUFFER_DURATION (out_buf) = clip_stop - clip_start;

    /* replace the saved stop position by the clipped one */
    mDec->timestamp = clip_stop;

    GST_DEBUG_OBJECT (mDec,
        "push_buffer (size %d)=> pts %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT
        " duration %" GST_TIME_FORMAT, size,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (out_buf)),
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (out_buf) +
            GST_BUFFER_DURATION (out_buf)),
//......... some code omitted here .........
Developer: spunktsch | Project: svtplayer | Lines: 101


Example 19: gst_base_video_parse_src_query

static gboolean
gst_base_video_parse_src_query (GstPad * pad, GstQuery * query)
{
  GstBaseVideoParse *base_parse;
  gboolean res = FALSE;

  base_parse = GST_BASE_VIDEO_PARSE (gst_pad_get_parent (pad));

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      GstFormat format;
      gint64 time;
      gint64 value;

      gst_query_parse_position (query, &format, NULL);

      time = gst_util_uint64_scale (base_parse->presentation_frame_number,
          base_parse->state.fps_n, base_parse->state.fps_d);
      time += base_parse->segment.time;
      GST_DEBUG ("query position %" GST_TIME_FORMAT, GST_TIME_ARGS (time));
      res = gst_base_video_encoded_video_convert (&base_parse->state,
          GST_FORMAT_TIME, time, &format, &value);
      if (!res)
        goto error;

      gst_query_set_position (query, format, value);
      break;
    }
    case GST_QUERY_DURATION:
      res =
          gst_pad_query (GST_PAD_PEER (GST_BASE_VIDEO_CODEC_SINK_PAD
              (base_parse)), query);
      if (!res)
        goto error;
      break;
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      GST_WARNING ("query convert");

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      res = gst_base_video_encoded_video_convert (&base_parse->state,
          src_fmt, src_val, &dest_fmt, &dest_val);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
      break;
  }
done:
  gst_object_unref (base_parse);

  return res;
error:
  GST_DEBUG_OBJECT (base_parse, "query failed");
  goto done;
}
Developer: adesurya | Project: gst-mobile | Lines: 63


Example 20: gst_base_video_parse_src_event

static gboolean
gst_base_video_parse_src_event (GstPad * pad, GstEvent * event)
{
  GstBaseVideoParse *base_video_parse;
  gboolean res = FALSE;

  base_video_parse = GST_BASE_VIDEO_PARSE (gst_pad_get_parent (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEEK:
    {
      GstFormat format, tformat;
      gdouble rate;
      GstEvent *real_seek;
      GstSeekFlags flags;
      GstSeekType cur_type, stop_type;
      gint64 cur, stop;
      gint64 tcur, tstop;

      gst_event_parse_seek (event, &rate, &format, &flags, &cur_type,
          &cur, &stop_type, &stop);
      gst_event_unref (event);

      tformat = GST_FORMAT_TIME;
      res = gst_base_video_encoded_video_convert (&base_video_parse->state,
          format, cur, &tformat, &tcur);
      if (!res)
        goto convert_error;
      res = gst_base_video_encoded_video_convert (&base_video_parse->state,
          format, stop, &tformat, &tstop);
      if (!res)
        goto convert_error;

      real_seek = gst_event_new_seek (rate, GST_FORMAT_TIME,
          flags, cur_type, tcur, stop_type, tstop);

      res =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_parse),
          real_seek);
      break;
    }
#if 0
    case GST_EVENT_QOS:
    {
      gdouble proportion;
      GstClockTimeDiff diff;
      GstClockTime timestamp;

      gst_event_parse_qos (event, &proportion, &diff, &timestamp);

      GST_OBJECT_LOCK (base_video_parse);
      base_video_parse->proportion = proportion;
      base_video_parse->earliest_time = timestamp + diff;
      GST_OBJECT_UNLOCK (base_video_parse);

      GST_DEBUG_OBJECT (base_video_parse,
          "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT,
          GST_TIME_ARGS (timestamp), diff);

      res = gst_pad_push_event (base_video_parse->sinkpad, event);
      break;
    }
#endif
    default:
      res =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_parse),
          event);
      break;
  }
done:
  gst_object_unref (base_video_parse);
  return res;

convert_error:
  GST_DEBUG_OBJECT (base_video_parse, "could not convert format");
  goto done;
}
Developer: adesurya | Project: gst-mobile | Lines: 78


Example 21: gst_base_video_parse_sink_event

static gboolean
gst_base_video_parse_sink_event (GstPad * pad, GstEvent * event)
{
  GstBaseVideoParse *base_video_parse;
  gboolean ret = FALSE;

  base_video_parse = GST_BASE_VIDEO_PARSE (gst_pad_get_parent (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_START:
      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_parse),
          event);
      break;
    case GST_EVENT_FLUSH_STOP:
      gst_base_video_parse_reset (base_video_parse);
      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_parse),
          event);
      break;
    case GST_EVENT_EOS:
      if (gst_base_video_parse_push_all (base_video_parse,
              FALSE) == GST_FLOW_ERROR) {
        gst_event_unref (event);
        return FALSE;
      }
      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_parse),
          event);
      break;
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      GstFormat format;
      gdouble rate;
      gint64 start, stop, time;

      gst_event_parse_new_segment (event, &update, &rate, &format, &start,
          &stop, &time);

      if (format != GST_FORMAT_TIME)
        goto newseg_wrong_format;

      if (rate <= 0.0)
        goto newseg_wrong_rate;

      GST_DEBUG ("newsegment %" GST_TIME_FORMAT " %" GST_TIME_FORMAT,
          GST_TIME_ARGS (start), GST_TIME_ARGS (time));
      gst_segment_set_newsegment (&base_video_parse->segment, update,
          rate, format, start, stop, time);

      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_parse),
          event);
      break;
    }
    default:
      ret =
          gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_parse),
          event);
      break;
  }
done:
  gst_object_unref (base_video_parse);
  return ret;

newseg_wrong_format:
  GST_DEBUG_OBJECT (base_video_parse, "received non TIME newsegment");
  gst_event_unref (event);
  goto done;

newseg_wrong_rate:
  GST_DEBUG_OBJECT (base_video_parse, "negative rates not supported");
  gst_event_unref (event);
  goto done;
}
Developer ID: adesurya, Project: gst-mobile, Lines of code: 77
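The NEWSEGMENT branch accepts only TIME-format segments with a positive rate; everything else is rejected through the two error labels. For reference, a hedged sketch of how an upstream GStreamer 0.10-era element could build an event that passes both guards — push_time_newsegment and its pad argument are hypothetical names, not part of the example above:

#include <gst/gst.h>

/* Build a newsegment event the handler above accepts:
 * TIME format, rate 1.0 > 0, open-ended stop (-1). */
static gboolean
push_time_newsegment (GstPad * pad)
{
  GstEvent *ev;

  /* update = FALSE, rate = 1.0, start = 0, stop = -1, position = 0 */
  ev = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, -1, 0);
  return gst_pad_push_event (pad, ev);
}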


Example 22: gst_base_video_parse_finish_frame

GstFlowReturn
gst_base_video_parse_finish_frame (GstBaseVideoParse * base_video_parse)
{
  GstVideoFrame *frame = base_video_parse->current_frame;
  GstBuffer *buffer;
  GstBaseVideoParseClass *base_video_parse_class;
  GstFlowReturn ret;

  GST_DEBUG ("finish_frame");

  base_video_parse_class = GST_BASE_VIDEO_PARSE_GET_CLASS (base_video_parse);

  buffer = gst_adapter_take_buffer (base_video_parse->output_adapter,
      gst_adapter_available (base_video_parse->output_adapter));

  if (frame->is_sync_point) {
    base_video_parse->timestamp_offset = base_video_parse->last_timestamp -
        gst_util_uint64_scale (frame->presentation_frame_number,
        base_video_parse->state.fps_d * GST_SECOND,
        base_video_parse->state.fps_n);
    base_video_parse->distance_from_sync = 0;
  }

  frame->distance_from_sync = base_video_parse->distance_from_sync;
  base_video_parse->distance_from_sync++;

  frame->presentation_timestamp =
      gst_base_video_parse_get_timestamp (base_video_parse,
      frame->presentation_frame_number);
  frame->presentation_duration =
      gst_base_video_parse_get_timestamp (base_video_parse,
      frame->presentation_frame_number + 1) - frame->presentation_timestamp;
  frame->decode_timestamp =
      gst_base_video_parse_get_timestamp (base_video_parse,
      frame->decode_frame_number);

  GST_BUFFER_TIMESTAMP (buffer) = frame->presentation_timestamp;
  GST_BUFFER_DURATION (buffer) = frame->presentation_duration;
  if (frame->decode_frame_number < 0) {
    GST_BUFFER_OFFSET (buffer) = 0;
  } else {
    GST_BUFFER_OFFSET (buffer) = frame->decode_timestamp;
  }
  GST_BUFFER_OFFSET_END (buffer) = GST_CLOCK_TIME_NONE;

  GST_DEBUG ("pts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (frame->presentation_timestamp));
  GST_DEBUG ("dts %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->decode_timestamp));
  GST_DEBUG ("dist %d", frame->distance_from_sync);

  if (frame->is_sync_point) {
    GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  } else {
    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  }

  frame->src_buffer = buffer;
  ret = base_video_parse_class->shape_output (base_video_parse, frame);

  gst_base_video_parse_free_frame (base_video_parse->current_frame);

  /* create new frame */
  base_video_parse->current_frame =
      gst_base_video_parse_new_frame (base_video_parse);

  return ret;
}
Developer ID: adesurya, Project: gst-mobile, Lines of code: 67
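gst_base_video_parse_get_timestamp() itself is not shown in this article; from the sync-point handling above it can be assumed to interpolate frame_number * fps_d / fps_n seconds past the offset that is re-anchored at every sync point. A hedged sketch under that assumption:

#include <gst/gst.h>

/* Assumed behaviour of gst_base_video_parse_get_timestamp(): map a frame
 * number to a clock time using the stream frame rate (fps_n / fps_d). */
static GstClockTime
get_timestamp_sketch (GstClockTime timestamp_offset, gint64 frame_number,
    int fps_n, int fps_d)
{
  if (frame_number < 0)
    return GST_CLOCK_TIME_NONE;

  /* gst_util_uint64_scale (v, num, denom) computes v * num / denom
   * without 64-bit overflow */
  return timestamp_offset +
      gst_util_uint64_scale (frame_number, fps_d * GST_SECOND, fps_n);
}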


Example 23: update_rms_from_buffer

static GstMessage *
update_rms_from_buffer (GstVideoFrameAudioLevel * self, GstBuffer * inbuf)
{
  GstMapInfo map;
  guint8 *in_data;
  gsize in_size;
  gdouble CS;
  guint i;
  guint num_frames, frames;
  guint num_int_samples = 0;    /* number of interleaved samples,
                                 * i.e. total count for all channels combined */
  gint channels, rate, bps;
  GValue v = G_VALUE_INIT;
  GValue va = G_VALUE_INIT;
  GValueArray *a;
  GstStructure *s;
  GstMessage *msg;
  GstClockTime duration, running_time;

  channels = GST_AUDIO_INFO_CHANNELS (&self->ainfo);
  bps = GST_AUDIO_INFO_BPS (&self->ainfo);
  rate = GST_AUDIO_INFO_RATE (&self->ainfo);

  gst_buffer_map (inbuf, &map, GST_MAP_READ);
  in_data = map.data;
  in_size = map.size;

  num_int_samples = in_size / bps;

  GST_LOG_OBJECT (self, "analyzing %u sample frames at ts %" GST_TIME_FORMAT,
      num_int_samples, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)));

  g_return_val_if_fail (num_int_samples % channels == 0, NULL);

  num_frames = num_int_samples / channels;
  frames = num_frames;
  duration = GST_FRAMES_TO_CLOCK_TIME (frames, rate);
  if (num_frames > 0) {
    for (i = 0; i < channels; ++i) {
      self->process (in_data + (bps * i), num_int_samples, channels, &CS);
      GST_LOG_OBJECT (self,
          "[%d]: cumulative squares %lf, over %d samples/%d channels",
          i, CS, num_int_samples, channels);
      self->CS[i] += CS;
    }
    in_data += num_frames * bps;
    self->total_frames += num_frames;
  }
  running_time =
      self->first_time + gst_util_uint64_scale (self->total_frames, GST_SECOND,
      rate);

  a = g_value_array_new (channels);
  s = gst_structure_new ("videoframe-audiolevel", "running-time", G_TYPE_UINT64,
      running_time, "duration", G_TYPE_UINT64, duration, NULL);
  g_value_init (&v, G_TYPE_DOUBLE);
  g_value_init (&va, G_TYPE_VALUE_ARRAY);
  for (i = 0; i < channels; i++) {
    gdouble rms;
    if (frames == 0 || self->CS[i] == 0) {
      rms = 0;                  /* empty buffer */
    } else {
      rms = sqrt (self->CS[i] / frames);
    }
    self->CS[i] = 0.0;
    g_value_set_double (&v, rms);
    g_value_array_append (a, &v);
  }
  g_value_take_boxed (&va, a);
  gst_structure_take_value (s, "rms", &va);
  msg = gst_message_new_element (GST_OBJECT (self), s);
  gst_buffer_unmap (inbuf, &map);
  return msg;
}
Developer ID: 0p1pp1, Project: gst-plugins-bad, Lines of code: 78
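The heavy lifting above is the per-channel accumulation of squared samples (self->CS[i]) followed by rms = sqrt(CS / frames). A minimal sketch of that math, assuming samples have already been converted to doubles (in the element, the format-specific self->process callback performs the accumulation):

#include <math.h>
#include <stddef.h>

/* RMS of one channel of interleaved audio over num_frames frames. */
static double
channel_rms (const double *samples, size_t num_frames, int channels,
    int channel)
{
  double cs = 0.0;              /* cumulative squares, like self->CS[i] */
  size_t i;

  for (i = 0; i < num_frames; i++) {
    double s = samples[i * channels + channel];
    cs += s * s;
  }
  return num_frames ? sqrt (cs / num_frames) : 0.0;   /* empty buffer -> 0 */
}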


Example 24: mfw_gst_vpuenc_chain

//......... part of the code omitted here .........
	i = 0;
	if (vpu_enc->memory == V4L2_MEMORY_USERPTR) {
		for (i = 0; i < NUM_BUFFERS; i++) {
			if (vpu_enc->buf_v4l2[i].m.userptr == (long int)GST_BUFFER_DATA (buffer))
				break;
		}
		if (i == NUM_BUFFERS) {
			for (i = 0; i < NUM_BUFFERS; i++) {
				if (!vpu_enc->buf_v4l2[i].m.userptr)
					break;
			}
		}
		i = i % NUM_BUFFERS;
	}

	if (i == NUM_BUFFERS) {
		printf("NO BUFFER AVAILABLE\n");
		return GST_FLOW_ERROR;
	}

	if (!buffer)
		return GST_FLOW_OK;

	if (vpu_enc->memory == V4L2_MEMORY_MMAP) {
		/* copy the input Frame into the allocated buffer */
		memcpy(vpu_enc->buf_data[i], GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE(buffer));
		gst_buffer_unref(buffer);
	} else {
		vpu_enc->buf_v4l2[i].m.userptr = (long int)GST_BUFFER_DATA (buffer);
		vpu_enc->buf_v4l2[i].length = GST_BUFFER_SIZE (buffer);
	}

	pollfd.fd = vpu_enc->vpu_fd;
	pollfd.events = POLLIN | POLLOUT;

	ret = ioctl(vpu_enc->vpu_fd, VIDIOC_QBUF, &vpu_enc->buf_v4l2[i]);
	if (ret) {
		if (vpu_enc->memory == V4L2_MEMORY_USERPTR) {
			/* fallback to mmap */
			vpu_enc->init = FALSE;
			vpu_enc->memory = V4L2_MEMORY_MMAP;
			GST_WARNING("mfw_gst_vpuenc_chain: fallback to mmap");
			return mfw_gst_vpuenc_chain(pad, buffer);
		}
		GST_ERROR("VIDIOC_QBUF failed: %s\n", strerror(errno));
		return GST_FLOW_ERROR;
	}

	if (!vpu_enc->once) {
		retval = ioctl(vpu_enc->vpu_fd, VIDIOC_STREAMON, &type);
		if (retval) {
			printf("streamon failed with %d", retval);
			return GST_FLOW_ERROR;
		}
		vpu_enc->once = 1;
	}

	ret = ioctl(vpu_enc->vpu_fd, VIDIOC_DQBUF, &vpu_enc->buf_v4l2[0]);
	if (ret) {
		GST_ERROR("VIDIOC_DQBUF failed: %s\n", strerror(errno));
		return GST_FLOW_ERROR;
	}

	if (vpu_enc->memory == V4L2_MEMORY_USERPTR) {
		gst_buffer_unref(buffer);
	}

	src_caps = GST_PAD_CAPS(vpu_enc->srcpad);

	retval = gst_pad_alloc_buffer_and_set_caps(vpu_enc->srcpad,
			0, 1024 * 1024, src_caps, &outbuffer);
	if (retval != GST_FLOW_OK) {
		GST_ERROR("Allocating buffer failed with %d", ret);
		return retval;
	}

	ret = read(vpu_enc->vpu_fd, GST_BUFFER_DATA(outbuffer), 1024 * 1024);
	if (ret < 0) {
		printf("read failed: %s\n", strerror(errno));
		return GST_FLOW_ERROR;
	}
	GST_BUFFER_SIZE(outbuffer) = ret;

	GST_BUFFER_TIMESTAMP(outbuffer) = gst_util_uint64_scale(vpu_enc->encoded_frames,
		1 * GST_SECOND,
		vpu_enc->framerate);

	vpu_enc->encoded_frames++;

	GST_DEBUG_OBJECT(vpu_enc, "frame encoded : %lld ts = %" GST_TIME_FORMAT,
			vpu_enc->encoded_frames,
			GST_TIME_ARGS(GST_BUFFER_TIMESTAMP(outbuffer)));

	retval = gst_pad_push(vpu_enc->srcpad, outbuffer);
	if (retval != GST_FLOW_OK) {
		GST_ERROR("Pushing Output onto the source pad failed with %d\n",
			  retval);
	}

	return retval;
}
Developer ID: jmartinc, Project: gst-plugins-fsl-vpu, Lines of code: 101
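The timestamping rule near the end is worth isolating: frame N of a constant-rate stream is stamped N / framerate seconds, computed overflow-safely with gst_util_uint64_scale. A sketch with illustrative numbers:

#include <gst/gst.h>

/* Timestamp for the Nth encoded frame of a constant-rate stream. */
static GstClockTime
stamp_for_frame (guint64 encoded_frames, gint framerate)
{
  return gst_util_uint64_scale (encoded_frames, GST_SECOND, framerate);
}

/* stamp_for_frame (450, 30) == 15 * GST_SECOND, which GST_TIME_ARGS
 * renders as 0:00:15.000000000 */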


Example 25: gst_rtp_dv_pay_handle_buffer

/* Get a DV frame, chop it up in pieces, and push the pieces to the RTP layer. */
static GstFlowReturn
gst_rtp_dv_pay_handle_buffer (GstRTPBasePayload * basepayload,
    GstBuffer * buffer)
{
  GstRTPDVPay *rtpdvpay;
  guint max_payload_size;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  gint hdrlen;
  gsize size;
  GstMapInfo map;
  guint8 *data;
  guint8 *dest;
  guint filled;
  GstRTPBuffer rtp = { NULL, };

  rtpdvpay = GST_RTP_DV_PAY (basepayload);

  hdrlen = gst_rtp_buffer_calc_header_len (0);
  /* DV frames are made up from a bunch of DIF blocks. DIF blocks are 80 bytes
   * each, and we should put an integral number of them in each RTP packet.
   * Therefore, we round the available room down to the nearest multiple of 80.
   *
   * The available room is just the packet MTU, minus the RTP header length. */
  max_payload_size = ((GST_RTP_BASE_PAYLOAD_MTU (rtpdvpay) - hdrlen) / 80) * 80;

  /* The length of the buffer to transmit. */
  if (!gst_buffer_map (buffer, &map, GST_MAP_READ)) {
    GST_ELEMENT_ERROR (rtpdvpay, CORE, FAILED,
        (NULL), ("Failed to map buffer"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
  data = map.data;
  size = map.size;

  GST_DEBUG_OBJECT (rtpdvpay,
      "DV RTP payloader got buffer of %" G_GSIZE_FORMAT
      " bytes, splitting in %u byte " "payload fragments, at time %"
      GST_TIME_FORMAT, size, max_payload_size,
      GST_TIME_ARGS (GST_BUFFER_PTS (buffer)));

  if (!rtpdvpay->negotiated) {
    gst_dv_pay_negotiate (rtpdvpay, data, size);
    /* if we have not yet scanned the stream for its type, do so now */
    rtpdvpay->negotiated = TRUE;
  }

  outbuf = NULL;
  dest = NULL;
  filled = 0;

  /* while we have complete DIF chunks left */
  while (size >= 80) {
    /* Allocate a new buffer, set the timestamp */
    if (outbuf == NULL) {
      outbuf = gst_rtp_buffer_new_allocate (max_payload_size, 0, 0);
      GST_BUFFER_PTS (outbuf) = GST_BUFFER_PTS (buffer);

      if (!gst_rtp_buffer_map (outbuf, GST_MAP_WRITE, &rtp)) {
        gst_buffer_unref (outbuf);
        GST_ELEMENT_ERROR (rtpdvpay, CORE, FAILED,
            (NULL), ("Failed to map RTP buffer"));
        ret = GST_FLOW_ERROR;
        goto beach;
      }

      dest = gst_rtp_buffer_get_payload (&rtp);
      filled = 0;
    }

    /* inspect the DIF chunk, if we don't need to include it, skip to the next one. */
    if (include_dif (rtpdvpay, data)) {
      /* copy data in packet */
      memcpy (dest, data, 80);

      dest += 80;
      filled += 80;
    }

    /* go to next dif chunk */
    size -= 80;
    data += 80;

    /* push out the buffer if the next one would exceed the max packet size or
     * when we are pushing the last packet */
    if (filled + 80 > max_payload_size || size < 80) {
      if (size < 160) {
        guint hlen;

        /* set marker */
        gst_rtp_buffer_set_marker (&rtp, TRUE);

        /* shrink buffer to last packet */
        hlen = gst_rtp_buffer_get_header_len (&rtp);
        gst_rtp_buffer_set_packet_len (&rtp, hlen + filled);
      }

      /* Push out the created piece, and check for errors. */
//......... part of the code omitted here .........
Developer ID: Distrotech, Project: gst-plugins-good, Lines of code: 101
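The size computation near the top deserves a worked instance. With an illustrative 1400-byte MTU and the 12-byte RTP header that gst_rtp_buffer_calc_header_len (0) returns for zero CSRCs, (1400 - 12) / 80 rounds down to 17, so each packet carries at most 17 whole 80-byte DIF blocks:

/* Round the room left after the RTP header down to a multiple of 80. */
static guint
dif_payload_size (guint mtu, guint hdrlen)
{
  return ((mtu - hdrlen) / 80) * 80;   /* ((1400 - 12) / 80) * 80 = 1360 */
}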


Example 26: gst_base_rtp_audio_payload_push_buffer

static GstFlowReturn
gst_base_rtp_audio_payload_push_buffer (GstBaseRTPAudioPayload *
    baseaudiopayload, GstBuffer * buffer)
{
  GstBaseRTPPayload *basepayload;
  GstBaseRTPAudioPayloadPrivate *priv;
  GstBuffer *outbuf;
  GstClockTime timestamp;
  guint8 *payload;
  guint payload_len;
  GstFlowReturn ret;

  priv = baseaudiopayload->priv;
  basepayload = GST_BASE_RTP_PAYLOAD (baseaudiopayload);

  payload_len = GST_BUFFER_SIZE (buffer);
  timestamp = GST_BUFFER_TIMESTAMP (buffer);

  GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
      payload_len, GST_TIME_ARGS (timestamp));

  if (priv->buffer_list) {
    /* create just the RTP header buffer */
    outbuf = gst_rtp_buffer_new_allocate (0, 0, 0);
  } else {
    /* create buffer to hold the payload */
    outbuf = gst_rtp_buffer_new_allocate (payload_len, 0, 0);
  }

  /* set metadata */
  gst_base_rtp_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
      timestamp);

  if (priv->buffer_list) {
    GstBufferList *list;
    GstBufferListIterator *it;

    list = gst_buffer_list_new ();
    it = gst_buffer_list_iterate (list);

    /* add both buffers to the buffer list */
    gst_buffer_list_iterator_add_group (it);
    gst_buffer_list_iterator_add (it, outbuf);
    gst_buffer_list_iterator_add (it, buffer);

    gst_buffer_list_iterator_free (it);

    GST_DEBUG_OBJECT (baseaudiopayload, "Pushing list %p", list);
    ret = gst_basertppayload_push_list (basepayload, list);
  } else {
    /* copy payload */
    payload = gst_rtp_buffer_get_payload (outbuf);
    memcpy (payload, GST_BUFFER_DATA (buffer), payload_len);
    gst_buffer_unref (buffer);

    GST_DEBUG_OBJECT (baseaudiopayload, "Pushing buffer %p", outbuf);
    ret = gst_basertppayload_push (basepayload, outbuf);
  }

  return ret;
}
Developer ID: genesi, Project: gst-base-plugins, Lines of code: 61



Note: The GST_TIME_ARGS function examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's License. Please do not reproduce without permission.

