您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ AV_WB16函数代码示例

51自学网 2021-06-01 19:38:58
  C++
这篇教程C++ AV_WB16函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中AV_WB16函数的典型用法代码示例。如果您正苦于以下问题:C++ AV_WB16函数的具体用法?C++ AV_WB16怎么用?C++ AV_WB16使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了AV_WB16函数的30个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: rmff_dump_cont

/* Serialize a RealMedia CONT (content description) header into buffer.
 * Layout: 32-bit id/size, 16-bit version, then four length-prefixed
 * strings (title, author, copyright, comment).
 * Returns the number of bytes written, 0 if cont is NULL, or -1 when
 * the buffer is too small. */
static int rmff_dump_cont(rmff_cont_t *cont, char *buffer, int bufsize) {
  int pos;
  int total;

  if (!cont)
    return 0;

  total = RMFF_CONTHEADER_SIZE + cont->title_len + cont->author_len +
          cont->copyright_len + cont->comment_len;
  if (bufsize < total)
    return -1;

  AV_WB32(buffer,     cont->object_id);
  AV_WB32(buffer + 4, cont->size);
  AV_WB16(buffer + 8, cont->object_version);

  AV_WB16(buffer + 10, cont->title_len);
  memcpy(buffer + 12, cont->title, cont->title_len);
  pos = 12 + cont->title_len;

  AV_WB16(buffer + pos, cont->author_len);
  memcpy(buffer + pos + 2, cont->author, cont->author_len);
  pos += 2 + cont->author_len;

  AV_WB16(buffer + pos, cont->copyright_len);
  memcpy(buffer + pos + 2, cont->copyright, cont->copyright_len);
  pos += 2 + cont->copyright_len;

  AV_WB16(buffer + pos, cont->comment_len);
  memcpy(buffer + pos + 2, cont->comment, cont->comment_len);

  return total;
}
开发者ID:hanyong,项目名称:mplayer-kovensky,代码行数:32,


示例2: dnxhd_write_header

/**
 * Write the fixed DNxHD (VC-3) frame header into buf.
 * The magic offsets and values follow the DNxHD bitstream layout; the
 * short field-name comments (ALPF, SPL, NAL, ...) come from that spec.
 * Also records where the macroblock scan index (MSIP) table starts.
 * Always returns 0.
 */
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    /* DNxHD header identification prefix */
    const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };
    memcpy(buf, header_prefix, 5);
    /* 0x01 = progressive; for interlaced, cur_field+2 selects field 1/2 */
    buf[5] = ctx->interlaced ? ctx->cur_field+2 : 0x01;
    buf[6] = 0x80; // crc flag off
    buf[7] = 0xa0; // reserved
    AV_WB16(buf + 0x18, avctx->height); // ALPF
    AV_WB16(buf + 0x1a, avctx->width);  // SPL
    AV_WB16(buf + 0x1d, avctx->height); // NAL
    buf[0x21] = 0x38; // FIXME 8 bit per comp
    buf[0x22] = 0x88 + (ctx->frame.interlaced_frame<<2);
    AV_WB32(buf + 0x28, ctx->cid); // CID
    buf[0x2c] = ctx->interlaced ? 0 : 0x80;
    buf[0x5f] = 0x01; // UDL
    buf[0x167] = 0x02; // reserved
    AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4); // MSIPS
    buf[0x16d] = ctx->m.mb_height; // Ns
    buf[0x16f] = 0x10; // reserved
    /* macroblock scan index table is filled in later by the encoder */
    ctx->msip = buf + 0x170;
    return 0;
}
开发者ID:beequ7et,项目名称:cinelerra-cv,代码行数:28,


示例3: ff_rtp_send_aac

/**
 * Queue one AAC frame for RTP transmission (RFC 3640 style packets:
 * a 16-bit AU-headers-length field, 2-byte AU headers, then payloads).
 * Flushes the aggregation buffer first when it is full, would overflow,
 * or has exceeded the muxer's max delay; frames larger than one packet
 * are sent as fragments.
 */
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    AVStream *st = s1->streams[0];
    /* 2 bytes AU-headers-length + 2 bytes per AU header */
    const int max_au_headers_size = 2 + 2 * s->max_frames_per_packet;
    int len, max_packet_size = s->max_payload_size - max_au_headers_size;
    uint8_t *p;
    /* skip ADTS header, if present */
    if ((s1->streams[0]->codecpar->extradata_size) == 0) {
        size -= 7;
        buff += 7;
    }
    /* test if the packet must be sent */
    len = (s->buf_ptr - s->buf);
    if (s->num_frames &&
        (s->num_frames == s->max_frames_per_packet ||
         (len + size) > s->max_payload_size ||
         av_compare_ts(s->cur_timestamp - s->timestamp, st->time_base,
                       s1->max_delay, AV_TIME_BASE_Q) >= 0)) {
        int au_size = s->num_frames * 2;
        /* move the AU headers so they sit directly before the payload */
        p = s->buf + max_au_headers_size - au_size - 2;
        if (p != s->buf) {
            memmove(p + 2, s->buf + 2, au_size);
        }
        /* Write the AU header size (in bits) */
        AV_WB16(p, au_size * 8);
        ff_rtp_send_data(s1, p, s->buf_ptr - p, 1);
        s->num_frames = 0;
    }
    if (s->num_frames == 0) {
        /* start a fresh packet: reserve room for the AU header block */
        s->buf_ptr = s->buf + max_au_headers_size;
        s->timestamp = s->cur_timestamp;
    }
    if (size <= max_packet_size) {
        /* frame fits: append a 2-byte AU header (size in bits) + payload */
        p = s->buf + s->num_frames++ * 2 + 2;
        AV_WB16(p, size * 8);
        memcpy(s->buf_ptr, buff, size);
        s->buf_ptr += size;
    } else {
        /* frame too big for one packet: send fragments, each carrying a
         * single AU header with the full AU size */
        int au_size = size;
        max_packet_size = s->max_payload_size - 4;
        p = s->buf;
        AV_WB16(p, 2 * 8);
        while (size > 0) {
            len = FFMIN(size, max_packet_size);
            AV_WB16(&p[2], au_size * 8);
            memcpy(p + 4, buff, len);
            /* marker bit set only on the final fragment */
            ff_rtp_send_data(s1, p, len + 4, len == size);
            size -= len;
            buff += len;
        }
    }
}
开发者ID:AVLeo,项目名称:libav,代码行数:60,


示例4: mp3_update_xing

/**
 * Patch the previously written Xing/Info frame in place and rewrite it:
 * frame/byte counts, 100-entry seek TOC, optional replaygain fields,
 * trailing audio size/CRC, and finally the Xing tag's own CRC.
 * Seeks back to the frame offset, writes, then returns to EOF.
 */
static void mp3_update_xing(AVFormatContext *s)
{
    MP3Context  *mp3 = s->priv_data;
    AVReplayGain *rg;
    uint16_t tag_crc;
    uint8_t *toc;
    int i, rg_size;

    /* replace "Xing" identification string with "Info" for CBR files. */
    if (!mp3->has_variable_bitrate)
        AV_WL32(mp3->xing_frame + mp3->xing_offset, MKTAG('I', 'n', 'f', 'o'));

    AV_WB32(mp3->xing_frame + mp3->xing_offset + 8,  mp3->frames);
    AV_WB32(mp3->xing_frame + mp3->xing_offset + 12, mp3->size);

    /* seek TOC: each entry is a byte position scaled to 0..255 */
    toc    = mp3->xing_frame + mp3->xing_offset + 16;
    toc[0] = 0;  // first toc entry has to be zero.
    for (i = 1; i < XING_TOC_SIZE; ++i) {
        int j = i * mp3->pos / XING_TOC_SIZE;
        int seek_point = 256LL * mp3->bag[j] / mp3->size;
        toc[i] = FFMIN(seek_point, 255);
    }

    /* write replaygain */
    rg = (AVReplayGain*)av_stream_get_side_data(s->streams[0], AV_PKT_DATA_REPLAYGAIN,
                                                &rg_size);
    if (rg && rg_size >= sizeof(*rg)) {
        uint16_t val;
        AV_WB32(mp3->xing_frame + mp3->xing_offset + 131,
                av_rescale(rg->track_peak, 1 << 23, 100000));
        /* gain fields: 9-bit magnitude, sign bit, and an originator code */
        if (rg->track_gain != INT32_MIN) {
            val  = FFABS(rg->track_gain / 10000) & ((1 << 9) - 1);
            val |= (rg->track_gain < 0) << 9;
            val |= 1 << 13;
            AV_WB16(mp3->xing_frame + mp3->xing_offset + 135, val);
        }
        if (rg->album_gain != INT32_MIN) {
            val  = FFABS(rg->album_gain / 10000) & ((1 << 9) - 1);
            val |= (rg->album_gain < 0) << 9;
            val |= 1 << 14;
            AV_WB16(mp3->xing_frame + mp3->xing_offset + 137, val);
        }
    }

    AV_WB32(mp3->xing_frame + mp3->xing_offset + XING_SIZE - 8, mp3->audio_size);
    AV_WB16(mp3->xing_frame + mp3->xing_offset + XING_SIZE - 4, mp3->audio_crc);

    /* CRC over the first 190 bytes of the frame, stored last */
    tag_crc = av_crc(av_crc_get_table(AV_CRC_16_ANSI_LE), 0, mp3->xing_frame, 190);
    AV_WB16(mp3->xing_frame + mp3->xing_offset + XING_SIZE - 2, tag_crc);

    avio_seek(s->pb,  mp3->xing_frame_offset,
SEEK_SET);
    avio_write(s->pb, mp3->xing_frame, mp3->xing_frame_size);
    avio_seek(s->pb, 0, SEEK_END);
}
开发者ID:founderznd,项目名称:libav,代码行数:57,


示例5: qdm2_restore_block

/**
 * Adds a superblock header around a set of subpackets.
 * Picks the first cached subpacket stream with pending data, writes a
 * superblock header (1- or 3-byte form depending on length), optionally
 * reserves a checksum slot for block types 2/4, copies the payload and
 * fills in the byte-sum checksum at the end.
 *
 * @return <0 on error, else 0.
 */
static int qdm2_restore_block(PayloadContext *qdm, AVStream *st, AVPacket *pkt)
{
    int to_copy, n, res, include_csum;
    uint8_t *p, *csum_pos = NULL;

    /* create packet to hold subpkts into a superblock */
    assert(qdm->cache > 0);
    for (n = 0; n < 0x80; n++)
        if (qdm->len[n] > 0)
            break;
    assert(n < 0x80);

    if ((res = av_new_packet(pkt, qdm->block_size)) < 0)
        return res;
    memset(pkt->data, 0, pkt->size);
    pkt->stream_index  = st->index;
    p                  = pkt->data;

    /* superblock header: long form (type|0x80 + 16-bit length) for
     * payloads over 255 bytes, short form (type + 8-bit length) otherwise */
    if (qdm->len[n] > 0xff) {
        *p++ = qdm->block_type | 0x80;
        AV_WB16(p, qdm->len[n]);
        p   += 2;
    } else {
        *p++ = qdm->block_type;
        *p++ = qdm->len[n];
    }
    /* block types 2 and 4 carry a 16-bit checksum right after the header */
    if ((include_csum = (qdm->block_type == 2 || qdm->block_type == 4))) {
        csum_pos = p;
        p       += 2;
    }

    /* subpacket data */
    to_copy = FFMIN(qdm->len[n], pkt->size - (p - pkt->data));
    memcpy(p, qdm->buf[n], to_copy);
    qdm->len[n] = 0;

    /* checksum header: simple byte sum over the whole superblock */
    if (include_csum) {
        unsigned int total = 0;
        uint8_t *q;
        for (q = pkt->data; q < &pkt->data[qdm->block_size]; q++)
            total += *q;
        AV_WB16(csum_pos, (uint16_t) total);
    }

    return 0;
}
开发者ID:upsilon,项目名称:libav,代码行数:54,


示例6: get_theora_extradata

/*
 * Build libavcodec extradata for Theora from the GStreamer caps'
 * "streamheader" array: each header buffer is stored with a 16-bit
 * big-endian length prefix. Leaves ctx->extradata unset if the caps
 * carry no stream headers.
 */
static void get_theora_extradata(AVCodecContext *ctx,
		GstStructure *in_struc)
{
	const GValue *array;
	const GValue *value;
	GstBuffer *buf;
	size_t size = 0;
	uint8_t *p;

	array = gst_structure_get_value(in_struc, "streamheader");
	if (!array)
		return;

	/* get size: 2 length bytes plus payload per header packet */
	for (unsigned i = 0; i < gst_value_array_get_size(array); i++) {
		value = gst_value_array_get_value(array, i);
		buf = gst_value_get_buffer(value);
		size += buf->size + 2;
	}

	/* fill it up; bail out on OOM instead of dereferencing NULL
	 * (the original wrote through p without checking) */
	ctx->extradata = p = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
	if (!p) {
		ctx->extradata_size = 0;
		return;
	}
	for (unsigned i = 0; i < gst_value_array_get_size(array); i++) {
		value = gst_value_array_get_value(array, i);
		buf = gst_value_get_buffer(value);
		AV_WB16(p, buf->size);
		p += 2;
		memcpy(p, buf->data, buf->size);
		p += buf->size;
	}
	ctx->extradata_size = p - ctx->extradata;
}
开发者ID:ceyusa,项目名称:gst-av,代码行数:32,


示例7: tcp_write_packet

/**
 * Flush the dynamic buffer of queued RTP/RTCP packets over the RTSP TCP
 * control connection, wrapping each packet in the 4-byte interleaved
 * framing ('$', channel id, 16-bit length) per RFC 2326 section 10.12.
 * Reopens the dynamic packet buffer afterwards. Always returns 0.
 */
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t interleave_header[4];

    size = url_close_dyn_buf(rtpctx->pb, &buf);
    ptr = buf;
    /* each queued packet is prefixed by a 32-bit length in the buffer */
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        ptr += 4;
        size -= 4;
        /* stop on truncated or nonsensical entries */
        if (packet_len > size || packet_len < 2)
            break;
        /* second payload byte distinguishes RTCP packet types 200..204 */
        if (ptr[1] >= 200 && ptr[1] <= 204)
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        url_write(rt->rtsp_hd, interleave_header, 4);
        url_write(rt->rtsp_hd, ptr, packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    url_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
    return 0;
}
开发者ID:allweax,项目名称:ffmpeg-msvc,代码行数:33,


示例8: concatenate_packet

/** Concatenate an ogg_packet into the extradata.
 * Each packet is appended with a 16-bit big-endian length prefix and
 * *offset is advanced past the copied data.
 * @return 0 on success, a negative AVERROR code on failure. */
static int concatenate_packet(unsigned int* offset,
                              AVCodecContext* avc_context,
                              const ogg_packet* packet)
{
    const char* message = NULL;
    int newsize = avc_context->extradata_size + 2 + packet->bytes;
    int err = AVERROR_INVALIDDATA;

    if (packet->bytes < 0) {
        message = "ogg_packet has negative size";
    } else if (packet->bytes > 0xffff) {
        /* length prefix is 16 bits, so larger packets cannot be stored */
        message = "ogg_packet is larger than 65535 bytes";
    } else if (newsize < avc_context->extradata_size) {
        message = "extradata_size would overflow";
    } else {
        if ((err = av_reallocp(&avc_context->extradata, newsize)) < 0) {
            avc_context->extradata_size = 0;
            message = "av_realloc failed";
        }
    }
    if (message) {
        /* fixed: format string was garbled to "%s/n" (literal slash-n)
         * instead of the newline escape */
        av_log(avc_context, AV_LOG_ERROR, "concatenate_packet failed: %s\n", message);
        return err;
    }

    avc_context->extradata_size = newsize;

    AV_WB16(avc_context->extradata + (*offset), packet->bytes);
    *offset += 2;
    memcpy(avc_context->extradata + (*offset), packet->packet, packet->bytes);
    (*offset) += packet->bytes;

    return 0;
}
开发者ID:Acidburn0zzz,项目名称:libav,代码行数:33,


示例9: adx_encode

/**
 * ADPCM-encode one 32-sample ADX block for a single channel.
 * First pass finds the prediction-residual range to pick a scale;
 * second pass quantizes each residual to 4 bits while tracking the
 * decoder-side reconstructed history (s1/s2) to avoid drift.
 * Writes the 16-bit scale followed by 16 bytes of nibbles into adx.
 */
static void adx_encode(ADXContext *c, uint8_t *adx, const int16_t *wav,
                       ADXChannelState *prev, int channels)
{
    PutBitContext pb;
    int scale;
    int i, j;
    int s0, s1, s2, d;
    int max = 0;
    int min = 0;

    s1 = prev->s1;
    s2 = prev->s2;
    /* pass 1: residual range; wav is interleaved, so step by channels */
    for (i = 0, j = 0; j < 32; i += channels, j++) {
        s0 = wav[i];
        d = ((s0 << COEFF_BITS) - c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS;
        if (max < d)
            max = d;
        if (min > d)
            min = d;
        s2 = s1;
        s1 = s0;
    }

    /* all-zero residuals: emit a silent block */
    if (max == 0 && min == 0) {
        prev->s1 = s1;
        prev->s2 = s2;
        memset(adx, 0, BLOCK_SIZE);
        return;
    }

    /* choose scale so residuals fit the signed 4-bit range -8..+7 */
    if (max / 7 > -min / 8)
        scale = max / 7;
    else
        scale = -min / 8;

    if (scale == 0)
        scale = 1;

    AV_WB16(adx, scale);

    init_put_bits(&pb, adx + 2, 16);

    /* pass 2: quantize, reconstructing s0 the way the decoder will */
    s1 = prev->s1;
    s2 = prev->s2;
    for (i = 0, j = 0; j < 32; i += channels, j++) {
        d = ((wav[i] << COEFF_BITS) - c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS;
        d = av_clip(ROUNDED_DIV(d, scale), -8, 7);
        put_sbits(&pb, 4, d);
        s0 = ((d << COEFF_BITS) * scale + c->coeff[0] * s1 + c->coeff[1] * s2) >> COEFF_BITS;
        s2 = s1;
        s1 = s0;
    }
    prev->s1 = s1;
    prev->s2 = s2;

    flush_put_bits(&pb);
}
开发者ID:839687571,项目名称:ffmpeg_sln_vs2013,代码行数:60,


示例10: avpriv_dca_convert_bitstream

/**
 * Convert a DCA core frame to the canonical 16-bit big-endian form,
 * dispatching on the sync word: BE input is copied through, LE input is
 * byte-swapped, and 14-bit-packed input (BE or LE) is repacked via a
 * bit writer.
 * @return number of bytes written to dst, or AVERROR_INVALIDDATA if the
 *         sync word is not recognized.
 */
int avpriv_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
                             int max_size)
{
    uint32_t mrk;
    int i, tmp;
    PutBitContext pb;

    /* never read/write more than the destination can hold */
    if ((unsigned) src_size > (unsigned) max_size)
        src_size = max_size;

    mrk = AV_RB32(src);
    switch (mrk) {
    case DCA_SYNCWORD_CORE_BE:
        /* already in target byte order */
        memcpy(dst, src, src_size);
        return src_size;
    case DCA_SYNCWORD_CORE_LE:
        /* swap each 16-bit word to big-endian */
        for (i = 0; i < (src_size + 1) >> 1; i++) {
            AV_WB16(dst, AV_RL16(src));
            src += 2;
            dst += 2;
        }
        return src_size;
    case DCA_SYNCWORD_CORE_14B_BE:
    case DCA_SYNCWORD_CORE_14B_LE:
        /* 14 useful bits per 16-bit word: repack densely */
        init_put_bits(&pb, dst, max_size);
        for (i = 0; i < (src_size + 1) >> 1; i++, src += 2) {
            tmp = ((mrk == DCA_SYNCWORD_CORE_14B_BE) ? AV_RB16(src) : AV_RL16(src)) & 0x3FFF;
            put_bits(&pb, 14, tmp);
        }
        flush_put_bits(&pb);
        return (put_bits_count(&pb) + 7) >> 3;
    default:
        return AVERROR_INVALIDDATA;
    }
}
开发者ID:EdgarHz,项目名称:FFmpeg,代码行数:35,


示例11: sctp_wait_fd

/* Poll fd for readability (write=0) or writability (write=1) with a
 * 100 ms timeout. Returns 0 when ready, AVERROR(EAGAIN) on timeout, or
 * a network error code. */
static int sctp_wait_fd(int fd, int write)
{
    int ev          = write ? POLLOUT : POLLIN;
    struct pollfd p = { .fd = fd, .events = ev, .revents = 0 };
    int ret;

    ret = poll(&p, 1, 100);
    return ret < 0 ? ff_neterrno() : p.revents & ev ? 0 : AVERROR(EAGAIN);
}

/* Read from the SCTP socket. In blocking mode, wait for readability
 * first. When multiple streams are negotiated, the SCTP stream id is
 * prepended to the payload as a 16-bit big-endian value (so the payload
 * itself lands at buf+2). Returns bytes stored in buf or an error. */
static int sctp_read(URLContext *h, uint8_t *buf, int size)
{
    SCTPContext *s = h->priv_data;
    int ret;

    if (!(h->flags & AVIO_FLAG_NONBLOCK)) {
        ret = sctp_wait_fd(s->fd, 0);
        if (ret < 0)
            return ret;
    }

    if (s->max_streams) {
        /* StreamId is introduced as a 2-byte code into the stream */
        struct sctp_sndrcvinfo info = { 0 };
        ret = ff_sctp_recvmsg(s->fd, buf + 2, size - 2, NULL, 0, &info, 0);
        AV_WB16(buf, info.sinfo_stream);
        /* account for the 2 prefix bytes on success */
        ret = ret < 0 ? ret : ret + 2;
    } else
        ret = recv(s->fd, buf, size, 0);

    return ret < 0 ? ff_neterrno() : ret;
}
开发者ID:SmartJog,项目名称:ffmpeg,代码行数:32,


示例12: jpeg_table_header

/**
 * Emit the JPEG table segments: DQT (quantization matrices, skipped for
 * lossless JPEG), an optional DRI restart-interval marker when slice
 * threading is active, and the DHT Huffman tables whose total length is
 * back-patched after the tables are written.
 */
static void jpeg_table_header(AVCodecContext *avctx, PutBitContext *p,
                              ScanTable *intra_scantable,
                              uint16_t luma_intra_matrix[64],
                              uint16_t chroma_intra_matrix[64],
                              int hsample[3])
{
    int i, j, size;
    uint8_t *ptr;
    MpegEncContext *s = avctx->priv_data;

    if (avctx->codec_id != AV_CODEC_ID_LJPEG) {
        /* one matrix if luma == chroma, two otherwise */
        int matrix_count = 1 + !!memcmp(luma_intra_matrix,
                                        chroma_intra_matrix,
                                        sizeof(luma_intra_matrix[0]) * 64);
        if (s->force_duplicated_matrix)
            matrix_count = 2;
        /* quant matrixes */
        put_marker(p, DQT);
        put_bits(p, 16, 2 + matrix_count * (1 + 64));
        put_bits(p, 4, 0); /* 8 bit precision */
        put_bits(p, 4, 0); /* table 0 */
        for(i=0;i<64;i++) {
            /* matrices are stored in zigzag (permutated) order */
            j = intra_scantable->permutated[i];
            put_bits(p, 8, luma_intra_matrix[j]);
        }
        if (matrix_count > 1) {
            put_bits(p, 4, 0); /* 8 bit precision */
            put_bits(p, 4, 1); /* table 1 */
            for(i=0;i<64;i++) {
                j = intra_scantable->permutated[i];
                put_bits(p, 8, chroma_intra_matrix[j]);
            }
        }
    }

    /* restart markers enable independently decodable slices */
    if(avctx->active_thread_type & FF_THREAD_SLICE){
        put_marker(p, DRI);
        put_bits(p, 16, 4);
        put_bits(p, 16, (avctx->width-1)/(8*hsample[0]) + 1);
    }

    /* huffman table */
    put_marker(p, DHT);
    flush_put_bits(p);
    ptr = put_bits_ptr(p);
    put_bits(p, 16, 0); /* patched later */
    size = 2;
    size += put_huffman_table(p, 0, 0, avpriv_mjpeg_bits_dc_luminance,
                              avpriv_mjpeg_val_dc);
    size += put_huffman_table(p, 0, 1, avpriv_mjpeg_bits_dc_chrominance,
                              avpriv_mjpeg_val_dc);
    size += put_huffman_table(p, 1, 0, avpriv_mjpeg_bits_ac_luminance,
                              avpriv_mjpeg_val_ac_luminance);
    size += put_huffman_table(p, 1, 1,
avpriv_mjpeg_bits_ac_chrominance,
                              avpriv_mjpeg_val_ac_chrominance);
    /* back-patch the DHT segment length */
    AV_WB16(ptr, size);
}
开发者ID:DaveDaCoda,项目名称:mythtv,代码行数:59,


示例13: jpeg_put_comments

/**
 * Emit optional JPEG header segments: a JFIF APP0 header when a sample
 * aspect ratio is set, an encoder-identification COM segment (unless
 * bit-exact output is requested), and a "CS=ITU601" COM segment for
 * limited-range YUV output. COM lengths are back-patched after writing.
 */
static void jpeg_put_comments(AVCodecContext *avctx, PutBitContext *p)
{
    int size;
    uint8_t *ptr;

    if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
        /* JFIF header */
        put_marker(p, APP0);
        put_bits(p, 16, 16);
        avpriv_put_string(p, "JFIF", 1); /* this puts the trailing zero-byte too */
        /* The most significant byte is used for major revisions, the least
         * significant byte for minor revisions. Version 1.02 is the current
         * released revision. */
        put_bits(p, 16, 0x0102);
        put_bits(p,  8, 0);              /* units type: 0 - aspect ratio */
        put_bits(p, 16, avctx->sample_aspect_ratio.num);
        put_bits(p, 16, avctx->sample_aspect_ratio.den);
        put_bits(p, 8, 0); /* thumbnail width */
        put_bits(p, 8, 0); /* thumbnail height */
    }

    /* comment */
    if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
        put_marker(p, COM);
        flush_put_bits(p);
        ptr = put_bits_ptr(p);
        put_bits(p, 16, 0); /* patched later */
        avpriv_put_string(p, LIBAVCODEC_IDENT, 1);
        size = strlen(LIBAVCODEC_IDENT)+3;
        AV_WB16(ptr, size);
    }

    /* mark limited-range (ITU-R BT.601) colorspace for YUV output */
    if (((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
          avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
          avctx->pix_fmt == AV_PIX_FMT_YUV444P) && avctx->color_range != AVCOL_RANGE_JPEG)
        || avctx->color_range == AVCOL_RANGE_MPEG) {
        put_marker(p, COM);
        flush_put_bits(p);
        ptr = put_bits_ptr(p);
        put_bits(p, 16, 0); /* patched later */
        avpriv_put_string(p, "CS=ITU601", 1);
        size = strlen("CS=ITU601")+3;
        AV_WB16(ptr, size);
    }
}
开发者ID:DaveDaCoda,项目名称:mythtv,代码行数:45,


示例14: text2movsub

/*
 * Bitstream filter: convert a plain text subtitle packet to the mov
 * (3GPP timed text) sample format by prefixing it with a 16-bit
 * big-endian length. Returns 1 when a new buffer was produced, 0 when
 * the input is too large to represent, or AVERROR(ENOMEM) on OOM.
 */
static int text2movsub(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size, int keyframe)
{
    /* the length prefix is 16 bits, so larger payloads cannot be stored */
    if (buf_size > 0xffff) return 0;
    *poutbuf_size = buf_size + 2;
    *poutbuf = av_malloc(*poutbuf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    /* fixed: the original wrote through the pointer without checking
     * the allocation result */
    if (!*poutbuf)
        return AVERROR(ENOMEM);
    AV_WB16(*poutbuf, buf_size);
    memcpy(*poutbuf + 2, buf, buf_size);
    return 1;
}
开发者ID:0x0B501E7E,项目名称:ffmpeg,代码行数:10,


示例15: jpeg_put_comments

/* Emit optional JPEG header segments: a JFIF APP0 header when a sample
 * aspect ratio is known, an encoder-identification COM segment (unless
 * bit-exact output is requested), and a "CS=ITU601" COM segment for
 * planar YUV formats. COM segment lengths are back-patched. */
static void jpeg_put_comments(AVCodecContext *avctx, PutBitContext *p)
{
    uint8_t *len_pos;
    int len;

    if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
        /* JFIF header */
        put_marker(p, APP0);
        put_bits(p, 16, 16);
        avpriv_put_string(p, "JFIF", 1); /* this puts the trailing zero-byte too */
        put_bits(p, 16, 0x0102);         /* v 1.02 */
        put_bits(p,  8, 0);              /* units type: 0 - aspect ratio */
        put_bits(p, 16, avctx->sample_aspect_ratio.num);
        put_bits(p, 16, avctx->sample_aspect_ratio.den);
        put_bits(p, 8, 0); /* thumbnail width */
        put_bits(p, 8, 0); /* thumbnail height */
    }

    /* comment */
    if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        put_marker(p, COM);
        flush_put_bits(p);
        len_pos = put_bits_ptr(p);
        put_bits(p, 16, 0); /* patched later */
        avpriv_put_string(p, LIBAVCODEC_IDENT, 1);
        len = strlen(LIBAVCODEC_IDENT) + 3;
        AV_WB16(len_pos, len);
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
        put_marker(p, COM);
        flush_put_bits(p);
        len_pos = put_bits_ptr(p);
        put_bits(p, 16, 0); /* patched later */
        avpriv_put_string(p, "CS=ITU601", 1);
        len = strlen("CS=ITU601") + 3;
        AV_WB16(len_pos, len);
        break;
    default:
        break;
    }
}
开发者ID:0Soul,项目名称:FFmpeg,代码行数:41,


示例16: encrypt_counter

/* XOR outbuf with an AES counter-mode keystream: for each 16-byte
 * block, a big-endian block counter is written into the last two IV
 * bytes, the IV is encrypted, and the result is XORed into the output
 * (the final block may be partial). */
static void encrypt_counter(struct AVAES *aes, uint8_t *iv, uint8_t *outbuf,
                            int outlen)
{
    uint8_t block[16];
    int done    = 0;
    int counter = 0;

    while (done < outlen) {
        int chunk, k;

        /* the last two IV bytes hold the big-endian block counter */
        AV_WB16(&iv[14], counter++);
        av_aes_crypt(aes, block, iv, 1, NULL, 0);

        chunk = outlen - done;
        if (chunk > 16)
            chunk = 16;
        for (k = 0; k < chunk; k++)
            outbuf[done + k] ^= block[k];
        done += chunk;
    }
}
开发者ID:AVbin,项目名称:libav,代码行数:12,


示例17: rmff_dump_dataheader

/* Serialize a RealMedia DATA chunk header into buffer.
 * Returns RMFF_DATAHEADER_SIZE on success, 0 for a NULL header, or -1
 * when the buffer is too small. */
static int rmff_dump_dataheader(rmff_data_t *data, char *buffer, int bufsize) {
  char *out = buffer;

  if (!data)
    return 0;
  if (bufsize < RMFF_DATAHEADER_SIZE)
    return -1;

  AV_WB32(out,      data->object_id);
  AV_WB32(out + 4,  data->size);
  AV_WB16(out + 8,  data->object_version);
  AV_WB32(out + 10, data->num_packets);
  AV_WB32(out + 14, data->next_data_header);

  return RMFF_DATAHEADER_SIZE;
}
开发者ID:hanyong,项目名称:mplayer-kovensky,代码行数:15,


示例18: rmff_dump_mdpr

/**
 * Serialize a RealMedia MDPR (media properties) header into buffer:
 * fixed fields, then the length-prefixed stream name and MIME type,
 * then the 32-bit-length-prefixed type-specific data.
 * Returns bytes written, 0 for a NULL header, or -1 when the buffer is
 * too small (the check is split to avoid integer overflow).
 */
static int rmff_dump_mdpr(rmff_mdpr_t *mdpr, char *buffer, int bufsize) {
  int s1, s2, s3;

  if (!mdpr) return 0;
  /* overflow-safe capacity check for the three variable-length parts */
  if (!(bufsize > RMFF_MDPRHEADER_SIZE + mdpr->stream_name_size + mdpr->mime_type_size &&
        (unsigned)bufsize - RMFF_MDPRHEADER_SIZE - mdpr->stream_name_size - mdpr->mime_type_size > mdpr->type_specific_len))
    return -1;

  AV_WB32(buffer, mdpr->object_id);
  AV_WB32(buffer+4, mdpr->size);
  AV_WB16(buffer+8, mdpr->object_version);

  AV_WB16(buffer+10, mdpr->stream_number);
  AV_WB32(buffer+12, mdpr->max_bit_rate);
  AV_WB32(buffer+16, mdpr->avg_bit_rate);
  AV_WB32(buffer+20, mdpr->max_packet_size);
  AV_WB32(buffer+24, mdpr->avg_packet_size);
  AV_WB32(buffer+28, mdpr->start_time);
  AV_WB32(buffer+32, mdpr->preroll);
  AV_WB32(buffer+36, mdpr->duration);

  /* stream name: 8-bit length + bytes */
  buffer[40] = mdpr->stream_name_size;
  s1=mdpr->stream_name_size;
  memcpy(&buffer[41], mdpr->stream_name, s1);
  /* MIME type: 8-bit length + bytes */
  buffer[41+s1] = mdpr->mime_type_size;
  s2=mdpr->mime_type_size;
  memcpy(&buffer[42+s1], mdpr->mime_type, s2);
  /* type-specific data: 32-bit length + bytes */
  AV_WB32(buffer+42+s1+s2, mdpr->type_specific_len);
  s3=mdpr->type_specific_len;
  memcpy(&buffer[46+s1+s2], mdpr->type_specific_data, s3);

  return RMFF_MDPRHEADER_SIZE + s1 + s2 + s3;
}
开发者ID:hanyong,项目名称:mplayer-kovensky,代码行数:36,


示例19: rmff_dump_prop

/* Serialize a RealMedia PROP (file properties) header into buffer.
 * Returns RMFF_PROPHEADER_SIZE on success, 0 for a NULL header, or -1
 * when the buffer is too small. */
static int rmff_dump_prop(rmff_prop_t *prop, char *buffer, int bufsize) {
  char *out = buffer;

  if (!prop)
    return 0;
  if (bufsize < RMFF_PROPHEADER_SIZE)
    return -1;

  /* common object preamble */
  AV_WB32(out,     prop->object_id);
  AV_WB32(out + 4, prop->size);
  AV_WB16(out + 8, prop->object_version);

  /* property fields */
  AV_WB32(out + 10, prop->max_bit_rate);
  AV_WB32(out + 14, prop->avg_bit_rate);
  AV_WB32(out + 18, prop->max_packet_size);
  AV_WB32(out + 22, prop->avg_packet_size);
  AV_WB32(out + 26, prop->num_packets);
  AV_WB32(out + 30, prop->duration);
  AV_WB32(out + 34, prop->preroll);
  AV_WB32(out + 38, prop->index_offset);
  AV_WB32(out + 42, prop->data_offset);
  AV_WB16(out + 46, prop->num_streams);
  AV_WB16(out + 48, prop->flags);

  return RMFF_PROPHEADER_SIZE;
}
开发者ID:hanyong,项目名称:mplayer-kovensky,代码行数:24,


示例20: rmff_dump_fileheader

/* Serialize the RealMedia file header (.RMF) into buffer.
 * Returns RMFF_FILEHEADER_SIZE on success, 0 for a NULL header, or -1
 * when the buffer is too small. */
static int rmff_dump_fileheader(rmff_fileheader_t *fileheader, char *buffer, int bufsize) {
  char *out = buffer;

  if (!fileheader)
    return 0;
  if (bufsize < RMFF_FILEHEADER_SIZE)
    return -1;

  AV_WB32(out,      fileheader->object_id);
  AV_WB32(out + 4,  fileheader->size);
  AV_WB16(out + 8,  fileheader->object_version);
  AV_WB32(out + 10, fileheader->file_version);
  AV_WB32(out + 14, fileheader->num_headers);

  return RMFF_FILEHEADER_SIZE;
}
开发者ID:hanyong,项目名称:mplayer-kovensky,代码行数:15,


示例21: avc_parse_annexb

/* Copy all SPS/PPS NAL units from Annex-B formatted extradata into dst,
 * each prefixed with a 16-bit big-endian length (mp4/avcC style).
 * Returns the number of bytes written.
 * NOTE(review): dst is assumed large enough for every SPS/PPS found —
 * no bounds checking is done here; confirm at the call site. */
size_t avc_parse_annexb(BYTE *extra, int extrasize, BYTE *dst)
{
  size_t dstSize = 0;
  CH264Nalu Nalu;
  Nalu.SetBuffer(extra, extrasize, 0);
  while (Nalu.ReadNext()) {
    if (Nalu.GetType() == NALU_TYPE_SPS || Nalu.GetType() == NALU_TYPE_PPS) {
      size_t len = Nalu.GetDataLength();
      /* 16-bit big-endian length prefix, then the raw NAL payload */
      AV_WB16(dst+dstSize, (uint16_t)len);
      dstSize += 2;
      memcpy(dst+dstSize, Nalu.GetDataBuffer(), Nalu.GetDataLength());
      dstSize += Nalu.GetDataLength();
    }
  }
  return dstSize;
}
开发者ID:hiplayer,项目名称:mpc_hc,代码行数:17,


示例22: rgbpack_fields

/**
 * Pack three planar R/G/B component planes into a single interleaved
 * plane. Each component is shifted into position per ctx->shift[] and
 * the combined value is stored at 1, 2 or 4 bytes per pixel
 * (ctx->step), honoring the requested output endianness (ctx->be).
 * Components wider than 8 bits are read as native 16-bit values.
 */
static void rgbpack_fields(void *ctx_,
                           uint8_t *src[AVS_MAX_COMPONENTS],
                           int sstrides[AVS_MAX_COMPONENTS],
                           uint8_t *dst[AVS_MAX_COMPONENTS],
                           int dstrides[AVS_MAX_COMPONENTS],
                           int w, int h)
{
    RGBPackContext *ctx = ctx_;
    uint8_t *rgb[3], *dest;
    unsigned val;
    int i, j, c;

    rgb[0] = src[0];
    rgb[1] = src[1];
    rgb[2] = src[2];
    dest   = dst[0];

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            /* merge the three components into one packed value */
            val = 0;
            if (ctx->inbpp <= 8) {
                for (c = 0; c < 3; c++)
                    val |= rgb[c][i] << ctx->shift[c];
            } else {
                for (c = 0; c < 3; c++)
                    val |= AV_RN16(rgb[c] + i * 2) << ctx->shift[c];
            }
            /* store at the configured output width/endianness */
            switch (ctx->step) {
            case 1:
                dest[i] = val;
                break;
            case 2:
                if (ctx->be) AV_WB16(dest + i * 2, val);
                else         AV_WL16(dest + i * 2, val);
                break;
            case 4:
                if (ctx->be) AV_WB32(dest + i * 4, val);
                else         AV_WL32(dest + i * 4, val);
                break;
            }
        }
        /* NOTE(review): all three source planes advance by sstrides[0],
         * i.e. the planes are assumed to share one stride — confirm */
        for (c = 0; c < 3; c++)
            rgb[c] += sstrides[0];
        dest += dstrides[0];
    }
}
开发者ID:lu-zero,项目名称:avscale,代码行数:46,


示例23: decode_extradata_ps_mp4

/* There are (invalid) samples in the wild with mp4-style extradata, where the
 * parameter sets are stored unescaped (i.e. as RBSP).
 * This function catches the parameter set decoding failure and tries again
 * after escaping it */
static int decode_extradata_ps_mp4(const uint8_t *buf, int buf_size, H264ParamSets *ps,
                                   int err_recognition, void *logctx)
{
    int ret;

    ret = decode_extradata_ps(buf, buf_size, ps, 1, logctx);
    if (ret < 0 && !(err_recognition & AV_EF_EXPLODE)) {
        GetByteContext gbc;
        PutByteContext pbc;
        uint8_t *escaped_buf;
        int escaped_buf_size;

        /* fixed: the message ended with a literal "/n" instead of the
         * newline escape */
        av_log(logctx, AV_LOG_WARNING,
               "SPS decoding failure, trying again after escaping the NAL\n");

        /* worst case the escaped NAL grows by a factor of 3/2 */
        if (buf_size / 2 >= (INT16_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 3)
            return AVERROR(ERANGE);
        escaped_buf_size = buf_size * 3 / 2 + AV_INPUT_BUFFER_PADDING_SIZE;
        escaped_buf = av_mallocz(escaped_buf_size);
        if (!escaped_buf)
            return AVERROR(ENOMEM);

        bytestream2_init(&gbc, buf, buf_size);
        bytestream2_init_writer(&pbc, escaped_buf, escaped_buf_size);

        while (bytestream2_get_bytes_left(&gbc)) {
            if (bytestream2_get_bytes_left(&gbc) >= 3 &&
                bytestream2_peek_be24(&gbc) <= 3) {
                /* insert an emulation prevention byte (0x03) */
                bytestream2_put_be24(&pbc, 3);
                bytestream2_skip(&gbc, 2);
            } else
                bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
        }

        escaped_buf_size = bytestream2_tell_p(&pbc);
        /* rewrite the 16-bit length prefix to the escaped size */
        AV_WB16(escaped_buf, escaped_buf_size - 2);

        ret = decode_extradata_ps(escaped_buf, escaped_buf_size, ps, 1, logctx);
        av_freep(&escaped_buf);
        if (ret < 0)
            return ret;
    }

    return 0;
}
开发者ID:411697643,项目名称:FFmpeg,代码行数:49,


示例24: adx_encode

/**
 * ADPCM-encode one 32-sample ADX block (early mplayer-era variant).
 * First pass computes all prediction residuals and their range, then a
 * scale is chosen so residuals fit signed 4 bits; the 18-byte output
 * block is the 16-bit scale followed by 16 bytes of packed nibbles.
 * An all-zero residual block is emitted as 18 zero bytes.
 */
static void adx_encode(unsigned char *adx,const short *wav,PREV *prev)
{
    int scale;
    int i;
    int s0,s1,s2,d;
    int max=0;
    int min=0;
    int data[32];

    /* pass 1: residuals from the two-tap predictor, tracking min/max */
    s1 = prev->s1;
    s2 = prev->s2;
    for(i=0;i<32;i++) {
        s0 = wav[i];
        d = ((s0<<14) - SCALE1*s1 + SCALE2*s2)/BASEVOL;
        data[i]=d;
        if (max<d) max=d;
        if (min>d) min=d;
        s2 = s1;
        s1 = s0;
    }
    /* carry predictor history into the next block */
    prev->s1 = s1;
    prev->s2 = s2;

    /* -8..+7 */
    if (max==0 && min==0) {
        memset(adx,0,18);
        return;
    }

    /* pick the scale so every residual fits the signed 4-bit range */
    if (max/7>-min/8) scale = max/7;
    else scale = -min/8;
    if (scale==0) scale=1;
    AV_WB16(adx, scale);

    /* pass 2: pack two 4-bit quantized residuals per output byte */
    for(i=0;i<16;i++) {
        adx[i+2] = ((data[i*2]/scale)<<4) | ((data[i*2+1]/scale)&0xf);
    }
}
开发者ID:cchatterj,项目名称:isabel,代码行数:41,


示例25: mov_text_encode_frame

/**
 * Encode an ASS subtitle into a 3GPP timed-text (mov_text) sample:
 * a 16-bit big-endian text length followed by the text itself.
 * @return bytes written to buf, 0 when there is nothing to encode,
 *         or a negative AVERROR code.
 */
static int mov_text_encode_frame(AVCodecContext *avctx, unsigned char *buf,
                                 int bufsize, const AVSubtitle *sub)
{
    MovTextContext *s = avctx->priv_data;
    ASSDialog *dialog;
    int i, len, num;

    s->ptr = s->buffer;
    s->end = s->ptr + sizeof(s->buffer);

    for (i = 0; i < sub->num_rects; i++) {
        if (sub->rects[i]->type != SUBTITLE_ASS) {
            /* fixed: log messages ended with a literal "/n" instead of
             * the newline escape */
            av_log(avctx, AV_LOG_ERROR, "Only SUBTITLE_ASS type supported.\n");
            return AVERROR(ENOSYS);
        }

        /* strip ASS markup via the override-code callbacks, which append
         * plain text into s->buffer */
        dialog = ff_ass_split_dialog(s->ass_ctx, sub->rects[i]->ass, 0, &num);
        for (; dialog && num--; dialog++) {
            ff_ass_split_override_codes(&mov_text_callbacks, s, dialog->text);
        }
    }

    if (s->ptr == s->buffer)
        return 0;

    /* 16-bit big-endian text length precedes the sample text */
    AV_WB16(buf, strlen(s->buffer));
    buf += 2;

    len = av_strlcpy(buf, s->buffer, bufsize - 2);

    if (len > bufsize-3) {
        av_log(avctx, AV_LOG_ERROR, "Buffer too small for ASS event.\n");
        return AVERROR(EINVAL);
    }

    return len + 2;
}
开发者ID:26mansi,项目名称:FFmpeg,代码行数:38,


示例26: tcp_write_packet

/**
 * Flush queued RTP/RTCP packets over the RTSP TCP control connection,
 * framing each one as an interleaved packet ('$', channel id, 16-bit
 * length) per RFC 2326 section 10.12, then reopen the dynamic packet
 * buffer. Always returns 0.
 */
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t *interleave_header, *interleaved_packet;

    size = avio_close_dyn_buf(rtpctx->pb, &buf);
    ptr = buf;
    /* each queued packet is prefixed by a 32-bit length in the buffer */
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        /* The interleaving header is exactly 4 bytes, which happens to be
         * the same size as the packet length header from
         * ffio_open_dyn_packet_buf. So by writing the interleaving header
         * over these bytes, we get a consecutive interleaved packet
         * that can be written in one call. */
        interleaved_packet = interleave_header = ptr;
        ptr += 4;
        size -= 4;
        /* stop on truncated or nonsensical entries */
        if (packet_len > size || packet_len < 2)
            break;
        /* RTCP payload types occupy the SR..APP range */
        if (ptr[1] >= RTCP_SR && ptr[1] <= RTCP_APP)
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        ffurl_write(rt->rtsp_hd_out, interleaved_packet, 4 + packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    ffio_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
    return 0;
}
开发者ID:simock85,项目名称:libav,代码行数:38,


示例27: decode_frame

//.........这里部分代码省略.........    }    if (s->stripsizesoff) {        if (s->stripsizesoff >= (unsigned)avpkt->size)            return AVERROR_INVALIDDATA;        bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,                         avpkt->size - s->stripsizesoff);    }    if (s->strippos) {        if (s->strippos >= (unsigned)avpkt->size)            return AVERROR_INVALIDDATA;        bytestream2_init(&stripdata, avpkt->data + s->strippos,                         avpkt->size - s->strippos);    }    if (s->rps <= 0) {        av_log(avctx, AV_LOG_ERROR, "rps %d invalid/n", s->rps);        return AVERROR_INVALIDDATA;    }    planes = s->planar ? s->bppcount : 1;    for (plane = 0; plane < planes; plane++) {        stride = p->linesize[plane];        dst    = p->data[plane];    for (i = 0; i < s->height; i += s->rps) {        if (s->stripsizesoff)            ssize = ff_tget(&stripsizes, s->sstype, le);        else            ssize = s->stripsize;        if (s->strippos)            soff = ff_tget(&stripdata, s->sot, le);        else            soff = s->stripoff;        if (soff > avpkt->size || ssize > avpkt->size - soff) {            av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset/n");            return AVERROR_INVALIDDATA;        }        if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,                                     FFMIN(s->rps, s->height - i))) < 0) {            if (avctx->err_recognition & AV_EF_EXPLODE)                return ret;            break;        }        dst += s->rps * stride;    }    if (s->predictor == 2) {        if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {            av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");            return AVERROR_PATCHWELCOME;        }        dst   = p->data[plane];        soff  = s->bpp >> 3;        if (s->planar)            soff  = FFMAX(soff / s->bppcount, 1);        ssize = s->width * soff;        if (s->avctx->pix_fmt == 
AV_PIX_FMT_RGB48LE ||            s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||            s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||            s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {            for (i = 0; i < s->height; i++) {                for (j = soff; j < ssize; j += 2)                    AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));                dst += stride;            }        } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||                   s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||                   s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||                   s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {            for (i = 0; i < s->height; i++) {                for (j = soff; j < ssize; j += 2)                    AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));                dst += stride;            }        } else {            for (i = 0; i < s->height; i++) {                for (j = soff; j < ssize; j++)                    dst[j] += dst[j - soff];                dst += stride;            }        }    }    if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {        dst = p->data[plane];        for (i = 0; i < s->height; i++) {            for (j = 0; j < p->linesize[plane]; j++)                dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j];            dst += stride;        }    }    }    if (s->planar && s->bppcount > 2) {        FFSWAP(uint8_t*, p->data[0],     p->data[2]);        FFSWAP(int,      p->linesize[0], p->linesize[2]);        FFSWAP(uint8_t*, p->data[0],     p->data[1]);        FFSWAP(int,      p->linesize[0], p->linesize[1]);    }
开发者ID:venkatarajasekhar,项目名称:Qt,代码行数:101,


示例28: jpeg_create_header

static int jpeg_create_header(uint8_t *buf, int size, uint32_t type, uint32_t w,                              uint32_t h, const uint8_t *qtable, int nb_qtable,                              int dri){    PutByteContext pbc;    uint8_t *dht_size_ptr;    int dht_size, i;    bytestream2_init_writer(&pbc, buf, size);    /* Convert from blocks to pixels. */    w <<= 3;    h <<= 3;    /* SOI */    jpeg_put_marker(&pbc, SOI);    /* JFIF header */    jpeg_put_marker(&pbc, APP0);    bytestream2_put_be16(&pbc, 16);    bytestream2_put_buffer(&pbc, "JFIF", 5);    bytestream2_put_be16(&pbc, 0x0201);    bytestream2_put_byte(&pbc, 0);    bytestream2_put_be16(&pbc, 1);    bytestream2_put_be16(&pbc, 1);    bytestream2_put_byte(&pbc, 0);    bytestream2_put_byte(&pbc, 0);    if (dri) {        jpeg_put_marker(&pbc, DRI);        bytestream2_put_be16(&pbc, 4);        bytestream2_put_be16(&pbc, dri);    }    /* DQT */    jpeg_put_marker(&pbc, DQT);    bytestream2_put_be16(&pbc, 2 + nb_qtable * (1 + 64));    for (i = 0; i < nb_qtable; i++) {        bytestream2_put_byte(&pbc, i);        /* Each table is an array of 64 values given in zig-zag         * order, identical to the format used in a JFIF DQT         * marker segment. 
*/        bytestream2_put_buffer(&pbc, qtable + 64 * i, 64);    }    /* DHT */    jpeg_put_marker(&pbc, DHT);    dht_size_ptr = pbc.buffer;    bytestream2_put_be16(&pbc, 0);    dht_size  = 2;    dht_size += jpeg_create_huffman_table(&pbc, 0, 0,avpriv_mjpeg_bits_dc_luminance,                                          avpriv_mjpeg_val_dc);    dht_size += jpeg_create_huffman_table(&pbc, 0, 1, avpriv_mjpeg_bits_dc_chrominance,                                          avpriv_mjpeg_val_dc);    dht_size += jpeg_create_huffman_table(&pbc, 1, 0, avpriv_mjpeg_bits_ac_luminance,                                          avpriv_mjpeg_val_ac_luminance);    dht_size += jpeg_create_huffman_table(&pbc, 1, 1, avpriv_mjpeg_bits_ac_chrominance,                                          avpriv_mjpeg_val_ac_chrominance);    AV_WB16(dht_size_ptr, dht_size);    /* SOF0 */    jpeg_put_marker(&pbc, SOF0);    bytestream2_put_be16(&pbc, 17); /* size */    bytestream2_put_byte(&pbc, 8); /* bits per component */    bytestream2_put_be16(&pbc, h);    bytestream2_put_be16(&pbc, w);    bytestream2_put_byte(&pbc, 3); /* number of components */    bytestream2_put_byte(&pbc, 1); /* component number */    bytestream2_put_byte(&pbc, (2 << 4) | (type ? 2 : 1)); /* hsample/vsample */    bytestream2_put_byte(&pbc, 0); /* matrix number */    bytestream2_put_byte(&pbc, 2); /* component number */    bytestream2_put_byte(&pbc, 1 << 4 | 1); /* hsample/vsample */    bytestream2_put_byte(&pbc, nb_qtable == 2 ? 1 : 0); /* matrix number */    bytestream2_put_byte(&pbc, 3); /* component number */    bytestream2_put_byte(&pbc, 1 << 4 | 1); /* hsample/vsample */    bytestream2_put_byte(&pbc, nb_qtable == 2 ? 
1 : 0); /* matrix number */    /* SOS */    jpeg_put_marker(&pbc, SOS);    bytestream2_put_be16(&pbc, 12);    bytestream2_put_byte(&pbc, 3);    bytestream2_put_byte(&pbc, 1);    bytestream2_put_byte(&pbc, 0);    bytestream2_put_byte(&pbc, 2);    bytestream2_put_byte(&pbc, 17);    bytestream2_put_byte(&pbc, 3);    bytestream2_put_byte(&pbc, 17);    bytestream2_put_byte(&pbc, 0);    bytestream2_put_byte(&pbc, 63);    bytestream2_put_byte(&pbc, 0);    /* Return the length in bytes of the JPEG header. */    return bytestream2_tell_p(&pbc);}
开发者ID:markjreed,项目名称:vice-emu,代码行数:97,


示例29: filter

// Filter data through filterstatic int filter(struct af_instance* af, struct mp_audio* audio, int flags){    struct mp_audio *out = af->data;    af_ac3enc_t *s = af->priv;    int num_frames = (audio->samples + mp_audio_buffer_samples(s->pending))                     / s->in_samples;    int max_out_samples = s->out_samples * num_frames;    mp_audio_realloc_min(out, max_out_samples);    out->samples = 0;    while (audio->samples > 0) {        int ret;        int consumed_pending = 0;        struct mp_audio in_frame;        int pending = mp_audio_buffer_samples(s->pending);        if (pending == 0 && audio->samples >= s->in_samples) {            in_frame = *audio;            mp_audio_skip_samples(audio, s->in_samples);        } else {            if (pending > 0 && pending < s->in_samples) {                struct mp_audio tmp = *audio;                tmp.samples = MPMIN(tmp.samples, s->in_samples);                mp_audio_buffer_append(s->pending, &tmp);                mp_audio_skip_samples(audio, tmp.samples);            }            mp_audio_buffer_peek(s->pending, &in_frame);            if (in_frame.samples < s->in_samples)                break;            consumed_pending = s->in_samples;        }        in_frame.samples = s->in_samples;        AVFrame *frame = av_frame_alloc();        if (!frame) {            MP_FATAL(af, "Could not allocate memory /n");            return -1;        }        frame->nb_samples = s->in_samples;        frame->format = s->lavc_actx->sample_fmt;        frame->channel_layout = s->lavc_actx->channel_layout;        assert(in_frame.num_planes <= AV_NUM_DATA_POINTERS);        frame->extended_data = frame->data;        for (int n = 0; n < in_frame.num_planes; n++)            frame->data[n] = in_frame.planes[n];        frame->linesize[0] = s->in_samples * audio->sstride;        int ok;        ret = avcodec_encode_audio2(s->lavc_actx, &s->pkt, frame, &ok);        av_frame_free(&frame);        if (ret < 0 || !ok) {            MP_FATAL(af, 
"Encode failed./n");            return -1;        }        mp_audio_buffer_skip(s->pending, consumed_pending);        MP_DBG(af, "avcodec_encode_audio got %d, pending %d./n",               s->pkt.size, mp_audio_buffer_samples(s->pending));        int frame_size = s->pkt.size;        int header_len = 0;        char hdr[8];        if (s->cfg_add_iec61937_header && s->pkt.size > 5) {            int bsmod = s->pkt.data[5] & 0x7;            int len = frame_size;            frame_size = AC3_FRAME_SIZE * 2 * 2;            header_len = 8;            AV_WB16(hdr,     0xF872);   // iec 61937 syncword 1            AV_WB16(hdr + 2, 0x4E1F);   // iec 61937 syncword 2            hdr[4] = bsmod;             // bsmod            hdr[5] = 0x01;              // data-type ac3            AV_WB16(hdr + 6, len << 3); // number of bits in payload        }        size_t max_size = (max_out_samples - out->samples) * out->sstride;        if (frame_size > max_size)            abort();        char *buf = (char *)out->planes[0] + out->samples * out->sstride;        memcpy(buf, hdr, header_len);        memcpy(buf + header_len, s->pkt.data, s->pkt.size);        memset(buf + header_len + s->pkt.size, 0,               frame_size - (header_len + s->pkt.size));        out->samples += frame_size / out->sstride;    }    mp_audio_buffer_append(s->pending, audio);    *audio = *out;    return 0;}
开发者ID:Bl4Cc4t,项目名称:mpv,代码行数:97,


示例30: sap_write_header

//.........这里部分代码省略.........    contexts = av_mallocz(sizeof(AVFormatContext*) * s->nb_streams);    if (!contexts) {        ret = AVERROR(ENOMEM);        goto fail;    }    s->start_time_realtime = av_gettime();    for (i = 0; i < s->nb_streams; i++) {        URLContext *fd;        ff_url_join(url, sizeof(url), "rtp", NULL, host, base_port,                    "?ttl=%d", ttl);        if (!same_port)            base_port += 2;        ret = ffurl_open(&fd, url, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);        if (ret) {            ret = AVERROR(EIO);            goto fail;        }        s->streams[i]->priv_data = contexts[i] =            ff_rtp_chain_mux_open(s, s->streams[i], fd, 0);        av_strlcpy(contexts[i]->filename, url, sizeof(contexts[i]->filename));    }    ff_url_join(url, sizeof(url), "udp", NULL, announce_addr, port,                "?ttl=%d&connect=1", ttl);    ret = ffurl_open(&sap->ann_fd, url, AVIO_FLAG_WRITE,                     &s->interrupt_callback, NULL);    if (ret) {        ret = AVERROR(EIO);        goto fail;    }    udp_fd = ffurl_get_file_handle(sap->ann_fd);    if (getsockname(udp_fd, (struct sockaddr*) &localaddr, &addrlen)) {        ret = AVERROR(EIO);        goto fail;    }    if (localaddr.ss_family != AF_INET#if HAVE_STRUCT_SOCKADDR_IN6        && localaddr.ss_family != AF_INET6#endif        ) {        av_log(s, AV_LOG_ERROR, "Unsupported protocol family/n");        ret = AVERROR(EIO);        goto fail;    }    sap->ann_size = 8192;    sap->ann = av_mallocz(sap->ann_size);    if (!sap->ann) {        ret = AVERROR(EIO);        goto fail;    }    sap->ann[pos] = (1 << 5);#if HAVE_STRUCT_SOCKADDR_IN6    if (localaddr.ss_family == AF_INET6)        sap->ann[pos] |= 0x10;#endif    pos++;    sap->ann[pos++] = 0; /* Authentication length */    AV_WB16(&sap->ann[pos], av_get_random_seed());    pos += 2;    if (localaddr.ss_family == AF_INET) {        memcpy(&sap->ann[pos], &((struct sockaddr_in*)&localaddr)->sin_addr,               
sizeof(struct in_addr));        pos += sizeof(struct in_addr);#if HAVE_STRUCT_SOCKADDR_IN6    } else {        memcpy(&sap->ann[pos], &((struct sockaddr_in6*)&localaddr)->sin6_addr,               sizeof(struct in6_addr));        pos += sizeof(struct in6_addr);#endif    }    av_strlcpy(&sap->ann[pos], "application/sdp", sap->ann_size - pos);    pos += strlen(&sap->ann[pos]) + 1;    if (av_sdp_create(contexts, s->nb_streams, &sap->ann[pos],                       sap->ann_size - pos)) {        ret = AVERROR_INVALIDDATA;        goto fail;    }    av_freep(&contexts);    av_log(s, AV_LOG_VERBOSE, "SDP:/n%s/n", &sap->ann[pos]);    pos += strlen(&sap->ann[pos]);    sap->ann_size = pos;    if (sap->ann_size > sap->ann_fd->max_packet_size) {        av_log(s, AV_LOG_ERROR, "Announcement too large to send in one "                                "packet/n");        goto fail;    }    return 0;fail:    av_free(contexts);    sap_write_close(s);    return ret;}
开发者ID:RocDeanGit,项目名称:ffmpeg.net,代码行数:101,



注:本文中的AV_WB16函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ AV_WL16函数代码示例
C++ AV_RL32函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。