
Tutorial: C++ ELOG_DEBUG Function Code Examples

51自学网 2021-06-01 20:32:08
This tutorial on C++ ELOG_DEBUG function code examples is quite practical; we hope it helps you.

This article collects typical usage examples of the ELOG_DEBUG function in C++. If you are wondering how ELOG_DEBUG is used in practice, what it looks like in real code, or simply want working examples, the curated snippets below should help.

A total of 27 ELOG_DEBUG code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better C++ code examples.
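Before the examples, here is a minimal sketch of how ELOG_DEBUG is typically wired up in the licode/erizo codebase, where the examples below come from. It assumes erizo's logger.h macros (DECLARE_LOGGER / DEFINE_LOGGER, backed by log4cxx) and printf-style ELOG_* macros; the PacketCounter class and its members are hypothetical, added only for illustration.

// Minimal sketch (assumption: DECLARE_LOGGER/DEFINE_LOGGER and the printf-style
// ELOG_* macros come from erizo's logger.h, as used throughout licode).
#include "logger.h"

namespace erizo {

class PacketCounter {  // hypothetical class, for illustration only
  DECLARE_LOGGER();    // declares the per-class static logger
 public:
  void count(int len) {
    total_ += len;
    // printf-style formatting; emitted only when the DEBUG level is enabled
    ELOG_DEBUG("received packet, len: %d, total: %d", len, total_);
  }
 private:
  int total_ = 0;
};

DEFINE_LOGGER(PacketCounter, "PacketCounter");  // binds the logger category name

}  // namespace erizo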

Example 1: ELOG_DEBUG

void WebRtcConnection::writeSsrc(char* buf, int len, unsigned int ssrc) {
  ELOG_DEBUG("LEN %d", len);
  RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
  RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
  // if it is RTCP we check if it is a compound packet
  if (chead->isRtcp()) {
    char* movingBuf = buf;
    int rtcpLength = 0;
    int totalLength = 0;
    do {
      movingBuf += rtcpLength;
      RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(movingBuf);
      rtcpLength = (ntohs(chead->length) + 1) * 4;
      totalLength += rtcpLength;
      ELOG_DEBUG("Is RTCP, prev SSRC %u, new %u, len %d ", chead->getSSRC(), ssrc, rtcpLength);
      chead->ssrc = htonl(ssrc);
      if (chead->packettype == RTCP_PS_Feedback_PT) {
        FirHeader *thefir = reinterpret_cast<FirHeader*>(movingBuf);
        if (thefir->fmt == 4) {  // It is a FIR packet, we generate it
          this->sendPLI();
        }
      }
    } while (totalLength < len);
  } else {
    head->setSSRC(ssrc);
  }
}
Developer ID: JiCiT, Project: licode, Lines of code: 27


Example 2: ELOG_DEBUG

bool NiceConnection::setRemoteCandidates(std::vector<CandidateInfo> &candidates) {
  ELOG_DEBUG("Setting remote candidates %d", candidates.size());
  for (unsigned int compId = 1; compId <= iceComponents_; compId++) {
    GSList* candList = NULL;
    for (unsigned int it = 0; it < candidates.size(); it++) {
      NiceCandidateType nice_cand_type;
      CandidateInfo cinfo = candidates[it];
      if (cinfo.mediaType != this->mediaType
          || this->transportName->compare(cinfo.transProtocol)
          || cinfo.componentId != compId)
        continue;
      switch (cinfo.hostType) {
        case HOST:
          nice_cand_type = NICE_CANDIDATE_TYPE_HOST;
          break;
        case SRLFX:
          nice_cand_type = NICE_CANDIDATE_TYPE_SERVER_REFLEXIVE;
          break;
        case PRFLX:
          nice_cand_type = NICE_CANDIDATE_TYPE_PEER_REFLEXIVE;
          break;
        case RELAY:
          nice_cand_type = NICE_CANDIDATE_TYPE_RELAYED;
          break;
        default:
          nice_cand_type = NICE_CANDIDATE_TYPE_HOST;
          break;
      }
      NiceCandidate* thecandidate = nice_candidate_new(nice_cand_type);
      NiceAddress* naddr = nice_address_new();
      nice_address_set_from_string(naddr, cinfo.hostAddress.c_str());
      nice_address_set_port(naddr, cinfo.hostPort);
      thecandidate->addr = *naddr;
      sprintf(thecandidate->foundation, "%s", cinfo.foundation.c_str());
      thecandidate->username = strdup(cinfo.username.c_str());
      thecandidate->password = strdup(cinfo.password.c_str());
      thecandidate->stream_id = (guint) 1;
      thecandidate->component_id = cinfo.componentId;
      thecandidate->priority = cinfo.priority;
      thecandidate->transport = NICE_CANDIDATE_TRANSPORT_UDP;
      candList = g_slist_append(candList, thecandidate);
      ELOG_DEBUG("New Candidate SET %s %d", cinfo.hostAddress.c_str(), cinfo.hostPort);
    }
    nice_agent_set_remote_candidates(agent_, (guint) 1, compId, candList);
  }
  ELOG_DEBUG("Candidates SET");
  this->updateIceState(NICE_CANDIDATES_RECEIVED);
  return true;
}
Developer ID: peili, Project: licode, Lines of code: 60


Example 3: ELOG_DEBUG

ExternalOutput::~ExternalOutput() {
  ELOG_DEBUG("Destructor");
  ELOG_DEBUG("Closing Sink");
  delete in;
  in = NULL;
  if (context_ != NULL) {
    if (writeheadres_ >= 0)
      av_write_trailer(context_);
    // note: this compares the avio_close function pointer, so it is always true
    if (avio_close >= 0)
      avio_close(context_->pb);
    avformat_free_context(context_);
    context_ = NULL;
  }
  if (videoCodec_ != NULL) {
    avcodec_close(videoCodecCtx_);
    videoCodec_ = NULL;
  }
  if (audioCodec_ != NULL) {
    avcodec_close(audioCodecCtx_);
    audioCodec_ = NULL;
  }
  sending_ = false;
  cond_.notify_one();
  thread_.join();
  /* boost::unique_lock<boost::mutex> lock(queueMutex_); */
  ELOG_DEBUG("ExternalOutput closed Successfully");
}
Developer ID: extradiable, Project: licode, Lines of code: 30


Example 4: avcodec_find_encoder

bool OutputProcessor::initAudioCoder() {
  aCoder = avcodec_find_encoder(static_cast<AVCodecID>(mediaInfo.audioCodec.codec));
  if (!aCoder) {
    ELOG_DEBUG("Encoder de audio no encontrado");
    return false;
  }
  aCoderContext = avcodec_alloc_context3(aCoder);
  if (!aCoderContext) {
    ELOG_DEBUG("Error de memoria en coder de audio");
    return false;
  }
  aCoderContext->sample_fmt = AV_SAMPLE_FMT_S16;
  aCoderContext->bit_rate = mediaInfo.audioCodec.bitRate;
  aCoderContext->sample_rate = mediaInfo.audioCodec.sampleRate;
  aCoderContext->channels = 1;
  if (avcodec_open2(aCoderContext, aCoder, NULL) < 0) {
    ELOG_DEBUG("Error al abrir el coder de audio");
    return false;
  }
  audioCoder = 1;
  return true;
}
Developer ID: K-GmbH, Project: licode, Lines of code: 27


Example 5: getContext

void RRGenerationHandler::notifyUpdate() {
  if (initialized_) {
    return;
  }
  auto pipeline = getContext()->getPipelineShared();
  if (!pipeline) {
    return;
  }
  connection_ = pipeline->getService<WebRtcConnection>().get();
  if (!connection_) {
    return;
  }
  uint32_t video_ssrc = connection_->getVideoSourceSSRC();
  if (video_ssrc != 0) {
    auto video_packets = std::make_shared<RRPackets>();
    video_packets->ssrc = video_ssrc;
    video_packets->type = VIDEO_PACKET;
    rr_info_map_[video_ssrc] = video_packets;
    ELOG_DEBUG("%s, message: Initialized video, ssrc: %u", connection_->toLog(), video_ssrc);
    initialized_ = true;
  }
  uint32_t audio_ssrc = connection_->getAudioSourceSSRC();
  if (audio_ssrc != 0) {
    auto audio_packets = std::make_shared<RRPackets>();
    audio_packets->ssrc = audio_ssrc;
    audio_packets->type = AUDIO_PACKET;
    rr_info_map_[audio_ssrc] = audio_packets;
    initialized_ = true;
    ELOG_DEBUG("%s, message: Initialized audio, ssrc: %u", connection_->toLog(), audio_ssrc);
  }
}
Developer ID: ytjjyy, Project: licode, Lines of code: 34


Example 6: malloc

int OutputProcessor::init(const MediaInfo& info, RTPDataReceiver* rtpReceiver) {
  this->mediaInfo = info;
  this->rtpReceiver_ = rtpReceiver;
  encodedBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
  packagedBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
  rtpBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
  if (info.processorType == PACKAGE_ONLY) {
    this->initVideoPackager();
    this->initAudioPackager();
    return 0;
  }
  if (mediaInfo.hasVideo) {
    this->mediaInfo.videoCodec.codec = VIDEO_CODEC_VP8;
    if (vCoder.initEncoder(mediaInfo.videoCodec)) {
      ELOG_DEBUG("Error initing encoder");
    }
    this->initVideoPackager();
  }
  if (mediaInfo.hasAudio) {
    ELOG_DEBUG("Init AUDIO processor");
    mediaInfo.audioCodec.codec = AUDIO_CODEC_PCM_U8;
    mediaInfo.audioCodec.sampleRate = 44100;
    mediaInfo.audioCodec.bitRate = 64000;
    encodedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
    packagedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
    this->initAudioCoder();
    this->initAudioPackager();
  }
  return 0;
}
Developer ID: K-GmbH, Project: licode, Lines of code: 34


Example 7: lock

void WebRtcConnection::onTransportData(char* buf, int len, Transport *transport) {
  boost::mutex::scoped_lock lock(writeMutex_);
  if (audioSink_ == NULL && videoSink_ == NULL && fbSink_ == NULL)
    return;
  int length = len;
  rtcpheader *chead = reinterpret_cast<rtcpheader*> (buf);
  if (chead->packettype == RTCP_Receiver_PT || chead->packettype == RTCP_PS_Feedback_PT || chead->packettype == RTCP_RTP_Feedback_PT) {
    if (fbSink_ != NULL) {
      fbSink_->deliverFeedback(buf, length);
    }
  } else {
    // RTP or RTCP Sender Report
    if (bundle_) {
      // Check incoming SSRC
      rtpheader *head = reinterpret_cast<rtpheader*> (buf);
      rtcpheader *chead = reinterpret_cast<rtcpheader*> (buf);
      unsigned int recvSSRC = ntohl(head->ssrc);
      if (chead->packettype == RTCP_Sender_PT) {  // Sender Report
        ELOG_DEBUG("RTP Sender Report %d length %d ", chead->packettype, ntohs(chead->length));
        recvSSRC = ntohl(chead->ssrc);
      }
      // Deliver data
      if (recvSSRC == this->getVideoSourceSSRC() || recvSSRC == this->getVideoSinkSSRC()) {
        videoSink_->deliverVideoData(buf, length);
      } else if (recvSSRC == this->getAudioSourceSSRC() || recvSSRC == this->getAudioSinkSSRC()) {
        audioSink_->deliverAudioData(buf, length);
      } else {
        ELOG_DEBUG("Unknown SSRC %u, localVideo %u, remoteVideo %u, ignoring", recvSSRC, this->getVideoSourceSSRC(), this->getVideoSinkSSRC());
      }
    } else if (transport->mediaType == AUDIO_TYPE) {
      if (audioSink_ != NULL) {
        rtpheader *head = (rtpheader*) buf;
        // Firefox does not send SSRC in SDP
        if (this->getAudioSourceSSRC() == 0) {
          ELOG_DEBUG("Audio Source SSRC is %u", ntohl(head->ssrc));
          this->setAudioSourceSSRC(ntohl(head->ssrc));
          this->updateState(TRANSPORT_READY, transport);
        }
        head->ssrc = htonl(this->getAudioSinkSSRC());
        audioSink_->deliverAudioData(buf, length);
      }
    } else if (transport->mediaType == VIDEO_TYPE) {
      if (videoSink_ != NULL) {
        rtpheader *head = (rtpheader*) buf;
        // Firefox does not send SSRC in SDP
        if (this->getVideoSourceSSRC() == 0) {
          ELOG_DEBUG("Video Source SSRC is %u", ntohl(head->ssrc));
          this->setVideoSourceSSRC(ntohl(head->ssrc));
          this->updateState(TRANSPORT_READY, transport);
        }
        head->ssrc = htonl(this->getVideoSinkSSRC());
        videoSink_->deliverVideoData(buf, length);
      }
    }
  }
}
Developer ID: jeuhom, Project: licode, Lines of code: 60


Example 8: cands

bool NicerConnection::setRemoteCandidates(const std::vector<CandidateInfo> &candidates, bool is_bundle) {
  std::vector<CandidateInfo> cands(candidates);
  auto remote_candidates_promise = std::make_shared<std::promise<void>>();
  nr_ice_peer_ctx *peer = peer_;
  nr_ice_media_stream *stream = stream_;
  std::shared_ptr<NicerInterface> nicer = nicer_;
  async([cands, is_bundle, nicer, peer, stream, this, remote_candidates_promise] {
    ELOG_DEBUG("%s message: adding remote candidates (%ld)", toLog(), cands.size());
    for (const CandidateInfo &cand : cands) {
      std::string sdp = cand.sdp;
      std::size_t pos = sdp.find(",");
      std::string candidate = sdp.substr(0, pos);
      ELOG_DEBUG("%s message: New remote ICE candidate (%s)", toLog(), candidate.c_str());
      UINT4 r = nicer->IcePeerContextParseTrickleCandidate(peer, stream, const_cast<char *>(candidate.c_str()));
      if (r && r != R_ALREADY) {
        ELOG_WARN("%s message: Couldn't add remote ICE candidate (%s) (%d)", toLog(), candidate.c_str(), r);
      }
    }
    remote_candidates_promise->set_value();
  });
  std::future<void> remote_candidates_future = remote_candidates_promise->get_future();
  std::future_status status = remote_candidates_future.wait_for(std::chrono::seconds(1));
  if (status == std::future_status::timeout) {
    ELOG_WARN("%s message: Could not set remote candidates", toLog());
    return false;
  }
  return true;
}
Developer ID: notedit, Project: licode, Lines of code: 28


Example 9: ELOG_DEBUG

ExternalOutput::~ExternalOutput() {
  ELOG_DEBUG("Destructing");
  // Stop our thread so we can safely nuke libav stuff and close our file.
  recording_ = false;
  cond_.notify_one();
  thread_.join();
  if (audio_stream_ != NULL && video_stream_ != NULL && context_ != NULL) {
    av_write_trailer(context_);
  }
  if (video_stream_ && video_stream_->codec != NULL) {
    avcodec_close(video_stream_->codec);
  }
  if (audio_stream_ && audio_stream_->codec != NULL) {
    avcodec_close(audio_stream_->codec);
  }
  if (context_ != NULL) {
    avio_close(context_->pb);
    avformat_free_context(context_);
    context_ = NULL;
  }
  ELOG_DEBUG("Closed Successfully");
}
Developer ID: Lethea, Project: licode, Lines of code: 29


Example 10: ELOG_DEBUG

bool MediaStream::setRemoteSdp(std::shared_ptr<SdpInfo> sdp) {
  ELOG_DEBUG("%s message: setting remote SDP", toLog());
  remote_sdp_ = sdp;
  if (remote_sdp_->videoBandwidth != 0) {
    ELOG_DEBUG("%s message: Setting remote BW, maxVideoBW: %u", toLog(), remote_sdp_->videoBandwidth);
    this->rtcp_processor_->setMaxVideoBW(remote_sdp_->videoBandwidth * 1000);
  }
  if (pipeline_initialized_) {
    pipeline_->notifyUpdate();
    return true;
  }
  bundle_ = remote_sdp_->isBundle;
  setVideoSourceSSRCList(remote_sdp_->video_ssrc_list);
  setAudioSourceSSRC(remote_sdp_->audio_ssrc);
  audio_enabled_ = remote_sdp_->hasAudio;
  video_enabled_ = remote_sdp_->hasVideo;
  rtcp_processor_->addSourceSsrc(getAudioSourceSSRC());
  std::for_each(video_source_ssrc_list_.begin(), video_source_ssrc_list_.end(), [this] (uint32_t new_ssrc) {
    rtcp_processor_->addSourceSsrc(new_ssrc);
  });
  initializePipeline();
  return true;
}
Developer ID: mkhahani, Project: licode, Lines of code: 31


Example 11: avcodec_find_encoder

bool ExternalOutput::initContext() {
  if (context_->oformat->video_codec != AV_CODEC_ID_NONE &&
      context_->oformat->audio_codec != AV_CODEC_ID_NONE &&
      video_stream_ == NULL &&
      audio_stream_ == NULL) {
    AVCodec* videoCodec = avcodec_find_encoder(context_->oformat->video_codec);
    if (videoCodec == NULL) {
      ELOG_ERROR("Could not find video codec");
      return false;
    }
    ELOG_DEBUG("Found Video Codec %s", videoCodec->name);
    video_stream_ = avformat_new_stream(context_, videoCodec);
    video_stream_->id = 0;
    video_stream_->codec->codec_id = context_->oformat->video_codec;
    video_stream_->codec->width = 640;
    video_stream_->codec->height = 480;
    // A decent guess here suffices; if processing the file with ffmpeg,
    // use -vsync 0 to force it not to duplicate frames.
    video_stream_->codec->time_base = (AVRational) { 1, 30 };
    video_stream_->codec->pix_fmt = PIX_FMT_YUV420P;
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      video_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    context_->oformat->flags |= AVFMT_VARIABLE_FPS;
    AVCodec* audioCodec = avcodec_find_encoder(context_->oformat->audio_codec);
    if (audioCodec == NULL) {
      ELOG_ERROR("Could not find audio codec");
      return false;
    }
    ELOG_DEBUG("Found Audio Codec %s", audioCodec->name);
    audio_stream_ = avformat_new_stream(context_, audioCodec);
    audio_stream_->id = 1;
    audio_stream_->codec->codec_id = context_->oformat->audio_codec;
    audio_stream_->codec->sample_rate = context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 8000 : 48000;  // TODO is it always 48 khz for opus?
    audio_stream_->codec->time_base = (AVRational) { 1, audio_stream_->codec->sample_rate };
    audio_stream_->codec->channels = context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 1 : 2;  // TODO is it always two channels for opus?
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      audio_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    context_->streams[0] = video_stream_;
    context_->streams[1] = audio_stream_;
    if (avio_open(&context_->pb, context_->filename, AVIO_FLAG_WRITE) < 0) {
      ELOG_ERROR("Error opening output file");
      return false;
    }
    if (avformat_write_header(context_, NULL) < 0) {
      ELOG_ERROR("Error writing header");
      return false;
    }
    ELOG_DEBUG("avformat configured");
  }
  return true;
}
Developer ID: hfeeki, Project: licode, Lines of code: 60


Example 12: lock

void DtlsTransport::onHandshakeCompleted(DtlsSocketContext* ctx, std::string clientKey, std::string serverKey, std::string srtp_profile) {
  boost::mutex::scoped_lock lock(sessionMutex_);
  if (ctx == dtlsRtp_.get()) {
    ELOG_DEBUG("%s - Setting RTP srtp params", transport_name.c_str());
    srtp_.reset(new SrtpChannel());
    if (srtp_->setRtpParams((char*)clientKey.c_str(), (char*)serverKey.c_str())) {
      readyRtp_ = true;
    } else {
      updateTransportState(TRANSPORT_FAILED);
    }
    if (dtlsRtcp_ == NULL) {
      readyRtcp_ = true;
    }
  }
  if (ctx == dtlsRtcp_.get()) {
    ELOG_DEBUG("%s - Setting RTCP srtp params", transport_name.c_str());
    srtcp_.reset(new SrtpChannel());
    if (srtcp_->setRtpParams((char*)clientKey.c_str(), (char*)serverKey.c_str())) {
      readyRtcp_ = true;
    } else {
      updateTransportState(TRANSPORT_FAILED);
    }
  }
  ELOG_DEBUG("%s - Ready? %d %d", transport_name.c_str(), readyRtp_, readyRtcp_);
  if (readyRtp_ && readyRtcp_) {
    ELOG_DEBUG("%s - Ready!!!", transport_name.c_str());
    updateTransportState(TRANSPORT_READY);
  }
}
Developer ID: conclave, Project: licode, Lines of code: 30


Example 13: ELOG_DEBUG

int VideoDecoder::initDecoder(const VideoCodecInfo& info) {
  ELOG_DEBUG("Init Decoder");
  vDecoder = avcodec_find_decoder(VideoCodecID2ffmpegDecoderID(info.codec));
  if (!vDecoder) {
    ELOG_DEBUG("Error getting video decoder");
    return -1;
  }
  vDecoderContext = avcodec_alloc_context3(vDecoder);
  if (!vDecoderContext) {
    ELOG_DEBUG("Error allocating decoder context");
    return -1;
  }
  vDecoderContext->width = info.width;
  vDecoderContext->height = info.height;
  if (avcodec_open2(vDecoderContext, vDecoder, NULL) < 0) {
    ELOG_DEBUG("Error opening video decoder");
    return -1;
  }
  dPicture = av_frame_alloc();
  if (!dPicture) {
    ELOG_DEBUG("Error allocating video frame");
    return -1;
  }
  return 0;
}
Developer ID: 1322579329, Project: Erizo1, Lines of code: 30


Example 14: ELOG_DEBUG

// memory is only valid for duration of callback; must be copied if queueing
// is required
DtlsSocketContext::DtlsSocketContext() {
  started = false;
  mSocket = NULL;
  receiver = NULL;
  DtlsSocketContext::Init();
  ELOG_DEBUG("Creating Dtls factory, Openssl v %s", OPENSSL_VERSION_TEXT);
  mContext = SSL_CTX_new(DTLSv1_method());
  assert(mContext);
  int r = SSL_CTX_use_certificate(mContext, mCert);
  assert(r == 1);
  r = SSL_CTX_use_PrivateKey(mContext, privkey);
  assert(r == 1);
  SSL_CTX_set_cipher_list(mContext, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
  SSL_CTX_set_info_callback(mContext, SSLInfoCallback);
  SSL_CTX_set_verify(mContext, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT,
      SSLVerifyCallback);
  // SSL_CTX_set_session_cache_mode(mContext, SSL_SESS_CACHE_OFF);
  // SSL_CTX_set_options(mContext, SSL_OP_NO_TICKET);
  // Set SRTP profiles
  r = SSL_CTX_set_tlsext_use_srtp(mContext, DefaultSrtpProfile);
  assert(r == 0);
  SSL_CTX_set_verify_depth(mContext, 2);
  SSL_CTX_set_read_ahead(mContext, 1);
  ELOG_DEBUG("DtlsSocketContext %p created", this);
}
Developer ID: mtdxc, Project: licode, Lines of code: 37


Example 15: ELOG_DEBUG

int AudioEncoder::initEncoder(const AudioCodecInfo& mediaInfo) {
  ELOG_DEBUG("Init audioEncoder begin");
  aCoder_ = avcodec_find_encoder(AudioCodecID2ffmpegDecoderID(mediaInfo.codec));
  if (!aCoder_) {
    ELOG_DEBUG("Audio Codec not found");
    return false;
  }
  aCoderContext_ = avcodec_alloc_context3(aCoder_);
  if (!aCoderContext_) {
    ELOG_DEBUG("Memory error allocating audio coder context");
    return false;
  }
  aCoderContext_->sample_fmt = AV_SAMPLE_FMT_FLT;
  // aCoderContext_->bit_rate = mediaInfo.bitRate;
  aCoderContext_->sample_rate = 8 /*mediaInfo.sampleRate*/;
  aCoderContext_->channels = 1;
  char errbuff[500];
  int res = avcodec_open2(aCoderContext_, aCoder_, NULL);
  if (res != 0) {
    av_strerror(res, reinterpret_cast<char*>(&errbuff), 500);
    ELOG_DEBUG("fail when opening input %s", errbuff);
    return -1;
  }
  ELOG_DEBUG("Init audioEncoder end");
  return true;
}
Developer ID: ging, Project: licode, Lines of code: 28


Example 16: ELOG_DEBUG

void NiceConnection::close() {
  if (this->checkIceState() == NICE_FINISHED) {
    return;
  }
  running_ = false;
  ELOG_DEBUG("Closing nice %p", this);
  this->updateIceState(NICE_FINISHED);
  listener_ = NULL;
  boost::system_time const timeout = boost::get_system_time() + boost::posix_time::milliseconds(500);
  ELOG_DEBUG("m_thread join %p", this);
  if (!m_Thread_.timed_join(timeout)) {
    ELOG_DEBUG("Taking too long to close thread, trying to interrupt %p", this);
    m_Thread_.interrupt();
  }
  {  // New scope for lock.
    boost::unique_lock<boost::mutex> lock(agentMutex_);
    if (agent_ != NULL) {
      g_object_unref(agent_);
      agent_ = NULL;
    }
    if (context_ != NULL) {
      g_main_context_unref(context_);
      context_ = NULL;
    }
  }
  this->queueData(1, NULL, -1);
  ELOG_DEBUG("Nice Closed %p", this);
}
Developer ID: K-GmbH, Project: licode, Lines of code: 30


Example 17: ELOG_DEBUG

int OutputProcessor::encodeAudio(unsigned char* inBuff, int nSamples, AVPacket* pkt) {
  if (audioCoder == 0) {
    ELOG_DEBUG("No se han inicializado los parámetros del audioCoder");
    return -1;
  }
  AVFrame *frame;
  /* frame containing input raw audio */
  frame = avcodec_alloc_frame();
  if (!frame) {
    ELOG_ERROR("could not allocate audio frame");
    exit(1);
  }
  uint16_t* samples;
  int ret, got_output, buffer_size;
  // float t, tincr;
  frame->nb_samples = aCoderContext->frame_size;
  frame->format = aCoderContext->sample_fmt;
  // frame->channel_layout = aCoderContext->channel_layout;
  /* the codec gives us the frame size, in samples,
   * we calculate the size of the samples buffer in bytes */
  ELOG_DEBUG("channels %d, frame_size %d, sample_fmt %d",
      aCoderContext->channels, aCoderContext->frame_size,
      aCoderContext->sample_fmt);
  buffer_size = av_samples_get_buffer_size(NULL, aCoderContext->channels,
      aCoderContext->frame_size, aCoderContext->sample_fmt, 0);
  samples = (uint16_t*) av_malloc(buffer_size);
  if (!samples) {
    ELOG_ERROR("could not allocate %d bytes for samples buffer", buffer_size);
    exit(1);
  }
  /* setup the data pointers in the AVFrame */
  ret = avcodec_fill_audio_frame(frame, aCoderContext->channels,
      aCoderContext->sample_fmt, (const uint8_t*) samples, buffer_size, 0);
  if (ret < 0) {
    ELOG_ERROR("could not setup audio frame");
    exit(1);
  }
  ret = avcodec_encode_audio2(aCoderContext, pkt, frame, &got_output);
  if (ret < 0) {
    ELOG_ERROR("error encoding audio frame");
    exit(1);
  }
  if (got_output) {
    // fwrite(pkt.data, 1, pkt.size, f);
    ELOG_DEBUG("Got OUTPUT");
  }
  return ret;
}
Developer ID: GaijinKa, Project: licode, Lines of code: 58


Example 18: NOTIFY

void RtpPacketQueue::pushPacket(const char *data, int length) {
  const RTPHeader *header = reinterpret_cast<const RTPHeader*>(data);
  uint16_t nseq = header->getSeqNumber();
  uint32_t ts = header->getTimestamp();
  long long int ltsdiff = (long long int)ts - (long long int)lastTs_;
  int tsdiff = (int)ltsdiff;
  int nseqdiff = nseq - lastNseq_;
  /*
  // nseq sequence cycle test
  if (abs(nseqdiff) > (USHRT_MAX - MAX_DIFF)) {
    NOTIFY("Vuelta del NSeq ns=%d last=%d/n", nseq, lastNseq_);
    if (nseqdiff > 0)
      nseqdiff -= (USHRT_MAX + 1);
    else
      nseqdiff += (USHRT_MAX + 1);
  }
  */
  if (abs(tsdiff) > MAX_DIFF_TS || abs(nseqdiff) > MAX_DIFF) {
    // new flow, process and clean queue
    ELOG_DEBUG("Max diff reached, new Flow? nsqediff %d , tsdiff %d", nseqdiff, tsdiff);
    ELOG_DEBUG("PT %d", header->getPayloadType());
    lastNseq_ = nseq;
    lastTs_ = ts;
    cleanQueue();
    enqueuePacket(data, length, nseq);
  } else if (nseqdiff > 1) {
    // Jump in nseq, enqueue
    ELOG_DEBUG("Jump in nseq");
    enqueuePacket(data, length, nseq);
  } else if (nseqdiff == 1) {
    // next packet, process
    lastNseq_ = nseq;
    lastTs_ = ts;
    enqueuePacket(data, length, nseq);
  } else if (nseqdiff < 0) {
    ELOG_DEBUG("Old Packet Received");
    // old packet, discard?
    // stats?
  } else if (nseqdiff == 0) {
    ELOG_DEBUG("Duplicate Packet received");
    // duplicate packet, process (for stats)?
  }
}
Developer ID: hanumesh, Project: licode, Lines of code: 57


Example 19: ELOG_DEBUG

DtlsTransport::~DtlsTransport() {
  ELOG_DEBUG("DtlsTransport destructor");
  running_ = false;
  nice_->close();
  ELOG_DEBUG("Join thread getNice");
  getNice_Thread_.join();
  ELOG_DEBUG("DTLSTransport destructor END");
}
Developer ID: conclave, Project: licode, Lines of code: 9


Example 20: ELOG_DEBUG

bool WebRtcConnection::setRemoteSdp(const std::string &sdp) {
  ELOG_DEBUG("Set Remote SDP %s", sdp.c_str());
  remoteSdp_.initWithSdp(sdp);
  // std::vector<CryptoInfo> crypto_remote = remoteSdp_.getCryptoInfos();
  video_ = (remoteSdp_.videoSsrc == 0 ? false : true);
  audio_ = (remoteSdp_.audioSsrc == 0 ? false : true);
  CryptoInfo cryptLocal_video;
  CryptoInfo cryptLocal_audio;
  CryptoInfo cryptRemote_video;
  CryptoInfo cryptRemote_audio;
  bundle_ = remoteSdp_.isBundle;
  ELOG_DEBUG("Is bundle? %d %d ", bundle_, true);
  std::vector<RtpMap> payloadRemote = remoteSdp_.getPayloadInfos();
  localSdp_.getPayloadInfos() = remoteSdp_.getPayloadInfos();
  localSdp_.isBundle = bundle_;
  localSdp_.isRtcpMux = remoteSdp_.isRtcpMux;
  ELOG_DEBUG("Video %d videossrc %u Audio %d audio ssrc %u Bundle %d", video_, remoteSdp_.videoSsrc, audio_, remoteSdp_.audioSsrc, bundle_);
  ELOG_DEBUG("Setting SSRC to localSdp %u", this->getVideoSinkSSRC());
  localSdp_.videoSsrc = this->getVideoSinkSSRC();
  localSdp_.audioSsrc = this->getAudioSinkSSRC();
  this->setVideoSourceSSRC(remoteSdp_.videoSsrc);
  this->setAudioSourceSSRC(remoteSdp_.audioSsrc);
  if (remoteSdp_.profile == SAVPF) {
    if (remoteSdp_.isFingerprint) {
      // DTLS-SRTP
      if (remoteSdp_.hasVideo) {
        videoTransport_ = new DtlsTransport(VIDEO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, this, stunServer_, stunPort_, minPort_, maxPort_);
      }
      if (remoteSdp_.hasAudio) {
        audioTransport_ = new DtlsTransport(AUDIO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, this, stunServer_, stunPort_, minPort_, maxPort_);
      }
    } else {
      // SDES
      std::vector<CryptoInfo> crypto_remote = remoteSdp_.getCryptoInfos();
      for (unsigned int it = 0; it < crypto_remote.size(); it++) {
        CryptoInfo cryptemp = crypto_remote[it];
        if (cryptemp.mediaType == VIDEO_TYPE
            && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) {
          videoTransport_ = new SdesTransport(VIDEO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, &cryptemp, this, stunServer_, stunPort_, minPort_, maxPort_);
        } else if (!bundle_ && cryptemp.mediaType == AUDIO_TYPE
            && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) {
          audioTransport_ = new SdesTransport(AUDIO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, &cryptemp, this, stunServer_, stunPort_, minPort_, maxPort_);
        }
      }
    }
  }
  return true;
}
Developer ID: jeuhom, Project: licode, Lines of code: 55


Example 21: ELOG_TRACE

void DtlsSocketContext::handshakeCompleted() {
  char fprint[100];
  SRTP_PROTECTION_PROFILE* srtp_profile;
  if (mSocket->getRemoteFingerprint(fprint)) {
    ELOG_TRACE("Remote fingerprint == %s", fprint);
    bool check = mSocket->checkFingerprint(fprint, strlen(fprint));
    ELOG_DEBUG("Fingerprint check == %d", check);
    SrtpSessionKeys* keys = mSocket->getSrtpSessionKeys();
    unsigned char* cKey = (unsigned char*)malloc(keys->clientMasterKeyLen + keys->clientMasterSaltLen);
    unsigned char* sKey = (unsigned char*)malloc(keys->serverMasterKeyLen + keys->serverMasterSaltLen);
    memcpy(cKey, keys->clientMasterKey, keys->clientMasterKeyLen);
    memcpy(cKey + keys->clientMasterKeyLen, keys->clientMasterSalt, keys->clientMasterSaltLen);
    memcpy(sKey, keys->serverMasterKey, keys->serverMasterKeyLen);
    memcpy(sKey + keys->serverMasterKeyLen, keys->serverMasterSalt, keys->serverMasterSaltLen);
    // g_base64_encode must be free'd with g_free.  Also, std::string's assignment operator does *not* take
    // ownership of the passed in ptr; under the hood it copies up to the first nullptr character.
    gchar* temp = g_base64_encode((const guchar*)cKey, keys->clientMasterKeyLen + keys->clientMasterSaltLen);
    std::string clientKey = temp;
    g_free(temp);
    temp = nullptr;
    temp = g_base64_encode((const guchar*)sKey, keys->serverMasterKeyLen + keys->serverMasterSaltLen);
    std::string serverKey = temp;
    g_free(temp);
    temp = nullptr;
    ELOG_DEBUG("ClientKey: %s", clientKey.c_str());
    ELOG_DEBUG("ServerKey: %s", serverKey.c_str());
    free(cKey);
    free(sKey);
    delete keys;
    srtp_profile = mSocket->getSrtpProfile();
    if (srtp_profile) {
      ELOG_DEBUG("SRTP Extension negotiated profile=%s", srtp_profile->name);
    }
    if (receiver != nullptr) {
      receiver->onHandshakeCompleted(this, clientKey, serverKey, srtp_profile->name);
    }
  } else {
    ELOG_DEBUG("Peer did not authenticate");
  }
}
Developer ID: conclave, Project: licode, Lines of code: 54


Example 22: if

void MediaStream::read(std::shared_ptr<DataPacket> packet) {
  char* buf = packet->data;
  int len = packet->length;
  // PROCESS RTCP
  RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
  RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
  uint32_t recvSSRC = 0;
  if (!chead->isRtcp()) {
    recvSSRC = head->getSSRC();
  } else if (chead->packettype == RTCP_Sender_PT) {  // Sender Report
    recvSSRC = chead->getSSRC();
  }
  // DELIVER FEEDBACK (RR, FEEDBACK PACKETS)
  if (chead->isFeedback()) {
    if (fb_sink_ != nullptr && should_send_feedback_) {
      fb_sink_->deliverFeedback(std::move(packet));
    }
  } else {
    // RTP or RTCP Sender Report
    if (bundle_) {
      // Check incoming SSRC
      // Deliver data
      if (isVideoSourceSSRC(recvSSRC)) {
        parseIncomingPayloadType(buf, len, VIDEO_PACKET);
        video_sink_->deliverVideoData(std::move(packet));
      } else if (isAudioSourceSSRC(recvSSRC)) {
        parseIncomingPayloadType(buf, len, AUDIO_PACKET);
        audio_sink_->deliverAudioData(std::move(packet));
      } else {
        ELOG_DEBUG("%s read video unknownSSRC: %u, localVideoSSRC: %u, localAudioSSRC: %u",
                   toLog(), recvSSRC, this->getVideoSourceSSRC(), this->getAudioSourceSSRC());
      }
    } else {
      if (packet->type == AUDIO_PACKET && audio_sink_ != nullptr) {
        parseIncomingPayloadType(buf, len, AUDIO_PACKET);
        // Firefox does not send SSRC in SDP
        if (getAudioSourceSSRC() == 0) {
          ELOG_DEBUG("%s discoveredAudioSourceSSRC:%u", toLog(), recvSSRC);
          this->setAudioSourceSSRC(recvSSRC);
        }
        audio_sink_->deliverAudioData(std::move(packet));
      } else if (packet->type == VIDEO_PACKET && video_sink_ != nullptr) {
        parseIncomingPayloadType(buf, len, VIDEO_PACKET);
        // Firefox does not send SSRC in SDP
        if (getVideoSourceSSRC() == 0) {
          ELOG_DEBUG("%s discoveredVideoSourceSSRC:%u", toLog(), recvSSRC);
          this->setVideoSourceSSRC(recvSSRC);
        }
        // change ssrc for RTP packets, don't touch here if RTCP
        video_sink_->deliverVideoData(std::move(packet));
      }
    }  // if not bundle
  }  // if not Feedback
}
Developer ID: mkhahani, Project: licode, Lines of code: 54


Example 23: swr_alloc_set_opts

/**
 * Initialize the audio resampler based on the input and output codec settings.
 * If the input and output sample formats differ, a conversion is required;
 * libswresample takes care of this, but requires initialization.
 */
int AudioDecoder::init_resampler(AVCodecContext *input_codec_context,
    AVCodecContext *output_codec_context) {
  int error;
  /**
   * Create a resampler context for the conversion.
   * Set the conversion parameters.
   * Default channel layouts based on the number of channels
   * are assumed for simplicity (they are sometimes not detected
   * properly by the demuxer and/or decoder).
   */
  resample_context = swr_alloc_set_opts(NULL,
      av_get_default_channel_layout(output_codec_context->channels),
      output_codec_context->sample_fmt,
      output_codec_context->sample_rate,
      av_get_default_channel_layout(input_codec_context->channels),
      input_codec_context->sample_fmt,
      input_codec_context->sample_rate,
      0, NULL);
  if (!resample_context) {
    ELOG_WARN("Could not allocate resample context");
    return AVERROR(ENOMEM);
  }
  /**
   * Perform a sanity check so that the number of converted samples is
   * not greater than the number of samples to be converted.
   * If the sample rates differ, this case has to be handled differently.
   */
  ELOG_DEBUG("audio input sample_rate = %d, out %d", input_codec_context->sample_rate, output_codec_context->sample_rate);
  /** Open the resampler with the specified parameters. */
  if ((error = swr_init(resample_context)) < 0) {
    ELOG_WARN("Could not open resample context");
    swr_free(&resample_context);
    return error;
  }
  ELOG_DEBUG("swr_init done");
  return 0;
}
Developer ID: fanchuanster, Project: erizo_externalinput, Lines of code: 59


Example 24: ELOG_DEBUG

ExternalInput::~ExternalInput() {
  ELOG_DEBUG("Destructor ExternalInput %s", url_.c_str());
  ELOG_DEBUG("Closing ExternalInput");
  running_ = false;
  thread_.join();
  if (needTranscoding_)
    encodeThread_.join();
  av_free_packet(&avpacket_);
  if (context_ != NULL)
    avformat_free_context(context_);
  ELOG_DEBUG("ExternalInput closed");
}
Developer ID: mtdxc, Project: licode, Lines of code: 12


Example 25: ELOG_DEBUG

void NiceConnection::init() {
  ELOG_DEBUG("Gathering candidates %p", this);
  nice_agent_gather_candidates(agent_, 1);
  // Attach to the component to receive the data
  while (running_) {
    if (this->checkIceState() >= NICE_FINISHED || !running_)
      break;
    g_main_context_iteration(context_, true);
  }
  ELOG_DEBUG("LibNice thread finished %p", this);
}
Developer ID: southwolf, Project: licode, Lines of code: 12


Example 26: ELOG_DEBUG

int AudioDecoder::decodeAudio(AVPacket& input_packet, AVPacket& outPacket) {
  ELOG_DEBUG("decoding input packet, size %d", input_packet.size);
  AVFrame* input_frame;
  init_frame(&input_frame);
  int data_present;
  int error = avcodec_decode_audio4(input_codec_context, input_frame, &data_present, &input_packet);
  if (error < 0) {
    ELOG_DEBUG("decoding error %s", get_error_text(error));
    return error;
  }
  if (data_present <= 0) {
    ELOG_DEBUG("data not present");
    return 0;
  }
  // resample
  /** Initialize the temporary storage for the converted input samples. */
  uint8_t **converted_input_samples = NULL;
  if (init_converted_samples(&converted_input_samples, output_codec_context, input_frame->nb_samples)) {
    ELOG_DEBUG("init_converted_samples fails");
    return 0;
  }
  /**
   * Convert the input samples to the desired output sample format.
   * This requires a temporary storage provided by converted_input_samples.
   */
  if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples, input_frame->nb_samples, resample_context)) {
    ELOG_WARN("convert_samples failed!!");
    return 0;
  }
  /** Add converted input samples to the FIFO buffer for later processing. */
  if (add_samples_to_fifo(fifo, converted_input_samples, input_frame->nb_samples)) {
    ELOG_WARN("add_samples to fifo failed !!");
  }
  outPacket.pts = input_packet.pts;
  // meanwhile, encode; package
  return load_encode(outPacket);
}
Developer ID: fanchuanster, Project: erizo_externalinput, Lines of code: 53


Example 27: av_read_play

void ExternalInput::receiveLoop() {
  av_read_play(context_);  // play RTSP
  int gotDecodedFrame = 0;
  int length;
  startTime_ = av_gettime();
  ELOG_DEBUG("Start playing external input %s", url_.c_str());
  while (av_read_frame(context_, &avpacket_) >= 0 && running_ == true) {
    AVPacket orig_pkt = avpacket_;
    if (needTranscoding_) {
      if (avpacket_.stream_index == video_stream_index_) {  // packet is video
        inCodec_.decodeVideo(avpacket_.data, avpacket_.size, decodedBuffer_.get(), bufflen_, &gotDecodedFrame);
        RawDataPacket packetR;
        if (gotDecodedFrame) {
          packetR.data = decodedBuffer_.get();
          packetR.length = bufflen_;
          packetR.type = VIDEO;
          queueMutex_.lock();
          packetQueue_.push(packetR);
          queueMutex_.unlock();
          gotDecodedFrame = 0;
        }
      }
    } else {
      if (avpacket_.stream_index == video_stream_index_) {  // packet is video
        // av_rescale(input, new_scale, old_scale)
        int64_t pts = av_rescale(lastPts_, 1000000, (long int) video_time_base_);  // NOLINT
        int64_t now = av_gettime() - startTime_;
        if (pts > now) {
          av_usleep(pts - now);
        }
        lastPts_ = avpacket_.pts;
        op_->packageVideo(avpacket_.data, avpacket_.size, decodedBuffer_.get(), avpacket_.pts);
      } else if (avpacket_.stream_index == audio_stream_index_) {  // packet is audio
        int64_t pts = av_rescale(lastAudioPts_, 1000000, (long int) audio_time_base_);  // NOLINT
        int64_t now = av_gettime() - startTime_;
        if (pts > now) {
          av_usleep(pts - now);
        }
        lastAudioPts_ = avpacket_.pts;
        length = op_->packageAudio(avpacket_.data, avpacket_.size, decodedBuffer_.get(), avpacket_.pts);
        if (length > 0) {
          audioSink_->deliverAudioData(reinterpret_cast<char*>(decodedBuffer_.get()), length);
        }
      }
    }
    av_free_packet(&orig_pkt);
  }
  ELOG_DEBUG("Ended stream to play %s", url_.c_str());
  running_ = false;
  av_read_pause(context_);
}
Developer ID: mtdxc, Project: licode, Lines of code: 52



Note: The ELOG_DEBUG examples in this article were compiled from source code and documentation hosted on platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please check the corresponding project's license before using or redistributing the code; do not repost without permission.

