Original source: WebRTC Video Receiver (1) - Module Creation Analysis

1) Preface

2) Creation Flow of WebRtcVideoReceiveStream

bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp,
                                        bool default_stream) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  .....
  uint32_t ssrc = sp.first_ssrc();
  ....  
  receive_streams_[ssrc] = new WebRtcVideoReceiveStream(
      this, call_, sp, std::move(config), decoder_factory_, default_stream,
      recv_codecs_, flexfec_config);
  return true;
}

WebRtcVideoChannel::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream(
    WebRtcVideoChannel* channel,
    webrtc::Call* call,
    const StreamParams& sp,
    webrtc::VideoReceiveStream::Config config,
    webrtc::VideoDecoderFactory* decoder_factory,
    bool default_stream,
    const std::vector<VideoCodecSettings>& recv_codecs,
    const webrtc::FlexfecReceiveStream::Config& flexfec_config)
    : channel_(channel),
      call_(call),
      stream_params_(sp),
      stream_(NULL),
      default_stream_(default_stream),
      config_(std::move(config)),
      flexfec_config_(flexfec_config),
      flexfec_stream_(nullptr),
      decoder_factory_(decoder_factory),
      sink_(NULL),
      first_frame_timestamp_(-1),
      estimated_remote_start_ntp_time_ms_(0) {
  config_.renderer = this;
  ConfigureCodecs(recv_codecs);
  ConfigureFlexfecCodec(flexfec_config.payload_type);
  MaybeRecreateWebRtcFlexfecStream();
  RecreateWebRtcVideoStream();
}

void WebRtcVideoChannel::WebRtcVideoReceiveStream::ConfigureCodecs(
    const std::vector<VideoCodecSettings>& recv_codecs) {
  RTC_DCHECK(!recv_codecs.empty());
  config_.decoders.clear();
  config_.rtp.rtx_associated_payload_types.clear();
  config_.rtp.raw_payload_types.clear();
  for (const auto& recv_codec : recv_codecs) {
    webrtc::SdpVideoFormat video_format(recv_codec.codec.name,
                                        recv_codec.codec.params);
    webrtc::VideoReceiveStream::Decoder decoder;
    decoder.decoder_factory = decoder_factory_;
    decoder.video_format = video_format;
    decoder.payload_type = recv_codec.codec.id;
    config_.decoders.push_back(decoder);
    config_.rtp.rtx_associated_payload_types[recv_codec.rtx_payload_type] =
        recv_codec.codec.id;
    if (recv_codec.codec.packetization == kPacketizationParamRaw) {
      config_.rtp.raw_payload_types.insert(recv_codec.codec.id);
    }
  }
  const auto& codec = recv_codecs.front();
  config_.rtp.ulpfec_payload_type = codec.ulpfec.ulpfec_payload_type;
  config_.rtp.red_payload_type = codec.ulpfec.red_payload_type;
  config_.rtp.lntf.enabled = HasLntf(codec.codec);
  config_.rtp.nack.rtp_history_ms = HasNack(codec.codec) ? kNackHistoryMs : 0;
  config_.rtp.rtcp_xr.receiver_reference_time_report = HasRrtr(codec.codec);
  if (codec.ulpfec.red_rtx_payload_type != -1) {
    config_.rtp
        .rtx_associated_payload_types[codec.ulpfec.red_rtx_payload_type] =
        codec.ulpfec.red_payload_type;
  }
}

void WebRtcVideoChannel::WebRtcVideoReceiveStream::ConfigureFlexfecCodec(
    int flexfec_payload_type) {
  flexfec_config_.payload_type = flexfec_payload_type;
}
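
To make the mappings concrete, here is a sketch of what ConfigureCodecs leaves in the config for an H264 + RTX + RED/ULPFEC negotiation. All payload-type numbers are hypothetical, for illustration only:

// Hypothetical payload types: H264 = 107, its RTX = 99, RED = 108,
// RED's RTX = 109, ULPFEC = 110.
webrtc::VideoReceiveStream::Config config(nullptr /* rtcp_send_transport */);
config.rtp.rtx_associated_payload_types[99] = 107;   // RTX 99 carries PT 107.
config.rtp.rtx_associated_payload_types[109] = 108;  // RED's RTX maps to RED.
config.rtp.red_payload_type = 108;
config.rtp.ulpfec_payload_type = 110;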

3) Creation and Initialization of webrtc::VideoReceiveStream

webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream(
    webrtc::VideoReceiveStream::Config configuration) {
  TRACE_EVENT0("webrtc", "Call::CreateVideoReceiveStream");
  RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);
  // If transport-wide CC is negotiated, periodic receive-side feedback is not sent.
  receive_side_cc_.SetSendPeriodicFeedback(
      SendPeriodicFeedback(configuration.rtp.extensions));
  RegisterRateObserver();
  VideoReceiveStream* receive_stream = new VideoReceiveStream(
      task_queue_factory_, &video_receiver_controller_, num_cpu_cores_,
      transport_send_ptr_->packet_router(), std::move(configuration),
      module_process_thread_.get(), call_stats_.get(), clock_);
  const webrtc::VideoReceiveStream::Config& config = receive_stream->config();
  {
    WriteLockScoped write_lock(*receive_crit_);
    if (config.rtp.rtx_ssrc) {
      // We record identical config for the rtx stream as for the main
      // stream. Since the transport_send_cc negotiation is per payload
      // type, we may get an incorrect value for the rtx stream, but
      // that is unlikely to matter in practice.
      receive_rtp_config_.emplace(config.rtp.rtx_ssrc,
                                  ReceiveRtpConfig(config));
    }
    receive_rtp_config_.emplace(config.rtp.remote_ssrc,
                                ReceiveRtpConfig(config));
    video_receive_streams_.insert(receive_stream);
    ConfigureSync(config.sync_group);
  }
  receive_stream->SignalNetworkState(video_network_state_);
  UpdateAggregateNetworkState();
  event_log_->Log(std::make_unique<RtcEventVideoReceiveStreamConfig>(
      CreateRtcLogStreamConfig(config)));
  return receive_stream;
}

VideoReceiveStream::VideoReceiveStream(
    TaskQueueFactory* task_queue_factory,
    RtpStreamReceiverControllerInterface* receiver_controller,
    int num_cpu_cores,
    PacketRouter* packet_router,
    VideoReceiveStream::Config config,
    ProcessThread* process_thread,
    CallStats* call_stats,
    Clock* clock,
    VCMTiming* timing)
    : task_queue_factory_(task_queue_factory),
      ....
      rtp_video_stream_receiver_(clock_,
                                  &transport_adapter_,
                                  call_stats,
                                  packet_router,
                                  &config_,
                                  rtp_receive_statistics_.get(),
                                  &stats_proxy_,
                                  process_thread_,
                                  this,     // NackSender
                                  nullptr,  // Use default KeyFrameRequestSender
                                  this,     // OnCompleteFrameCallback
                                  config_.frame_decryptor),
      .... {
  RTC_LOG(LS_INFO) << "VideoReceiveStream: " << config_.ToString();
  ....
  if (config_.media_transport()) {
    config_.media_transport()->SetReceiveVideoSink(this);
    config_.media_transport()->AddRttObserver(this);
  } else {
    // Register with RtpStreamReceiverController.
    media_receiver_ = receiver_controller->CreateReceiver(
        config_.rtp.remote_ssrc, &rtp_video_stream_receiver_);
    if (config_.rtp.rtx_ssrc) {
      rtx_receive_stream_ = std::make_unique<RtxReceiveStream>(
          &rtp_video_stream_receiver_, config.rtp.rtx_associated_payload_types,
          config_.rtp.remote_ssrc, rtp_receive_statistics_.get());
      rtx_receiver_ = receiver_controller->CreateReceiver(
          config_.rtp.rtx_ssrc, rtx_receive_stream_.get());
    } else {
      rtp_receive_statistics_->EnableRetransmitDetection(config.rtp.remote_ssrc,
                                                          true);
    }
  }
}

4) Analysis of the VideoReceiveStream Constructor

VideoReceiveStream::VideoReceiveStream(
    TaskQueueFactory* task_queue_factory,
    RtpStreamReceiverControllerInterface* receiver_controller,
    int num_cpu_cores,
    PacketRouter* packet_router,
    VideoReceiveStream::Config config,
    ProcessThread* process_thread,
    CallStats* call_stats,
    Clock* clock,
    VCMTiming* timing)
    : task_queue_factory_(task_queue_factory),
      transport_adapter_(config.rtcp_send_transport),
      config_(std::move(config)),
      num_cpu_cores_(num_cpu_cores),
      process_thread_(process_thread),
      clock_(clock),
      call_stats_(call_stats),
      source_tracker_(clock_),
      stats_proxy_(&config_, clock_),
      rtp_receive_statistics_(ReceiveStatistics::Create(clock_)),
      timing_(timing),
      video_receiver_(clock_, timing_.get()),
      rtp_video_stream_receiver_(clock_,
                                  &transport_adapter_,
                                  call_stats,
                                  packet_router,
                                  &config_,
                                  rtp_receive_statistics_.get(),
                                  &stats_proxy_,
                                  process_thread_,
                                  this,     // NackSender
                                  nullptr,  // Use default KeyFrameRequestSender
                                  this,     // OnCompleteFrameCallback
                                  config_.frame_decryptor),
      rtp_stream_sync_(this),
      max_wait_for_keyframe_ms_(KeyframeIntervalSettings::ParseFromFieldTrials()
                                    .MaxWaitForKeyframeMs()
                                    .value_or(kMaxWaitForKeyFrameMs)),
      max_wait_for_frame_ms_(KeyframeIntervalSettings::ParseFromFieldTrials()
                                  .MaxWaitForFrameMs()
                                  .value_or(kMaxWaitForFrameMs)),
      decode_queue_(task_queue_factory_->CreateTaskQueue(
          "DecodingQueue",
          TaskQueueFactory::Priority::HIGH)) {
  .....        
  module_process_sequence_checker_.Detach();
  network_sequence_checker_.Detach();
  std::set<int> decoder_payload_types;
  for (const Decoder& decoder : config_.decoders) {
    .....
    decoder_payload_types.insert(decoder.payload_type);
  }
  timing_->set_render_delay(config_.render_delay_ms);
  frame_buffer_.reset(
      new video_coding::FrameBuffer(clock_, timing_.get(), &stats_proxy_));
  process_thread_->RegisterModule(&rtp_stream_sync_, RTC_FROM_HERE);
  if (config_.media_transport()) {
    config_.media_transport()->SetReceiveVideoSink(this);
    config_.media_transport()->AddRttObserver(this);
  } else {
    // Register with RtpStreamReceiverController.
    media_receiver_ = receiver_controller->CreateReceiver(
        config_.rtp.remote_ssrc, &rtp_video_stream_receiver_);
    if (config_.rtp.rtx_ssrc) {
      rtx_receive_stream_ = std::make_unique<RtxReceiveStream>(
          &rtp_video_stream_receiver_, config.rtp.rtx_associated_payload_types,
          config_.rtp.remote_ssrc, rtp_receive_statistics_.get());
      rtx_receiver_ = receiver_controller->CreateReceiver(
          config_.rtp.rtx_ssrc, rtx_receive_stream_.get());
    } else {
      rtp_receive_statistics_->EnableRetransmitDetection(config.rtp.remote_ssrc,
                                                          true);
    }
  }
}
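
The rtx_associated_payload_types map handed to RtxReceiveStream above is what lets it undo the RTX encapsulation before forwarding packets to rtp_video_stream_receiver_. A behavioral sketch of that restoration, per RFC 4588 (not the actual RtxReceiveStream code):

#include <cstdint>
#include <map>

// The first two payload bytes of an RTX packet carry the original sequence
// number of the retransmitted packet.
uint16_t OriginalSeqNum(const uint8_t* rtx_payload) {
  return static_cast<uint16_t>(rtx_payload[0] << 8 | rtx_payload[1]);
}

// The original payload type is recovered via rtx_associated_payload_types,
// e.g. {99 -> 107} from the hypothetical negotiation example in Part (1).
int MediaPayloadType(const std::map<int, int>& rtx_associated_payload_types,
                     int rtx_payload_type) {
  auto it = rtx_associated_payload_types.find(rtx_payload_type);
  return it == rtx_associated_payload_types.end() ? -1 : it->second;
}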

5) Analysis of VideoReceiveStream Startup

void WebRtcVideoChannel::WebRtcVideoReceiveStream::RecreateWebRtcVideoStream() {
  .....
  stream_ = call_->CreateVideoReceiveStream(std::move(config));
  ....
  stream_->Start();
  ....
}

void VideoReceiveStream::Start() {
  RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
  if (decoder_running_) {
    return;
  }
  const bool protected_by_fec = config_.rtp.protected_by_flexfec ||
                                rtp_video_stream_receiver_.IsUlpfecEnabled();
  frame_buffer_->Start();
  if (rtp_video_stream_receiver_.IsRetransmissionsEnabled() &&
      protected_by_fec) {
    frame_buffer_->SetProtectionMode(kProtectionNackFEC);
  }
  /* Enable RTCP and RTP sending */
  transport_adapter_.Enable();
  rtc::VideoSinkInterface<VideoFrame>* renderer = nullptr;
  /* enable_prerenderer_smoothing defaults to true. render_delay_ms (declared
     in VideoReceiveStream::Config, default 10 ms) may still be 0 at this
     point. IncomingVideoStream smooths rendering; since |this| is passed as
     the sink, every VideoFrame eventually reaches
     VideoReceiveStream::OnFrame().
  */
  if (config_.enable_prerenderer_smoothing) {
    incoming_video_stream_.reset(new IncomingVideoStream(
        task_queue_factory_, config_.render_delay_ms, this));
    renderer = incoming_video_stream_.get();
  } else {
    renderer = this;
  }
  for (const Decoder& decoder : config_.decoders) {
    std::unique_ptr<VideoDecoder> video_decoder =
        decoder.decoder_factory->LegacyCreateVideoDecoder(decoder.video_format,
                                                          config_.stream_id);
    // If we still have no valid decoder, we have to create a "Null" decoder
    // that ignores all calls. The reason we can get into this state is that the
    // old decoder factory interface doesn't have a way to query supported
    // codecs.
    if (!video_decoder) {
      video_decoder = std::make_unique<NullVideoDecoder>();
    }
    video_decoders_.push_back(std::move(video_decoder));
    video_receiver_.RegisterExternalDecoder(video_decoders_.back().get(),
                                            decoder.payload_type);
    VideoCodec codec = CreateDecoderVideoCodec(decoder);
    const bool raw_payload =
        config_.rtp.raw_payload_types.count(codec.plType) > 0;
    rtp_video_stream_receiver_.AddReceiveCodec(
        codec, decoder.video_format.parameters, raw_payload);
    RTC_CHECK_EQ(VCM_OK, video_receiver_.RegisterReceiveCodec(
                              &codec, num_cpu_cores_, false));
  }
  RTC_DCHECK(renderer != nullptr);
  video_stream_decoder_.reset(
      new VideoStreamDecoder(&video_receiver_, &stats_proxy_, renderer));
  // Make sure we register as a stats observer *after* we've prepared the
  // |video_stream_decoder_|.
  call_stats_->RegisterStatsObserver(this);
  // Start decoding on task queue.
  video_receiver_.DecoderThreadStarting();
  stats_proxy_.DecoderThreadStarting();
  decode_queue_.PostTask([this] {
    RTC_DCHECK_RUN_ON(&decode_queue_);
    decoder_stopped_ = false;
    StartNextDecode();
  });
  decoder_running_ = true;
  rtp_video_stream_receiver_.StartReceive();
}
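
For reference, the "Null" decoder mentioned in the comment only needs to satisfy the VideoDecoder interface while discarding everything it is given. A minimal sketch (the real NullVideoDecoder lives in video_receive_stream.cc; its exact shape may differ):

class NullVideoDecoder : public webrtc::VideoDecoder {
 public:
  int32_t InitDecode(const webrtc::VideoCodec* codec_settings,
                     int32_t number_of_cores) override {
    return WEBRTC_VIDEO_CODEC_OK;
  }
  int32_t Decode(const webrtc::EncodedImage& input_image,
                 bool missing_frames,
                 int64_t render_time_ms) override {
    return WEBRTC_VIDEO_CODEC_OK;  // Silently drop every frame.
  }
  int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* callback) override {
    return WEBRTC_VIDEO_CODEC_OK;
  }
  int32_t Release() override { return WEBRTC_VIDEO_CODEC_OK; }
};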

6) Summary


Original source: WebRTC Video Receiver (2) - RTP Packet Reception Flow Analysis

1) Preface

2) Analysis of RtpVideoStreamReceiver Core Members

3) RTP Packet Handling in RtpVideoStreamReceiver

4) RTP Packet Parsing in RtpVideoStreamReceiver

void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
  if (packet.payload_size() == 0) {
    // Padding or keep-alive packet.
    // TODO(nisse): Could drop empty packets earlier, but need to figure out how
    // they should be counted in stats.
    NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
    return;
  } 
  if (packet.PayloadType() == config_.rtp.red_payload_type) {
    ParseAndHandleEncapsulatingHeader(packet);
    return;
  }
  /* The map holds a single entry: the payload type of the decoder negotiated
     during the handshake (107 for H264, for example). The insertion flow was
     covered in Part (1).
  */
  const auto type_it = payload_type_map_.find(packet.PayloadType());
  if (type_it == payload_type_map_.end()) {
    return;
  }
  /* Create a depacketizer for this payload type */
  auto depacketizer =
      absl::WrapUnique(RtpDepacketizer::Create(type_it->second));
  if (!depacketizer) {
    RTC_LOG(LS_ERROR) << "Failed to create depacketizer.";
    return;
  }
  RtpDepacketizer::ParsedPayload parsed_payload;
  if (!depacketizer->Parse(&parsed_payload, packet.payload().data(),
                            packet.payload().size())) {
    RTC_LOG(LS_WARNING) << "Failed parsing payload.";
    return;
  }
  RTPHeader rtp_header;
  packet.GetHeader(&rtp_header);
  /* The parsed video header comes from the RtpDepacketizer */
  RTPVideoHeader video_header = parsed_payload.video_header();
  ......
  video_header.is_last_packet_in_frame = rtp_header.markerBit;
  video_header.frame_marking.temporal_id = kNoTemporalIdx;
  if (parsed_payload.video_header().codec == kVideoCodecVP9) {
    const RTPVideoHeaderVP9& codec_header = absl::get<RTPVideoHeaderVP9>(
        parsed_payload.video_header().video_type_header);
    video_header.is_last_packet_in_frame |= codec_header.end_of_frame;
    video_header.is_first_packet_in_frame |= codec_header.beginning_of_frame;
  }
  /* Parse RTP header extensions */
  packet.GetExtension<VideoOrientation>(&video_header.rotation);
  packet.GetExtension<VideoContentTypeExtension>(&video_header.content_type);
  packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
  /* Parse the playout-delay limits extension */
  packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
  packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);
  // Color space should only be transmitted in the last packet of a frame,
  // therefore, neglect it otherwise so that last_color_space_ is not reset by
  // mistake.
  if (video_header.is_last_packet_in_frame) {
    video_header.color_space = packet.GetExtension<ColorSpaceExtension>();
    if (video_header.color_space ||
        video_header.frame_type == VideoFrameType::kVideoFrameKey) {
      // Store color space since it's only transmitted when changed or for key
      // frames. Color space will be cleared if a key frame is transmitted
      // without color space information.
      last_color_space_ = video_header.color_space;
    } else if (last_color_space_) {
      video_header.color_space = last_color_space_;
    }
  }
  ......
  OnReceivedPayloadData(parsed_payload.payload, parsed_payload.payload_length,
                        rtp_header, video_header, generic_descriptor_wire,
                        packet.recovered());
}
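
For orientation, after the H264 negotiation described in Part (1), payload_type_map_ conceptually holds a single entry mapping the negotiated payload type to its codec (107 is the running example, not a fixed value):

#include <cstdint>
#include <map>

// Conceptual content of payload_type_map_ (payload type -> codec type).
// ReceivePacket() looks up packet.PayloadType() here, then creates the
// matching depacketizer (RtpDepacketizerH264 for kVideoCodecH264).
std::map<uint8_t, webrtc::VideoCodecType> payload_type_map = {
    {107, webrtc::kVideoCodecH264}};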

5) VCMPacket Construction and Keyframe Requests in RtpVideoStreamReceiver

5.1) VCMPacket Construction and Error Resilience

int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
    const uint8_t* payload_data,
    size_t payload_size,
    const RTPHeader& rtp_header,
    const RTPVideoHeader& video_header,
    const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
    bool is_recovered) {
  VCMPacket packet(payload_data, payload_size, rtp_header, video_header,
                    ntp_estimator_.Estimate(rtp_header.timestamp),
                    clock_->TimeInMilliseconds());
  packet.generic_descriptor = generic_descriptor;
  .......
  if (packet.codec() == kVideoCodecH264) {
    // Only when we start to receive packets will we know what payload type
    // that will be used. When we know the payload type insert the correct
    // sps/pps into the tracker.
    if (packet.payloadType != last_payload_type_) {
      last_payload_type_ = packet.payloadType;
      InsertSpsPpsIntoTracker(packet.payloadType);
    }
    switch (tracker_.CopyAndFixBitstream(&packet)) {
      case video_coding::H264SpsPpsTracker::kRequestKeyframe:
        rtcp_feedback_buffer_.RequestKeyFrame();
        rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
        RTC_FALLTHROUGH();
      case video_coding::H264SpsPpsTracker::kDrop:
        return 0;
      case video_coding::H264SpsPpsTracker::kInsert:
        break;
    }
  } 
  ......  
  return 0;
}
H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
    VCMPacket* packet) {
  RTC_DCHECK(packet->codec() == kVideoCodecH264);
  const uint8_t* data = packet->dataPtr;
  const size_t data_size = packet->sizeBytes;
  const RTPVideoHeader& video_header = packet->video_header;
  auto& h264_header =
      absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
  bool append_sps_pps = false;
  auto sps = sps_data_.end();
  auto pps = pps_data_.end();
  for (size_t i = 0; i < h264_header.nalus_length; ++i) {
    const NaluInfo& nalu = h264_header.nalus[i];
    switch (nalu.type) {
      case H264::NaluType::kSps: {
        sps_data_[nalu.sps_id].width = packet->width();
        sps_data_[nalu.sps_id].height = packet->height();
        break;
      }
      case H264::NaluType::kPps: {
        pps_data_[nalu.pps_id].sps_id = nalu.sps_id;
        break;
      }
      case H264::NaluType::kIdr: {
        // If this is the first packet of an IDR, make sure we have the required
        // SPS/PPS and also calculate how much extra space we need in the buffer
        // to prepend the SPS/PPS to the bitstream with start codes.
        if (video_header.is_first_packet_in_frame) {
          if (nalu.pps_id == -1) {
            RTC_LOG(LS_WARNING) << "No PPS id in IDR nalu.";
            return kRequestKeyframe;
          }
          pps = pps_data_.find(nalu.pps_id);
          if (pps == pps_data_.end()) {
            RTC_LOG(LS_WARNING)
                << "No PPS with id << " << nalu.pps_id << " received";
            return kRequestKeyframe;
          }
          sps = sps_data_.find(pps->second.sps_id);
          if (sps == sps_data_.end()) {
            RTC_LOG(LS_WARNING)
                << "No SPS with id << " << pps->second.sps_id << " received";
            return kRequestKeyframe;
          }
          // Since the first packet of every keyframe should have its width and
          // height set we set it here in the case of it being supplied out of
          // band.
          packet->video_header.width = sps->second.width;
          packet->video_header.height = sps->second.height;
          // If the SPS/PPS was supplied out of band then we will have saved
          // the actual bitstream in |data|.
          if (sps->second.data && pps->second.data) {
            RTC_DCHECK_GT(sps->second.size, 0);
            RTC_DCHECK_GT(pps->second.size, 0);
            append_sps_pps = true;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  RTC_CHECK(!append_sps_pps ||
            (sps != sps_data_.end() && pps != pps_data_.end()));
  // Calculate how much space we need for the rest of the bitstream.
  size_t required_size = 0;
  if (append_sps_pps) {
    required_size += sps->second.size + sizeof(start_code_h264);
    required_size += pps->second.size + sizeof(start_code_h264);
  }
  // RTC_LOG(INFO) << "h264_header.packetization_type:" << h264_header.packetization_type;
  if (h264_header.packetization_type == kH264StapA) {
    const uint8_t* nalu_ptr = data + 1;
    while (nalu_ptr < data + data_size) {
      RTC_DCHECK(video_header.is_first_packet_in_frame);
      required_size += sizeof(start_code_h264);
      // The first two bytes describe the length of a segment.
      uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
      nalu_ptr += 2;
      required_size += segment_length;
      nalu_ptr += segment_length;
    }
  } else {  // Default: kH264FuA
    if (h264_header.nalus_length > 0) {
      required_size += sizeof(start_code_h264);
    }
    required_size += data_size;
  }
  // Then we copy to the new buffer.
  uint8_t* buffer = new uint8_t[required_size];
  uint8_t* insert_at = buffer;
  if (append_sps_pps) {
    // Insert SPS.
    memcpy(insert_at, start_code_h264, sizeof(start_code_h264));
    insert_at += sizeof(start_code_h264);
    memcpy(insert_at, sps->second.data.get(), sps->second.size);
    insert_at += sps->second.size;
    // Insert PPS.
    memcpy(insert_at, start_code_h264, sizeof(start_code_h264));
    insert_at += sizeof(start_code_h264);
    memcpy(insert_at, pps->second.data.get(), pps->second.size);
    insert_at += pps->second.size;
    // Update codec header to reflect the newly added SPS and PPS.
    NaluInfo sps_info;
    sps_info.type = H264::NaluType::kSps;
    sps_info.sps_id = sps->first;
    sps_info.pps_id = -1;
    NaluInfo pps_info;
    pps_info.type = H264::NaluType::kPps;
    pps_info.sps_id = sps->first;
    pps_info.pps_id = pps->first;
    if (h264_header.nalus_length + 2 <= kMaxNalusPerPacket) {
      h264_header.nalus[h264_header.nalus_length++] = sps_info;
      h264_header.nalus[h264_header.nalus_length++] = pps_info;
    } else {
      RTC_LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
                              "SPS/PPS provided out-of-band.";
    }
  }
  // Copy the rest of the bitstream and insert start codes.
  if (h264_header.packetization_type == kH264StapA) {
    const uint8_t* nalu_ptr = data + 1;
    while (nalu_ptr < data + data_size) {
      memcpy(insert_at, start_code_h264, sizeof(start_code_h264));
      insert_at += sizeof(start_code_h264);
      // The first two bytes describe the length of a segment.
      uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
      nalu_ptr += 2;
      size_t copy_end = nalu_ptr - data + segment_length;
      if (copy_end > data_size) {
        delete[] buffer;
        return kDrop;
      }
      memcpy(insert_at, nalu_ptr, segment_length);
      insert_at += segment_length;
      nalu_ptr += segment_length;
    }
  } else {
    if (h264_header.nalus_length > 0) {
      memcpy(insert_at, start_code_h264, sizeof(start_code_h264));
      insert_at += sizeof(start_code_h264);
    }
    memcpy(insert_at, data, data_size);
  }
  packet->dataPtr = buffer;
  packet->sizeBytes = required_size;
  return kInsert;
}
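
The net effect for an IDR frame whose SPS/PPS arrived out of band is an Annex-B buffer of the form start_code | SPS | start_code | PPS | start_code | IDR data. A small standalone sketch of the start-code prepending that CopyAndFixBitstream performs for every copied segment (assuming start_code_h264 is the 4-byte sequence 00 00 00 01):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

static const uint8_t kStartCodeH264[] = {0x00, 0x00, 0x00, 0x01};

// Prepend an Annex-B start code to a single NALU (illustration only).
std::vector<uint8_t> PrependStartCode(const uint8_t* nalu, size_t size) {
  std::vector<uint8_t> out(sizeof(kStartCodeH264) + size);
  memcpy(out.data(), kStartCodeH264, sizeof(kStartCodeH264));
  memcpy(out.data() + sizeof(kStartCodeH264), nalu, size);
  return out;
}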

5.2) Keyframe Requests in RtpVideoStreamReceiver

int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
    const uint8_t* payload_data,
    size_t payload_size,
    const RTPHeader& rtp_header,
    const RTPVideoHeader& video_header,
    const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
    bool is_recovered) {
  VCMPacket packet(payload_data, payload_size, rtp_header, video_header,
                    ntp_estimator_.Estimate(rtp_header.timestamp),
                    clock_->TimeInMilliseconds());
    ....
    switch (tracker_.CopyAndFixBitstream(&packet)) {
      case video_coding::H264SpsPpsTracker::kRequestKeyframe:
        rtcp_feedback_buffer_.RequestKeyFrame();
        rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
        RTC_FALLTHROUGH();
      case video_coding::H264SpsPpsTracker::kDrop:
        return 0;
      case video_coding::H264SpsPpsTracker::kInsert:
        break;
    }
    .....
}

void RtpVideoStreamReceiver::RtcpFeedbackBuffer::RequestKeyFrame() {
  rtc::CritScope lock(&cs_);
  request_key_frame_ = true;
}
void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() {
  bool request_key_frame = false;
  std::vector<uint16_t> nack_sequence_numbers;
  absl::optional<LossNotificationState> lntf_state;
  ....
  {
    rtc::CritScope lock(&cs_);
    std::swap(request_key_frame, request_key_frame_);
  }
  .....
  if (request_key_frame) {
    key_frame_request_sender_->RequestKeyFrame();
  } else if (!nack_sequence_numbers.empty()) {
    nack_sender_->SendNack(nack_sequence_numbers, true);
  }
}
void RtpVideoStreamReceiver::RequestKeyFrame() {
  if (keyframe_request_sender_) {  // nullptr by default
    keyframe_request_sender_->RequestKeyFrame();
  } else {
    rtp_rtcp_->SendPictureLossIndication();
  }
}

Original source: WebRTC Video Receiver (3) - NACK Packet-Loss Retransmission Principles

1) Preface

2) How NackModule Works and Its Relationship to RtpVideoStreamReceiver

2.1) The M79 Version

2.2) Changes in the M85 Version

3) Workflow of NackModule's OnReceivedPacket Function

int NackModule2::OnReceivedPacket(uint16_t seq_num,
                                  bool is_keyframe,    /* part of a keyframe? */
                                  bool is_recovered /* recovered via RTX or FEC? */) {
  rtc::CritScope lock(&crit_);
  // TODO(philipel): When the packet includes information whether it is
  //                 retransmitted or not, use that value instead. For
  //                 now set it to true, which will cause the reordering
  //                 statistics to never be updated.
  bool is_retransmitted = true;
  // newest_seq_num_ is the newest sequence number received so far.
  if (!initialized_) {
    newest_seq_num_ = seq_num;
    if (is_keyframe)
      keyframe_list_.insert(seq_num);
    initialized_ = true;
    return 0;
  }
  // Since the |newest_seq_num_| is a packet we have actually received we know
  // that packet has never been Nacked.
  // seq_num is the sequence number of the packet just received; newest_seq_num_
  // is the newest one received so far (absent wraparound, simply the largest).
  if (seq_num == newest_seq_num_)
    return 0;
  // After a loss, this condition holds when the retransmitted packet arrives;
  // seq_num is then that packet's sequence number.
  if (AheadOf(newest_seq_num_, seq_num)) {
    // An out of order packet has been received.
    auto nack_list_it = nack_list_.find(seq_num);
    int nacks_sent_for_packet = 0;
    // If seq_num is in nack_list_, erase it and record how many NACK retries
    // it took before the packet arrived.
    if (nack_list_it != nack_list_.end()) {
      nacks_sent_for_packet = nack_list_it->second.retries;
      nack_list_.erase(nack_list_it);
    }
    if (!is_retransmitted)
      UpdateReorderingStatistics(seq_num);
    // Return how many times this packet was NACKed; the frame-assembly module
    // uses this value.
    return nacks_sent_for_packet;
  }
  // Keep track of new keyframes.
  // If the packet belongs to a keyframe, record it in keyframe_list_.
  if (is_keyframe)
    keyframe_list_.insert(seq_num);
  // lower_bound(val) returns an iterator to the first element >= val.
  // And remove old ones so we don't accumulate keyframes.
  auto it = keyframe_list_.lower_bound(seq_num - kMaxPacketAge);
  if (it != keyframe_list_.begin())
    keyframe_list_.erase(keyframe_list_.begin(), it);
  if (is_recovered) {
    recovered_list_.insert(seq_num);
    // Remove old ones so we don't accumulate recovered packets.
    auto it = recovered_list_.lower_bound(seq_num - kMaxPacketAge);
    if (it != recovered_list_.begin())
      recovered_list_.erase(recovered_list_.begin(), it);
    // Do not send nack for packets recovered by FEC or RTX.
    return 0;
  }
  AddPacketsToNack(newest_seq_num_ + 1, seq_num);
  newest_seq_num_ = seq_num;
  // Are there any nacks that are waiting for this seq_num.
  std::vector<uint16_t> nack_batch = GetNackBatch(kSeqNumOnly);
  if (!nack_batch.empty()) {
    // This batch of NACKs is triggered externally; the initiator can
    // batch them with other feedback messages.
    nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/true);
  }
  return 0;
}
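
AheadOf() is the wraparound-aware comparison the whole module relies on. A minimal behavioral sketch, modeled on rtc_base/numerics/sequence_number_util.h (the exact half-range tie-break is simplified here):

#include <cstdint>

// Modular distance moving forward from |from| to |to|.
uint16_t ForwardDiff(uint16_t from, uint16_t to) {
  return static_cast<uint16_t>(to - from);
}

// |a| is "ahead of" |b| if moving forward from |b| reaches |a| in fewer than
// 2^15 steps. E.g. AheadOf(2, 65534) is true: ForwardDiff(65534, 2) == 4.
bool AheadOf(uint16_t a, uint16_t b) {
  return a != b && ForwardDiff(b, a) < 0x8000;
}
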
Sample log output: packets 39 and 40 are lost and later recovered via RTX; AheadOf(newest_seq_num_, seq_num) is 1 only for those late arrivals:

newest_seq_num_:36 seq_num:37 is_keyframe:0 is_recovered: 0 AheadOf(newest_seq_num_, seq_num) : 0
newest_seq_num_:37 seq_num:38 is_keyframe:0 is_recovered: 0 AheadOf(newest_seq_num_, seq_num) : 0
newest_seq_num_:38 seq_num:41 is_keyframe:0 is_recovered: 0 AheadOf(newest_seq_num_, seq_num) : 0
newest_seq_num_:41 seq_num:42 is_keyframe:0 is_recovered: 0 AheadOf(newest_seq_num_, seq_num) : 0
newest_seq_num_:42 seq_num:43 is_keyframe:0 is_recovered: 0 AheadOf(newest_seq_num_, seq_num) : 0
newest_seq_num_:43 seq_num:40 is_keyframe:0 is_recovered: 1 AheadOf(newest_seq_num_, seq_num) : 1
newest_seq_num_:43 seq_num:39 is_keyframe:0 is_recovered: 1 AheadOf(newest_seq_num_, seq_num) : 1
newest_seq_num_:43 seq_num:44 is_keyframe:0 is_recovered: 0 AheadOf(newest_seq_num_, seq_num) : 0

A second trace, showing keyframe_list_ filling up as keyframe packets arrive:

newest_seq_num_:5 seq_num:6 is_keyframe:1  keyframe_list_.size():0 recovered_list_.size():0 nack_list_.size():0
newest_seq_num_:6 seq_num:7 is_keyframe:1  keyframe_list_.size():1 recovered_list_.size():0 nack_list_.size():0
newest_seq_num_:7 seq_num:8 is_keyframe:0  keyframe_list_.size():2 recovered_list_.size():0 nack_list_.size():0
newest_seq_num_:8 seq_num:9 is_keyframe:0  keyframe_list_.size():2 recovered_list_.size():0 nack_list_.size():0

With seq_num = 6, the pruning of old keyframe entries looks like this:

const int kMaxPacketAge = 10000;
/* lower_bound returns the first element >= seq_num - kMaxPacketAge; here that
   is the entry for packet 6, so nothing is erased yet. */
auto it = keyframe_list_.lower_bound(seq_num - kMaxPacketAge);  /* 6 - 10000 */
if (it != keyframe_list_.begin())
  keyframe_list_.erase(keyframe_list_.begin(), it);

4) How NackModule's AddPacketsToNack Detects Packet Loss

void NackModule::AddPacketsToNack(uint16_t seq_num_start,  // newest_seq_num_ + 1
                                  uint16_t seq_num_end) {  // seq_num
  // Remove old packets.
  auto it = nack_list_.lower_bound(seq_num_end - kMaxPacketAge);
  nack_list_.erase(nack_list_.begin(), it);
  // If the nack list is too large, remove packets from the nack list until
  // the latest first packet of a keyframe. If the list is still too large,
  // clear it and request a keyframe.
  // Too many lost packets are cached; prune the list.
  uint16_t num_new_nacks = ForwardDiff(seq_num_start, seq_num_end);
  if (nack_list_.size() + num_new_nacks > kMaxNackPackets) {
    while (RemovePacketsUntilKeyFrame() &&
            nack_list_.size() + num_new_nacks > kMaxNackPackets) {
    }
    if (nack_list_.size() + num_new_nacks > kMaxNackPackets) {
      nack_list_.clear();
      RTC_LOG(LS_WARNING) << "NACK list full, clearing NACK"
                              " list and requesting keyframe.";
      keyframe_request_sender_->RequestKeyFrame();
      return;
    }
  }
  /* Loss detection: with contiguous packets, seq_num_start == seq_num_end and
     the loop body never runs. */
  for (uint16_t seq_num = seq_num_start; seq_num != seq_num_end; ++seq_num) {
    // Do not send nack for packets that are already recovered by FEC or RTX
    if (recovered_list_.find(seq_num) != recovered_list_.end())
      continue;
    /* By default WaitNumberOfPackets(0.5) returns 0 */
    NackInfo nack_info(seq_num, seq_num + WaitNumberOfPackets(0.5),
                        clock_->TimeInMilliseconds());
    RTC_DCHECK(nack_list_.find(seq_num) == nack_list_.end());
    nack_list_[seq_num] = nack_info;
  }
}
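
To see what the loop produces, here is a self-contained model of nack_list_ after a gap: packet 38 was the newest, packet 41 arrives, so AddPacketsToNack(39, 41) runs. The NackInfo fields follow the M79 layout, and WaitNumberOfPackets(0.5) is assumed to return 0, its default per the comment above:

#include <cstdint>
#include <map>

struct NackInfo {
  uint16_t seq_num = 0;
  uint16_t send_at_seq_num = 0;
  int64_t created_at_time = -1;
  int64_t sent_at_time = -1;
  int retries = 0;
};

int main() {
  std::map<uint16_t, NackInfo> nack_list;
  const int64_t now_ms = 0;  // Placeholder clock value.
  for (uint16_t seq = 39; seq != 41; ++seq) {
    // send_at_seq_num == seq_num because WaitNumberOfPackets(0.5) returned 0.
    nack_list[seq] = {seq, seq, now_ms};
  }
  // nack_list now holds 39 and 40; GetNackBatch(kSeqNumOnly) emits them right
  // away, since newest_seq_num_ (41) is ahead of their send_at_seq_num.
}
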
The corresponding trace from a live run, at the moment the gap is detected (41 arrives while newest_seq_num_ is 38, so 39 and 40 are added to nack_list_):

newest_seq_num_:38 seq_num:41 is_keyframe:0 is_recovered: 0
newest_seq_num_:41 seq_num:42 is_keyframe:0 is_recovered: 0
newest_seq_num_:42 seq_num:43 is_keyframe:0 is_recovered: 0

bool NackModule::RemovePacketsUntilKeyFrame() {
  while (!keyframe_list_.empty()) {
    /* Take the oldest entry in keyframe_list_ (call it a), find the first
     * element in nack_list_ that is >= a, and erase everything before it,
     * i.e. every sequence number older than a.
     */
    auto it = nack_list_.lower_bound(*keyframe_list_.begin());
    if (it != nack_list_.begin()) {
      // We have found a keyframe that actually is newer than at least one
      // packet in the nack list.
      nack_list_.erase(nack_list_.begin(), it);
      return true;
    }
    // If this keyframe is so old it does not remove any packets from the list,
    // remove it from the list of keyframes and try the next keyframe.
    keyframe_list_.erase(keyframe_list_.begin());
  }
  return false;
}

5) NackModule NACK Sending Flow

5.1) The M79 Version

std::vector<uint16_t> NackModule::GetNackBatch(NackFilterOptions options) {
  bool consider_seq_num = options != kTimeOnly;
  bool consider_timestamp = options != kSeqNumOnly;
  int64_t now_ms = clock_->TimeInMilliseconds();
  std::vector<uint16_t> nack_batch;
  auto it = nack_list_.begin();
  while (it != nack_list_.end()) {
    bool delay_timed_out =
        now_ms - it->second.created_at_time >= send_nack_delay_ms_;
    bool nack_on_rtt_passed = now_ms - it->second.sent_at_time >= rtt_ms_;
    /* By default a NackInfo is created with send_at_seq_num equal to the lost
       packet's seq_num, since WaitNumberOfPackets(0.5) returns 0. */
    bool nack_on_seq_num_passed =
        it->second.sent_at_time == -1 &&
        AheadOrAt(newest_seq_num_, it->second.send_at_seq_num);
    if (delay_timed_out && ((consider_seq_num && nack_on_seq_num_passed) ||
                            (consider_timestamp && nack_on_rtt_passed))) {
      nack_batch.emplace_back(it->second.seq_num);
      ++it->second.retries;
      it->second.sent_at_time = now_ms;
      if (it->second.retries >= kMaxNackRetries) {
        RTC_LOG(LS_WARNING) << "Sequence number " << it->second.seq_num
                            << " removed from NACK list due to max retries.";
        it = nack_list_.erase(it);
      } else {
        ++it;
      }
      continue;
    }
    ++it;
  }
  return nack_batch;
}

5.2) The M85 Version

NackModule2::NackModule2(TaskQueueBase* current_queue,
                          Clock* clock,
                          NackSender* nack_sender,
                          KeyFrameRequestSender* keyframe_request_sender,
                          TimeDelta update_interval /*= kUpdateInterval*/)
    : worker_thread_(current_queue),
      update_interval_(update_interval),
      clock_(clock),
      nack_sender_(nack_sender),
      keyframe_request_sender_(keyframe_request_sender),
      reordering_histogram_(kNumReorderingBuckets, kMaxReorderedPackets),
      initialized_(false),
      rtt_ms_(kDefaultRttMs),
      newest_seq_num_(0),
      send_nack_delay_ms_(GetSendNackDelay()),
      backoff_settings_(BackoffSettings::ParseFromFieldTrials()) {
  repeating_task_ = RepeatingTaskHandle::DelayedStart(
      TaskQueueBase::Current(), update_interval_,
      [this]() {
        RTC_DCHECK_RUN_ON(worker_thread_);
        std::vector<uint16_t> nack_batch = GetNackBatch(kTimeOnly);
        if (!nack_batch.empty()) {
          // This batch of NACKs is triggered externally; there is no external
          // initiator who can batch them with other feedback messages.
          nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/false);
        }
        return update_interval_;
      },
      clock_);
}

6) Summary


Original source: WebRTC Video Receiver (4) - Frame Assembly Principles

1) Preface

struct InsertResult {
  std::vector<std::unique_ptr<Packet>> packets;
  // Indicates if the packet buffer was cleared, which means that a key
  // frame request should be sent.
  bool buffer_cleared = false;
};

2) Analysis of the video_coding::PacketBuffer Data Structures

struct Packet {
  // If all its previous packets have been inserted into the packet buffer.
  // Set and used internally by the PacketBuffer.
  bool continuous = false;
  bool marker_bit = false;
  uint8_t payload_type = 0;
  uint16_t seq_num = 0;
  uint32_t timestamp = 0;
  // NTP time of the capture time in local timebase in milliseconds.
  int64_t ntp_time_ms = -1;
  int times_nacked = -1;
  rtc::CopyOnWriteBuffer video_payload;
  RTPVideoHeader video_header;
  RtpPacketInfo packet_info;
};
bool PacketBuffer::PotentialNewFrame(uint16_t seq_num) const {
  // Use modulo arithmetic to map seq_num to its slot index in buffer_.
  size_t index = seq_num % buffer_.size();
  // Index of the previous packet.
  int prev_index = index > 0 ? index - 1 : buffer_.size() - 1;
  // Entry for seq_num.
  const auto& entry = buffer_[index];
  // Entry for the packet preceding seq_num.
  const auto& prev_entry = buffer_[prev_index];
  // If entry is null, the packet for seq_num has not been inserted into
  // buffer_ yet, so there is no potential frame at this seq_num.
  if (entry == nullptr)
    return false;
  if (entry->seq_num != seq_num)
    return false;
  // If this packet is the first packet of a frame, it can start a new frame.
  if (entry->is_first_packet_in_frame())
    return true;
  if (prev_entry == nullptr)
    return false;
  // If the previous entry's seq_num is not seq_num - 1, a packet is missing.
  if (prev_entry->seq_num != static_cast<uint16_t>(entry->seq_num - 1))
    return false;
  if (prev_entry->timestamp != entry->timestamp)
    return false;
  // All the checks above passed; the packet is continuous if its predecessor is.
  if (prev_entry->continuous)
    return true;
  return false;
}
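
The modulo indexing above also explains the entry->seq_num check: two different sequence numbers can land in the same slot. A tiny self-contained illustration (512 is an assumed size; the real buffer starts small and grows on demand):

#include <cassert>
#include <cstddef>

int main() {
  const size_t kBufferSize = 512;
  // seq_num 1 and seq_num 513 collide in slot 1...
  assert(513 % kBufferSize == 1 % kBufferSize);
  // ...so PotentialNewFrame() must also verify entry->seq_num == seq_num
  // before trusting the slot's contents.
}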

3) Workflow of PacketBuffer::InsertPacket

PacketBuffer::InsertResult PacketBuffer::InsertPacket(
    std::unique_ptr<PacketBuffer::Packet> packet) {
  PacketBuffer::InsertResult result;
  MutexLock lock(&mutex_);
  uint16_t seq_num = packet->seq_num;
  // Compute the ring-buffer slot index.
  size_t index = seq_num % buffer_.size();
  // First RTP packet received: initialize first_seq_num_ to seq_num.
  if (!first_packet_received_) {
    first_seq_num_ = seq_num;
    first_packet_received_ = true;
  } else if (AheadOf(first_seq_num_, seq_num)) {  // An older (retransmitted or recovered) packet arrived.
    // If we have explicitly cleared past this packet then it's old,
    // don't insert it, just silently ignore it.
    if (is_cleared_to_first_seq_num_) {
      return result;
    }
    first_seq_num_ = seq_num;
  }
  if (buffer_[index] != nullptr) {
    // Duplicate packet, just delete the payload.
    if (buffer_[index]->seq_num == packet->seq_num) {
      return result;
    }
    // The packet buffer is full, try to expand the buffer.
    while (ExpandBufferSize() && buffer_[seq_num % buffer_.size()] != nullptr) {
    }
    index = seq_num % buffer_.size();
    // Packet buffer is still full since we were unable to expand the buffer.
    if (buffer_[index] != nullptr) {
      // Clear the buffer, delete payload, and return false to signal that a
      // new keyframe is needed.
      RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame.";
      ClearInternal();
      // RtpVideoStreamReceiver2::OnInsertedPacket() uses this flag to request a keyframe.
      result.buffer_cleared = true;
      return result;
    }
  }
  int64_t now_ms = clock_->TimeInMilliseconds();
  last_received_packet_ms_ = now_ms;
  if (packet->video_header.frame_type == VideoFrameType::kVideoFrameKey ||
      last_received_keyframe_rtp_timestamp_ == packet->timestamp) {
    last_received_keyframe_packet_ms_ = now_ms;
    last_received_keyframe_rtp_timestamp_ = packet->timestamp;
  }
  packet->continuous = false;
  buffer_[index] = std::move(packet);
  /* Section 4: update the missing-packet record */
  UpdateMissingPackets(seq_num);
  /* Section 5: frame assembly via FindFrames */
  result.packets = FindFrames(seq_num);
  return result;
}

4) Updating the Missing-Packet Record

void PacketBuffer::UpdateMissingPackets(uint16_t seq_num) {
  if (!newest_inserted_seq_num_)
    newest_inserted_seq_num_ = seq_num;
  const int kMaxPaddingAge = 1000;
  // Without loss, packets arrive in order and this condition always holds.
  if (AheadOf(seq_num, *newest_inserted_seq_num_)) {
    uint16_t old_seq_num = seq_num - kMaxPaddingAge;
    auto erase_to = missing_packets_.lower_bound(old_seq_num);
    missing_packets_.erase(missing_packets_.begin(), erase_to);
    // Guard against inserting a large amount of missing packets if there is a
    // jump in the sequence number.
    if (AheadOf(old_seq_num, *newest_inserted_seq_num_))
      *newest_inserted_seq_num_ = old_seq_num;
    ++*newest_inserted_seq_num_;
    // Entering this loop means packets were lost; insert each missing
    // sequence number into missing_packets_.
    while (AheadOf(seq_num, *newest_inserted_seq_num_)) {
      missing_packets_.insert(*newest_inserted_seq_num_);
      ++*newest_inserted_seq_num_;
    }
  } else {  // A late (retransmitted or recovered) packet arrived.
    missing_packets_.erase(seq_num);
  }
}
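
A self-contained walk-through of the update rule, using plain integer ordering instead of the wraparound-aware AheadOf for brevity:

#include <cassert>
#include <cstdint>
#include <set>

// Simplified model of UpdateMissingPackets (no wraparound, no pruning).
void UpdateMissing(std::set<uint16_t>* missing, uint16_t* newest, uint16_t seq) {
  if (seq > *newest) {
    for (uint16_t s = *newest + 1; s < seq; ++s)
      missing->insert(s);  // Every skipped number is a suspected loss.
    *newest = seq;
  } else {
    missing->erase(seq);  // A late or recovered packet fills its gap.
  }
}

int main() {
  std::set<uint16_t> missing;
  uint16_t newest = 38;
  UpdateMissing(&missing, &newest, 41);  // Gap: 39 and 40 go missing.
  assert(missing == std::set<uint16_t>({39, 40}));
  UpdateMissing(&missing, &newest, 40);  // Retransmission of 40 arrives.
  assert(missing == std::set<uint16_t>({39}));
}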

5) PacketBuffer::FindFrames: Finding Complete Frames

std::vector<std::unique_ptr<PacketBuffer::Packet>> PacketBuffer::FindFrames(
    uint16_t seq_num) {
  std::vector<std::unique_ptr<PacketBuffer::Packet>> found_frames;
  // The loop condition uses PotentialNewFrame() to look for a potential frame.
  for (size_t i = 0; i < buffer_.size() && PotentialNewFrame(seq_num); ++i) {
    size_t index = seq_num % buffer_.size();
    // Reaching this point means every packet of the current frame seen so far
    // is contiguous (this says nothing about earlier frames), so mark the
    // packet as continuous.
    buffer_[index]->continuous = true;
    // If all packets of the frame is continuous, find the first packet of the
    // frame and add all packets of the frame to the returned packets.
    // The real work starts once this seq_num holds the last packet of a
    // frame; we then search backwards.
    if (buffer_[index]->is_last_packet_in_frame()) {
      uint16_t start_seq_num = seq_num;
      // Find the start index by searching backward until the packet with
      // the |frame_begin| flag is set.
      int start_index = index;
      size_t tested_packets = 0;
      int64_t frame_timestamp = buffer_[start_index]->timestamp;
      // Identify H.264 keyframes by means of SPS, PPS, and IDR.
      bool is_h264 = buffer_[start_index]->codec() == kVideoCodecH264;
      bool has_h264_sps = false;
      bool has_h264_pps = false;
      bool has_h264_idr = false;
      bool is_h264_keyframe = false;
      int idr_width = -1;
      int idr_height = -1;
      // Part 2: walk backwards from the current packet's slot to locate the
      // first packet of the frame, i.e. start_seq_num.
      while (true) {
        ++tested_packets;
        // For non-H264 payloads, stop as soon as the packet flagged as the
        // first packet of the frame is found; this line is the core of the
        // loop.
        if (!is_h264 && buffer_[start_index]->is_first_packet_in_frame())
          break;
        // For H264, validate the NAL units instead.
        if (is_h264) {
          const auto* h264_header = absl::get_if<RTPVideoHeaderH264>(
              &buffer_[start_index]->video_header.video_type_header);
          if (!h264_header || h264_header->nalus_length >= kMaxNalusPerPacket)
            return found_frames;
          for (size_t j = 0; j < h264_header->nalus_length; ++j) {
            if (h264_header->nalus[j].type == H264::NaluType::kSps) {
              has_h264_sps = true;
            } else if (h264_header->nalus[j].type == H264::NaluType::kPps) {
              has_h264_pps = true;
            } else if (h264_header->nalus[j].type == H264::NaluType::kIdr) {
              has_h264_idr = true;
            }
          }
          /* sps_pps_idr_is_h264_keyframe_ is enabled via the field trial
             WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/. When set, an IDR only
             counts as a keyframe if SPS and PPS have been seen as well. */
          if ((sps_pps_idr_is_h264_keyframe_ && has_h264_idr && has_h264_sps &&
               has_h264_pps) ||
              (!sps_pps_idr_is_h264_keyframe_ && has_h264_idr)) {
            // The current frame qualifies as a keyframe.
            is_h264_keyframe = true;
            // Store the resolution of key frame which is the packet with
            // smallest index and valid resolution; typically its IDR or SPS
            // packet; there may be packet preceeding this packet, IDR's
            // resolution will be applied to them.
            if (buffer_[start_index]->width() > 0 &&
                buffer_[start_index]->height() > 0) {
              idr_width = buffer_[start_index]->width();
              idr_height = buffer_[start_index]->height();
            }
          }
        }
        if (tested_packets == buffer_.size())
          break;
        start_index = start_index > 0 ? start_index - 1 : buffer_.size() - 1;
        // In the case of H264 we don't have a frame_begin bit (yes,
        // |frame_begin| might be set to true but that is a lie). So instead
        // we traverese backwards as long as we have a previous packet and
        // the timestamp of that packet is the same as this one. This may cause
        // the PacketBuffer to hand out incomplete frames.
        // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7106
        // Packets of one frame share a timestamp; a different timestamp means
        // a different frame.
        if (is_h264 && (buffer_[start_index] == nullptr ||
                        buffer_[start_index]->timestamp != frame_timestamp)) {
          break;
        }
        --start_seq_num;
      }  // End of while (true): start_seq_num is the frame's first packet.
      // Part 3: check the continuity of the frame.
      if (is_h264) {
        // Warn if this is an unsafe frame.
        if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
          RTC_LOG(LS_WARNING)
              << "Received H.264-IDR frame "
                 "(SPS: "
              << has_h264_sps << ", PPS: " << has_h264_pps << "). Treating as "
              << (sps_pps_idr_is_h264_keyframe_ ? "delta" : "key")
              << " frame since WebRTC-SpsPpsIdrIsH264Keyframe is "
              << (sps_pps_idr_is_h264_keyframe_ ? "enabled." : "disabled");
        }
        // Now that we have decided whether to treat this frame as a key frame
        // or delta frame in the frame buffer, we update the field that
        // determines if the RtpFrameObject is a key frame or delta frame.
        // Index of the frame's first packet in buffer_.
        const size_t first_packet_index = start_seq_num % buffer_.size();
        // For H264, record in the first packet's video_header.frame_type
        // whether the frame is a keyframe.
        if (is_h264_keyframe) {
          buffer_[first_packet_index]->video_header.frame_type =
              VideoFrameType::kVideoFrameKey;
          if (idr_width > 0 && idr_height > 0) {
            // IDR frame was finalized and we have the correct resolution for
            // IDR; update first packet to have same resolution as IDR.
            buffer_[first_packet_index]->video_header.width = idr_width;
            buffer_[first_packet_index]->video_header.height = idr_height;
          }
        } else {
          buffer_[first_packet_index]->video_header.frame_type =
              VideoFrameType::kVideoFrameDelta;
        }
        // If this is not a keyframe, make sure there are no gaps in the packet
        // sequence numbers up until this point.
        // An H264 delta frame needs its reference frames to decode, so
        // missing_packets_.upper_bound(start_seq_num) checks whether any
        // packet before start_seq_num is still missing; if so, stop
        // assembling and return.
        if (!is_h264_keyframe && missing_packets_.upper_bound(start_seq_num) !=
                                     missing_packets_.begin()) {
          return found_frames;
        }
        // Example: packets 25-27 form a complete frame and the code has found
        // it, but missing_packets_ still holds {20, 23, 30, 31}. Since the
        // frame is a delta frame and packets before it are missing, assembly
        // stops here. A complete I-frame, by contrast, always passes this
        // check and continues below.
      }
      // Part 4: move the packets of the found frame into found_frames.
      const uint16_t end_seq_num = seq_num + 1;
      // Use uint16_t type to handle sequence number wrap around case.
      uint16_t num_packets = end_seq_num - start_seq_num;
      found_frames.reserve(found_frames.size() + num_packets);
      for (uint16_t i = start_seq_num; i != end_seq_num; ++i) {
        std::unique_ptr<Packet>& packet = buffer_[i % buffer_.size()];
        RTC_DCHECK(packet);
        RTC_DCHECK_EQ(i, packet->seq_num);
        // Ensure frame boundary flags are properly set.
        packet->video_header.is_first_packet_in_frame = (i == start_seq_num);
        packet->video_header.is_last_packet_in_frame = (i == seq_num);
        found_frames.push_back(std::move(packet));
      }
      // Erase every missing_packets_ entry at or before seq_num. For H264, a
      // delta frame with missing references returned above, so only safe
      // frames reach this point. If this frame is a keyframe while earlier
      // frames were lost, buffer_ has not been cleaned yet; ClearTo() purges
      // everything before this seq_num before the frame is handed to the
      // decoder.
      missing_packets_.erase(missing_packets_.begin(),
                             missing_packets_.upper_bound(seq_num));
    }
    ++seq_num;
  }
  return found_frames;
}
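
The keyframe decision that the backward scan feeds can be condensed into one predicate (a sketch; "strict" corresponds to enabling the WebRTC-SpsPpsIdrIsH264Keyframe field trial):

// strict == sps_pps_idr_is_h264_keyframe_:
//   strict  -> a keyframe must carry SPS, PPS, and an IDR;
//   default -> an IDR alone is enough.
bool IsH264Keyframe(bool strict, bool has_sps, bool has_pps, bool has_idr) {
  return strict ? (has_idr && has_sps && has_pps) : has_idr;
}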

6) Frame Assembly in OnInsertedPacket

void RtpVideoStreamReceiver2::OnInsertedPacket(
    video_coding::PacketBuffer::InsertResult result) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  video_coding::PacketBuffer::Packet* first_packet = nullptr;
  int max_nack_count;
  int64_t min_recv_time;
  int64_t max_recv_time;
  std::vector<rtc::ArrayView<const uint8_t>> payloads;
  RtpPacketInfos::vector_type packet_infos;
  bool frame_boundary = true;
  for (auto& packet : result.packets) {
    // PacketBuffer promises frame boundaries are correctly set on each
    // packet. Document that assumption with the DCHECKs.
    ....
    payloads.emplace_back(packet->video_payload);
    packet_infos.push_back(packet->packet_info);
    frame_boundary = packet->is_last_packet_in_frame();
    // When the last packet of a frame is reached, bundle its packets into a
    // video_coding::RtpFrameObject.
    if (packet->is_last_packet_in_frame()) {
      auto depacketizer_it = payload_type_map_.find(first_packet->payload_type);
      RTC_CHECK(depacketizer_it != payload_type_map_.end());
      rtc::scoped_refptr<EncodedImageBuffer> bitstream =
          depacketizer_it->second->AssembleFrame(payloads);
      if (!bitstream) {
        // Failed to assemble a frame. Discard and continue.
        continue;
      }
      const video_coding::PacketBuffer::Packet& last_packet = *packet;
      OnAssembledFrame(std::make_unique<video_coding::RtpFrameObject>(
          first_packet->seq_num,                    //
          last_packet.seq_num,                      //
          last_packet.marker_bit,                   //
          max_nack_count,                           //
          min_recv_time,                            //
          max_recv_time,                            //
          first_packet->timestamp,                  //
          first_packet->ntp_time_ms,                //
          last_packet.video_header.video_timing,    //
          first_packet->payload_type,               //
          first_packet->codec(),                    //
          last_packet.video_header.rotation,        //
          last_packet.video_header.content_type,    //
          first_packet->video_header,               //
          last_packet.video_header.color_space,     //
          RtpPacketInfos(std::move(packet_infos)),  //
          std::move(bitstream)));
    }
  }
  RTC_DCHECK(frame_boundary);
  if (result.buffer_cleared) {
    RequestKeyFrame();
  }
}

7) Summary