Mirror of https://github.com/ossrs/srs.git, synced 2025-03-09 15:49:59 +00:00

Squash: Fix rtc to rtmp sync timestamp using sender report. #2470

Parent: 3d58e98d1c
Commit: 85620a34f5

309 changed files with 14837 additions and 8525 deletions
@@ -22,6 +22,48 @@ static const char* id2codec_name(SrsAudioCodecId id)
     }
 }
 
+class SrsFFmpegLogHelper {
+public:
+    SrsFFmpegLogHelper() {
+        av_log_set_callback(ffmpeg_log_callback);
+        av_log_set_level(AV_LOG_TRACE);
+    }
+
+    static void ffmpeg_log_callback(void*, int level, const char* fmt, va_list vl)
+    {
+        static char buf[4096] = {0};
+        int nbytes = vsnprintf(buf, sizeof(buf), fmt, vl);
+        if (nbytes > 0) {
+            // SRS logs always start on a new line, so replace '\n' with '\0' to make the log easy to read.
+            if (buf[nbytes - 1] == '\n') {
+                buf[nbytes - 1] = '\0';
+            }
+            switch (level) {
+                case AV_LOG_PANIC:
+                case AV_LOG_FATAL:
+                case AV_LOG_ERROR:
+                    srs_error("%s", buf);
+                    break;
+                case AV_LOG_WARNING:
+                    srs_warn("%s", buf);
+                    break;
+                case AV_LOG_INFO:
+                    srs_trace("%s", buf);
+                    break;
+                case AV_LOG_VERBOSE:
+                case AV_LOG_DEBUG:
+                case AV_LOG_TRACE:
+                default:
+                    srs_verbose("%s", buf);
+                    break;
+            }
+        }
+    }
+};
+
+// Register the FFmpeg log callback function.
+SrsFFmpegLogHelper _srs_ffmpeg_log_helper;
+
 SrsAudioTranscoder::SrsAudioTranscoder()
 {
     dec_ = NULL;
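The hunk above routes FFmpeg's internal logging into the SRS log via a global callback that is registered by a static helper object at startup. A minimal standalone sketch of the same pattern, assuming only libavutil is available (the srs_* log calls are replaced with fprintf for illustration):

// Sketch: register a global FFmpeg log callback, as the diff above does.
// Assumes libavutil headers/libs are available; output goes to stderr here.
#include <cstdarg>
#include <cstdio>

extern "C" {
#include <libavutil/log.h>
}

static void demo_log_callback(void*, int level, const char* fmt, va_list vl)
{
    // FFmpeg levels are ordered with lower numbers being more severe,
    // so this only forwards warnings and worse to keep the example quiet.
    if (level > AV_LOG_WARNING) {
        return;
    }

    char buf[4096] = {0};
    int nbytes = vsnprintf(buf, sizeof(buf), fmt, vl);
    if (nbytes > 0) {
        fprintf(stderr, "[ffmpeg level=%d] %s", level, buf);
    }
}

int main()
{
    av_log_set_callback(demo_log_callback);
    av_log_set_level(AV_LOG_TRACE);

    // Any av_log() call made by FFmpeg (or by us) now reaches the callback.
    av_log(NULL, AV_LOG_WARNING, "hello from ffmpeg logging\n");
    return 0;
}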
@@ -1521,10 +1521,10 @@ srs_error_t SrsRtcPublishStream::on_rtcp_sr(SrsRtcpSR* rtcp)
     srs_error_t err = srs_success;
     SrsNtp srs_ntp = SrsNtp::to_time_ms(rtcp->get_ntp());
 
-    srs_verbose("sender report, ssrc_of_sender=%u, rtp_time=%u, sender_packet_count=%u, sender_octec_count=%u",
-        rtcp->get_ssrc(), rtcp->get_rtp_ts(), rtcp->get_rtp_send_packets(), rtcp->get_rtp_send_bytes());
+    srs_verbose("sender report, ssrc_of_sender=%u, rtp_time=%u, sender_packet_count=%u, sender_octec_count=%u, ms=%u",
+        rtcp->get_ssrc(), rtcp->get_rtp_ts(), rtcp->get_rtp_send_packets(), rtcp->get_rtp_send_bytes(), srs_ntp.system_ms_);
 
-    update_send_report_time(rtcp->get_ssrc(), srs_ntp);
+    update_send_report_time(rtcp->get_ssrc(), srs_ntp, rtcp->get_rtp_ts());
 
     return err;
 }
@@ -1664,16 +1664,16 @@ void SrsRtcPublishStream::update_rtt(uint32_t ssrc, int rtt)
     }
 }
 
-void SrsRtcPublishStream::update_send_report_time(uint32_t ssrc, const SrsNtp& ntp)
+void SrsRtcPublishStream::update_send_report_time(uint32_t ssrc, const SrsNtp& ntp, uint32_t rtp_time)
 {
     SrsRtcVideoRecvTrack* video_track = get_video_track(ssrc);
     if (video_track) {
-        return video_track->update_send_report_time(ntp);
+        return video_track->update_send_report_time(ntp, rtp_time);
     }
 
     SrsRtcAudioRecvTrack* audio_track = get_audio_track(ssrc);
     if (audio_track) {
-        return audio_track->update_send_report_time(ntp);
+        return audio_track->update_send_report_time(ntp, rtp_time);
     }
 }
@@ -376,7 +376,7 @@ private:
     SrsRtcAudioRecvTrack* get_audio_track(uint32_t ssrc);
     SrsRtcVideoRecvTrack* get_video_track(uint32_t ssrc);
     void update_rtt(uint32_t ssrc, int rtt);
-    void update_send_report_time(uint32_t ssrc, const SrsNtp& ntp);
+    void update_send_report_time(uint32_t ssrc, const SrsNtp& ntp, uint32_t rtp_time);
 private:
     virtual void http_hooks_on_unpublish();
 };
@@ -26,7 +26,7 @@ class SrsRtpRingBuffer;
 //     \___(not received, in nack list)
 // * seq1: The packet is done, we have already got and processed it.
 // * seq2,seq3,...,seq10,seq12,seq13: These packets are in the queue, waiting to be processed.
-// * seq10: This packet is lost or not received, we will put it in the nack list.
+// * seq11: This packet is lost or not received, we will put it in the nack list.
 // We store the received packets in the ring buffer.
 class SrsRtpRingBuffer
 {
@@ -6,6 +6,7 @@
 
 #include <srs_app_rtc_source.hpp>
 
+#include <math.h>
 #include <unistd.h>
 
 #include <srs_app_conn.hpp>
@@ -132,7 +133,7 @@ SrsNtp SrsNtp::to_time_ms(uint64_t ntp)
     srs_ntp.ntp_second_ = (ntp & 0xFFFFFFFF00000000ULL) >> 32;
     srs_ntp.ntp_fractions_ = (ntp & 0x00000000FFFFFFFFULL);
     srs_ntp.system_ms_ = (static_cast<uint64_t>(srs_ntp.ntp_second_) * 1000) +
-        (static_cast<double>(static_cast<uint64_t>(srs_ntp.ntp_fractions_) * 1000.0) / kMagicNtpFractionalUnit);
+        round((static_cast<double>(static_cast<uint64_t>(srs_ntp.ntp_fractions_) * 1000.0) / kMagicNtpFractionalUnit));
     return srs_ntp;
 }
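The change above rounds the NTP fractional part when converting to milliseconds, so that an ms -> NTP -> ms round trip stays exact (which is what the new Ntp unit test at the bottom of this diff checks). A small self-contained sketch of both directions, assuming kMagicNtpFractionalUnit is 2^32 (the NTP fraction range of one second):

// Sketch of NTP <-> milliseconds conversion with rounding of the fractional part.
#include <cmath>
#include <cstdint>
#include <cstdio>

static const double kNtpFractionalUnit = 4294967296.0; // 2^32, assumed value

uint64_t ms_to_ntp(uint64_t ms)
{
    uint32_t seconds = (uint32_t)(ms / 1000);
    uint32_t fractions = (uint32_t)std::round((ms % 1000) * kNtpFractionalUnit / 1000.0);
    return ((uint64_t)seconds << 32) | fractions;
}

uint64_t ntp_to_ms(uint64_t ntp)
{
    uint32_t seconds = (uint32_t)(ntp >> 32);
    uint32_t fractions = (uint32_t)(ntp & 0xFFFFFFFFULL);
    // Without the round(), truncation can lose up to 1ms and break the round trip.
    return (uint64_t)seconds * 1000 + (uint64_t)std::round(fractions * 1000.0 / kNtpFractionalUnit);
}

int main()
{
    for (uint64_t ms = 0; ms < 10000; ++ms) {
        if (ntp_to_ms(ms_to_ntp(ms)) != ms) {
            std::printf("round-trip failed at %llu ms\n", (unsigned long long)ms);
            return 1;
        }
    }
    std::printf("ms -> ntp -> ms round-trip ok for 0..9999 ms\n");
    return 0;
}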
@@ -1310,8 +1311,14 @@ srs_error_t SrsRtmpFromRtcBridger::on_rtp(SrsRtpPacket *pkt)
         return err;
     }
 
+    // Have not received any sender report, so we can't calculate avsync_time.
+    // Discard the packet to avoid timestamp problems in the live source.
+    if (pkt->get_avsync_time() <= 0) {
+        return err;
+    }
+
     if (pkt->is_audio()) {
-        err = trancode_audio(pkt);
+        err = transcode_audio(pkt);
     } else {
         err = packet_video(pkt);
     }
@@ -1325,12 +1332,12 @@ void SrsRtmpFromRtcBridger::on_unpublish()
     source_->on_unpublish();
 }
 
-srs_error_t SrsRtmpFromRtcBridger::trancode_audio(SrsRtpPacket *pkt)
+srs_error_t SrsRtmpFromRtcBridger::transcode_audio(SrsRtpPacket *pkt)
 {
     srs_error_t err = srs_success;
 
     // to common message.
-    uint32_t ts = pkt->header.get_timestamp()/(48000/1000);
+    uint32_t ts = pkt->get_avsync_time();
     if (is_first_audio) {
         int header_len = 0;
         uint8_t* header = NULL;
@@ -1361,7 +1368,7 @@ srs_error_t SrsRtmpFromRtcBridger::trancode_audio(SrsRtpPacket *pkt)
 
     for (std::vector<SrsAudioFrame *>::iterator it = out_pkts.begin(); it != out_pkts.end(); ++it) {
         SrsCommonMessage out_rtmp;
-        out_rtmp.header.timestamp = (*it)->dts*(48000/1000);
+        out_rtmp.header.timestamp = (*it)->dts;
         packet_aac(&out_rtmp, (*it)->samples[0].bytes, (*it)->samples[0].size, ts, is_first_audio);
 
         if ((err = source_->on_audio(&out_rtmp)) != srs_success) {
@@ -1407,7 +1414,7 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video(SrsRtpPacket* src)
     cache_video_pkts_[index].in_use = true;
     cache_video_pkts_[index].pkt = pkt;
     cache_video_pkts_[index].sn = pkt->header.get_sequence();
-    cache_video_pkts_[index].ts = pkt->header.get_timestamp();
+    cache_video_pkts_[index].ts = pkt->get_avsync_time();
 
     // Check whether to recover the lost packet and whether we can construct a video frame.
     if (lost_sn_ == pkt->header.get_sequence()) {
@@ -1444,7 +1451,7 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_key_frame(SrsRtpPacket* pkt)
     //type_codec1 + avc_type + composition time + fix header + count of sps + len of sps + sps + count of pps + len of pps + pps
     int nb_payload = 1 + 1 + 3 + 5 + 1 + 2 + sps->size + 1 + 2 + pps->size;
     SrsCommonMessage rtmp;
-    rtmp.header.initialize_video(nb_payload, pkt->header.get_timestamp() / 90, 1);
+    rtmp.header.initialize_video(nb_payload, pkt->get_avsync_time(), 1);
     rtmp.create_payload(nb_payload);
     rtmp.size = nb_payload;
     SrsBuffer payload(rtmp.payload, rtmp.size);
@@ -1472,18 +1479,18 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_key_frame(SrsRtpPacket* pkt)
     }
 
     if (-1 == key_frame_ts_) {
-        key_frame_ts_ = pkt->header.get_timestamp();
+        key_frame_ts_ = pkt->get_avsync_time();
         header_sn_ = pkt->header.get_sequence();
         lost_sn_ = header_sn_ + 1;
         // Received a key frame, so clean the cache of old P-frame packets.
         clear_cached_video();
         srs_trace("set ts=%lld, header=%hu, lost=%hu", key_frame_ts_, header_sn_, lost_sn_);
-    } else if (key_frame_ts_ != pkt->header.get_timestamp()) {
+    } else if (key_frame_ts_ != pkt->get_avsync_time()) {
         // New key frame, clean cache.
         int64_t old_ts = key_frame_ts_;
         uint16_t old_header_sn = header_sn_;
         uint16_t old_lost_sn = lost_sn_;
-        key_frame_ts_ = pkt->header.get_timestamp();
+        key_frame_ts_ = pkt->get_avsync_time();
         header_sn_ = pkt->header.get_sequence();
         lost_sn_ = header_sn_ + 1;
         clear_cached_video();
@@ -1495,7 +1502,7 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_key_frame(SrsRtpPacket* pkt)
     cache_video_pkts_[index].in_use = true;
     cache_video_pkts_[index].pkt = pkt;
     cache_video_pkts_[index].sn = pkt->header.get_sequence();
-    cache_video_pkts_[index].ts = pkt->header.get_timestamp();
+    cache_video_pkts_[index].ts = pkt->get_avsync_time();
 
     int32_t sn = lost_sn_;
     uint16_t tail_sn = 0;
@@ -1570,12 +1577,12 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_rtmp(const uint16_t start, const
     nb_payload += 1 + 1 + 3;
 
     SrsCommonMessage rtmp;
-    SrsRtpPacket* header = cache_video_pkts_[cache_index(start)].pkt;
-    rtmp.header.initialize_video(nb_payload, header->header.get_timestamp() / 90, 1);
+    SrsRtpPacket* pkt = cache_video_pkts_[cache_index(start)].pkt;
+    rtmp.header.initialize_video(nb_payload, pkt->get_avsync_time(), 1);
     rtmp.create_payload(nb_payload);
     rtmp.size = nb_payload;
     SrsBuffer payload(rtmp.payload, rtmp.size);
-    if (header->is_keyframe()) {
+    if (pkt->is_keyframe()) {
         payload.write_1bytes(0x17); // type(4 bits): key frame; code(4bits): avc
         key_frame_ts_ = -1;
     } else {
@@ -2214,7 +2221,9 @@ SrsRtcRecvTrack::SrsRtcRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescripti
         nack_receiver_ = new SrsRtpNackForReceiver(rtp_queue_, 1000 * 2 / 3);
     }
 
-    last_sender_report_sys_time = 0;
+    last_sender_report_rtp_time_ = 0;
+    last_sender_report_rtp_time1_ = 0;
+    last_sender_report_sys_time_ = 0;
 }
 
 SrsRtcRecvTrack::~SrsRtcRecvTrack()
@@ -2239,11 +2248,54 @@ void SrsRtcRecvTrack::update_rtt(int rtt)
     nack_receiver_->update_rtt(rtt);
 }
 
-void SrsRtcRecvTrack::update_send_report_time(const SrsNtp& ntp)
+void SrsRtcRecvTrack::update_send_report_time(const SrsNtp& ntp, uint32_t rtp_time)
 {
-    last_sender_report_ntp = ntp;
+    last_sender_report_ntp1_ = last_sender_report_ntp_;
+    last_sender_report_rtp_time1_ = last_sender_report_rtp_time_;
+
+    last_sender_report_ntp_ = ntp;
+    last_sender_report_rtp_time_ = rtp_time;
 
     // TODO: FIXME: Use system wall clock.
-    last_sender_report_sys_time = srs_update_system_time();;
+    last_sender_report_sys_time_ = srs_update_system_time();
 }
 
+int64_t SrsRtcRecvTrack::cal_avsync_time(uint32_t rtp_time)
+{
+    // Have not received at least 2 sender reports, so we can't calculate the sync time.
+    // TODO: FIXME: use the sample rate from sdp.
+    if (last_sender_report_rtp_time1_ <= 0) {
+        return -1;
+    }
+
+    // WebRTC uses sender reports to sync audio/video timestamps, because audio and video have different timebases:
+    // typically audio (Opus) is 48000Hz and video is 90000Hz.
+    // We use two sender report points to calculate the avsync timestamp (clock time) for any given rtp timestamp.
+    // For example, there are two historical sender reports of audio as below.
+    //   sender_report1: rtp_time1 = 10000, ntp_time1 = 40000
+    //   sender_report : rtp_time  = 10960, ntp_time  = 40020
+    //   (rtp_time - rtp_time1) / (ntp_time - ntp_time1) = 960 / 20 = 48
+    // Now we can calculate the ntp time (ntp_x) of any given rtp timestamp (rtp_x):
+    //   (rtp_x - rtp_time) / (ntp_x - ntp_time) = 48 => ntp_x = (rtp_x - rtp_time) / 48 + ntp_time
+    double sys_time_elapsed = static_cast<double>(last_sender_report_ntp_.system_ms_) - static_cast<double>(last_sender_report_ntp1_.system_ms_);
+
+    // Check if sys_time_elapsed is zero.
+    if (fpclassify(sys_time_elapsed) == FP_ZERO) {
+        return -1;
+    }
+
+    double rtp_time_elapsed = static_cast<double>(last_sender_report_rtp_time_) - static_cast<double>(last_sender_report_rtp_time1_);
+    int rate = round(rtp_time_elapsed / sys_time_elapsed);
+
+    if (rate <= 0) {
+        return -1;
+    }
+
+    double delta = round((rtp_time - last_sender_report_rtp_time_) / rate);
+
+    int64_t avsync_time = delta + last_sender_report_ntp_.system_ms_;
+
+    return avsync_time;
+}
+
 srs_error_t SrsRtcRecvTrack::send_rtcp_rr()
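The comment in cal_avsync_time above describes the interpolation in prose. A minimal sketch of the same calculation outside the class, using the numbers from that comment (the helper name and parameters are illustrative, not part of SRS):

// Sketch of the two-sender-report interpolation described in the comment above.
#include <cmath>
#include <cstdint>
#include <cstdio>

int64_t avsync_time_from_two_srs(uint32_t rtp_x,
                                 uint32_t rtp_time1, uint64_t ntp_ms1,  // older sender report
                                 uint32_t rtp_time, uint64_t ntp_ms)    // latest sender report
{
    // Wall-clock milliseconds elapsed between the two sender reports.
    double sys_elapsed = (double)ntp_ms - (double)ntp_ms1;
    if (sys_elapsed == 0) {
        return -1;
    }

    // RTP ticks elapsed over the same interval; the ratio is ticks per millisecond.
    double rtp_elapsed = (double)rtp_time - (double)rtp_time1;
    int rate = (int)std::round(rtp_elapsed / sys_elapsed);
    if (rate <= 0) {
        return -1;
    }

    // ntp_x = (rtp_x - rtp_time) / rate + ntp_time
    return (int64_t)std::round(((double)rtp_x - (double)rtp_time) / rate) + (int64_t)ntp_ms;
}

int main()
{
    // SR1: rtp=10000 at 40000ms, SR2: rtp=10960 at 40020ms => rate = 960 / 20 = 48 (48kHz audio).
    // An RTP timestamp of 11440 then maps to (11440 - 10960) / 48 + 40020 = 40030ms.
    std::printf("%lld ms\n", (long long)avsync_time_from_two_srs(11440, 10000, 40000, 10960, 40020));
    return 0;
}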
@@ -2251,8 +2303,8 @@ srs_error_t SrsRtcRecvTrack::send_rtcp_rr()
     srs_error_t err = srs_success;
 
     uint32_t ssrc = track_desc_->ssrc_;
-    const uint64_t& last_time = last_sender_report_sys_time;
-    if ((err = session_->send_rtcp_rr(ssrc, rtp_queue_, last_time, last_sender_report_ntp)) != srs_success) {
+    const uint64_t& last_time = last_sender_report_sys_time_;
+    if ((err = session_->send_rtcp_rr(ssrc, rtp_queue_, last_time, last_sender_report_ntp_)) != srs_success) {
         return srs_error_wrap(err, "ssrc=%u, last_time=%" PRId64, ssrc, last_time);
     }
@@ -2357,6 +2409,8 @@ srs_error_t SrsRtcAudioRecvTrack::on_rtp(SrsRtcSource* source, SrsRtpPacket* pkt
 {
     srs_error_t err = srs_success;
 
+    pkt->set_avsync_time(cal_avsync_time(pkt->header.get_timestamp()));
+
     if ((err = source->on_rtp(pkt)) != srs_success) {
         return srs_error_wrap(err, "source on rtp");
     }
@@ -2415,6 +2469,8 @@ srs_error_t SrsRtcVideoRecvTrack::on_rtp(SrsRtcSource* source, SrsRtpPacket* pkt
 
     pkt->frame_type = SrsFrameTypeVideo;
 
+    pkt->set_avsync_time(cal_avsync_time(pkt->header.get_timestamp()));
+
     if ((err = source->on_rtp(pkt)) != srs_success) {
         return srs_error_wrap(err, "source on rtp");
     }
@@ -318,7 +318,7 @@ public:
     virtual srs_error_t on_rtp(SrsRtpPacket *pkt);
     virtual void on_unpublish();
 private:
-    srs_error_t trancode_audio(SrsRtpPacket *pkt);
+    srs_error_t transcode_audio(SrsRtpPacket *pkt);
     void packet_aac(SrsCommonMessage* audio, char* data, int len, uint32_t pts, bool is_header);
     srs_error_t packet_video(SrsRtpPacket* pkt);
     srs_error_t packet_video_key_frame(SrsRtpPacket* pkt);
@@ -519,9 +519,15 @@ private:
     // By config, whether no copy.
     bool nack_no_copy_;
 protected:
-    // Send report ntp and received time.
-    SrsNtp last_sender_report_ntp;
-    uint64_t last_sender_report_sys_time;
+    // Latest sender report ntp and rtp time.
+    SrsNtp last_sender_report_ntp_;
+    int64_t last_sender_report_rtp_time_;
+
+    // Prev sender report ntp and rtp time.
+    SrsNtp last_sender_report_ntp1_;
+    int64_t last_sender_report_rtp_time1_;
+
+    uint64_t last_sender_report_sys_time_;
 public:
     SrsRtcRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* stream_descs, bool is_audio);
     virtual ~SrsRtcRecvTrack();
@@ -531,7 +537,8 @@ public:
     bool has_ssrc(uint32_t ssrc);
     uint32_t get_ssrc();
     void update_rtt(int rtt);
-    void update_send_report_time(const SrsNtp& ntp);
+    void update_send_report_time(const SrsNtp& ntp, uint32_t rtp_time);
+    int64_t cal_avsync_time(uint32_t rtp_time);
     srs_error_t send_rtcp_rr();
     srs_error_t send_rtcp_xr_rrtr();
     bool set_track_status(bool active);
@@ -9,6 +9,6 @@
 
 #define VERSION_MAJOR 4
 #define VERSION_MINOR 0
-#define VERSION_REVISION 156
+#define VERSION_REVISION 157
 
 #endif
@@ -762,6 +762,7 @@ SrsRtpPacket::SrsRtpPacket()
     frame_type = SrsFrameTypeReserved;
     cached_payload_size = 0;
     decode_handler = NULL;
+    avsync_time_ = -1;
 
     ++_srs_pps_objs_rtps->sugar;
 }
@@ -835,6 +836,8 @@ SrsRtpPacket* SrsRtpPacket::copy()
     // For performance issue, do not copy the unused field.
     cp->decode_handler = decode_handler;
 
+    cp->avsync_time_ = avsync_time_;
+
     return cp;
 }
@@ -294,6 +294,8 @@ private:
     int cached_payload_size;
     // The helper handler for decoder, use RAW payload if NULL.
     ISrsRtspPacketDecodeHandler* decode_handler;
+private:
+    int64_t avsync_time_;
 public:
     SrsRtpPacket();
     virtual ~SrsRtpPacket();
@@ -329,6 +331,8 @@ public:
     virtual srs_error_t decode(SrsBuffer* buf);
 public:
     bool is_keyframe();
+    void set_avsync_time(int64_t avsync_time) { avsync_time_ = avsync_time; }
+    int64_t get_avsync_time() const { return avsync_time_; }
 };
 
 // Single payload data.
@@ -897,3 +897,248 @@ VOID TEST(KernelRTCTest, DefaultTrackStatus)
     }
 }
 
+VOID TEST(KernelRTCTest, Ntp)
+{
+    if (true) {
+        // Test small systime, from 0-10000ms.
+        for (int i = 0; i < 10000; ++i) {
+            srs_utime_t now_ms = i;
+            // Convert systime to ntp.
+            SrsNtp ntp = SrsNtp::from_time_ms(now_ms);
+
+            ASSERT_EQ(ntp.system_ms_, now_ms);
+
+            // Convert ntp back to systime.
+            SrsNtp ntp1 = SrsNtp::to_time_ms(ntp.ntp_);
+            ASSERT_EQ(ntp1.system_ms_, now_ms);
+        }
+    }
+
+    if (true) {
+        // Test current systime to ntp.
+        srs_utime_t now_ms = srs_get_system_time() / 1000;
+        SrsNtp ntp = SrsNtp::from_time_ms(now_ms);
+
+        ASSERT_EQ(ntp.system_ms_, now_ms);
+
+        SrsNtp ntp1 = SrsNtp::to_time_ms(ntp.ntp_);
+        ASSERT_EQ(ntp1.system_ms_, now_ms);
+    }
+}
+
+VOID TEST(KernelRTCTest, SyncTimestampBySenderReportNormal)
+{
+    SrsRtcConnection s(NULL, SrsContextId());
+    SrsRtcPublishStream publish(&s, SrsContextId());
+
+    SrsRtcTrackDescription video_ds;
+    video_ds.type_ = "video";
+    video_ds.id_ = "VMo22nfLDn122nfnDNL2";
+    video_ds.ssrc_ = 200;
+
+    SrsRtcVideoRecvTrack* video = new SrsRtcVideoRecvTrack(&s, &video_ds);
+    publish.video_tracks_.push_back(video);
+
+    publish.set_all_tracks_status(true);
+
+    SrsRtcSource* rtc_source = new SrsRtcSource();
+    SrsAutoFree(SrsRtcSource, rtc_source);
+
+    srand(time(NULL));
+
+    if (true)
+    {
+        SrsRtpPacket* video_rtp_pkt = new SrsRtpPacket();
+        SrsAutoFree(SrsRtpPacket, video_rtp_pkt);
+
+        uint32_t video_absolute_ts = srs_get_system_time();
+        uint32_t video_rtp_ts = random();
+
+        video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+        video->on_rtp(rtc_source, video_rtp_pkt);
+        // No sender report received yet, so the absolute time cannot be calculated; expect -1.
+        EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);
+
+        SrsNtp ntp = SrsNtp::from_time_ms(video_absolute_ts);
+
+        SrsRtcpSR* video_sr = new SrsRtcpSR();
+        SrsAutoFree(SrsRtcpSR, video_sr);
+        video_sr->set_ssrc(200);
+
+        video_sr->set_ntp(ntp.ntp_);
+        video_sr->set_rtp_ts(video_rtp_ts);
+        publish.on_rtcp_sr(video_sr);
+
+        // Video timebase 90000, fps=25
+        video_rtp_ts += 3600;
+        video_absolute_ts += 40;
+        video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+        video->on_rtp(rtc_source, video_rtp_pkt);
+
+        // Only one sender report received, so the absolute time still cannot be calculated; expect -1.
+        EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);
+
+        ntp = SrsNtp::from_time_ms(video_absolute_ts);
+        video_sr->set_ntp(ntp.ntp_);
+        video_sr->set_rtp_ts(video_rtp_ts);
+        publish.on_rtcp_sr(video_sr);
+
+        for (int i = 0; i <= 1000; ++i) {
+            // Video timebase 90000, fps=25
+            video_rtp_ts += 3600;
+            video_absolute_ts += 40;
+            video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+            video->on_rtp(rtc_source, video_rtp_pkt);
+            EXPECT_NEAR(video_rtp_pkt->get_avsync_time(), video_absolute_ts, 1);
+        }
+    }
+}
+
+VOID TEST(KernelRTCTest, SyncTimestampBySenderReportOutOfOrder)
+{
+    SrsRtcConnection s(NULL, SrsContextId());
+    SrsRtcPublishStream publish(&s, SrsContextId());
+
+    SrsRtcTrackDescription video_ds;
+    video_ds.type_ = "video";
+    video_ds.id_ = "VMo22nfLDn122nfnDNL2";
+    video_ds.ssrc_ = 200;
+
+    SrsRtcVideoRecvTrack* video = new SrsRtcVideoRecvTrack(&s, &video_ds);
+    publish.video_tracks_.push_back(video);
+
+    publish.set_all_tracks_status(true);
+
+    SrsRtcSource* rtc_source = new SrsRtcSource();
+    SrsAutoFree(SrsRtcSource, rtc_source);
+
+    srand(time(NULL));
+
+    if (true)
+    {
+        SrsRtpPacket* video_rtp_pkt = new SrsRtpPacket();
+        SrsAutoFree(SrsRtpPacket, video_rtp_pkt);
+
+        uint32_t video_absolute_ts = srs_get_system_time();
+        uint32_t video_rtp_ts = random();
+
+        video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+        video->on_rtp(rtc_source, video_rtp_pkt);
+        // No sender report received yet, so the absolute time cannot be calculated; expect -1.
+        EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);
+
+        SrsNtp ntp = SrsNtp::from_time_ms(video_absolute_ts);
+
+        SrsRtcpSR* video_sr1 = new SrsRtcpSR();
+        SrsAutoFree(SrsRtcpSR, video_sr1);
+        video_sr1->set_ssrc(200);
+
+        video_sr1->set_ntp(ntp.ntp_);
+        video_sr1->set_rtp_ts(video_rtp_ts);
+
+        // Video timebase 90000, fps=25
+        video_rtp_ts += 3600;
+        video_absolute_ts += 40;
+        video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+        video->on_rtp(rtc_source, video_rtp_pkt);
+
+        // Still no sender report delivered, so the absolute time cannot be calculated; expect -1.
+        EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);
+
+        ntp = SrsNtp::from_time_ms(video_absolute_ts);
+        SrsRtcpSR* video_sr2 = new SrsRtcpSR();
+        SrsAutoFree(SrsRtcpSR, video_sr2);
+        video_sr2->set_ssrc(200);
+        video_sr2->set_ntp(ntp.ntp_);
+        video_sr2->set_rtp_ts(video_rtp_ts);
+
+        // Sender reports out of order: sr2 arrives before sr1.
+        publish.on_rtcp_sr(video_sr2);
+        publish.on_rtcp_sr(video_sr1);
+
+        for (int i = 0; i <= 1000; ++i) {
+            // Video timebase 90000, fps=25
+            video_rtp_ts += 3600;
+            video_absolute_ts += 40;
+            video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+            video->on_rtp(rtc_source, video_rtp_pkt);
+            EXPECT_NEAR(video_rtp_pkt->get_avsync_time(), video_absolute_ts, 1);
+        }
+    }
+}
+
+VOID TEST(KernelRTCTest, SyncTimestampBySenderReportConsecutive)
+{
+    SrsRtcConnection s(NULL, SrsContextId());
+    SrsRtcPublishStream publish(&s, SrsContextId());
+
+    SrsRtcTrackDescription video_ds;
+    video_ds.type_ = "video";
+    video_ds.id_ = "VMo22nfLDn122nfnDNL2";
+    video_ds.ssrc_ = 200;
+
+    SrsRtcVideoRecvTrack* video = new SrsRtcVideoRecvTrack(&s, &video_ds);
+    publish.video_tracks_.push_back(video);
+
+    publish.set_all_tracks_status(true);
+
+    SrsRtcSource* rtc_source = new SrsRtcSource();
+    SrsAutoFree(SrsRtcSource, rtc_source);
+
+    srand(time(NULL));
+
+    if (true)
+    {
+        SrsRtpPacket* video_rtp_pkt = new SrsRtpPacket();
+        SrsAutoFree(SrsRtpPacket, video_rtp_pkt);
+
+        uint32_t video_absolute_ts = srs_get_system_time();
+        uint32_t video_rtp_ts = random();
+
+        video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+        video->on_rtp(rtc_source, video_rtp_pkt);
+        // No sender report received yet, so the absolute time cannot be calculated; expect -1.
+        EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);
+
+        SrsNtp ntp = SrsNtp::from_time_ms(video_absolute_ts);
+
+        SrsRtcpSR* video_sr = new SrsRtcpSR();
+        SrsAutoFree(SrsRtcpSR, video_sr);
+        video_sr->set_ssrc(200);
+
+        video_sr->set_ntp(ntp.ntp_);
+        video_sr->set_rtp_ts(video_rtp_ts);
+        publish.on_rtcp_sr(video_sr);
+
+        // Video timebase 90000, fps=25
+        video_rtp_ts += 3600;
+        video_absolute_ts += 40;
+        video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+        video->on_rtp(rtc_source, video_rtp_pkt);
+
+        // Only one sender report received, so the absolute time still cannot be calculated; expect -1.
+        EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);
+
+        ntp = SrsNtp::from_time_ms(video_absolute_ts);
+        video_sr->set_ntp(ntp.ntp_);
+        video_sr->set_rtp_ts(video_rtp_ts);
+        publish.on_rtcp_sr(video_sr);
+
+        for (int i = 0; i <= 1000; ++i) {
+            // Video timebase 90000, fps=25
+            video_rtp_ts += 3600;
+            video_absolute_ts += 40;
+            video_rtp_pkt->header.set_timestamp(video_rtp_ts);
+            video->on_rtp(rtc_source, video_rtp_pkt);
+            EXPECT_NEAR(video_rtp_pkt->get_avsync_time(), video_absolute_ts, 1);
+
+            // Send a sender report every 4 seconds.
+            if (i % 100 == 99) {
+                ntp = SrsNtp::from_time_ms(video_absolute_ts);
+                video_sr->set_ntp(ntp.ntp_);
+                video_sr->set_rtp_ts(video_rtp_ts);
+                publish.on_rtcp_sr(video_sr);
+            }
+        }
+    }
+}