Mirror of https://github.com/ossrs/srs.git (synced 2025-03-09 15:49:59 +00:00)

RTC: Refine stream/ssrc/sdp structure

This commit is contained in:
winlin 2020-07-15 13:11:35 +08:00
parent 426938cc8a
commit 991672bf41
11 changed files with 2204 additions and 563 deletions

View file

@ -35,6 +35,8 @@
#include <srs_kernel_buffer.hpp>
#include <srs_kernel_rtc_rtp.hpp>
#include <srs_core_autofree.hpp>
#include <srs_app_rtc_queue.hpp>
#include <srs_app_rtc_conn.hpp>
#ifdef SRS_FFMPEG_FIT
#include <srs_app_rtc_codec.hpp>
@ -90,6 +92,41 @@ srs_error_t aac_raw_append_adts_header(SrsSharedPtrMessage* shared_audio, SrsFor
return err;
}
// Scale factor between the NTP fractional part and whole seconds:
// 2^32 fractional units per second (RFC 3550 NTP timestamp format).
uint64_t SrsNtp::kMagicNtpFractionalUnit = 1ULL << 32;
// Zero-initialize all timestamp fields; build meaningful values via
// from_time_ms() or to_time_ms().
SrsNtp::SrsNtp()
{
    system_ms_ = 0;
    ntp_ = 0;
    ntp_second_ = 0;
    ntp_fractions_ = 0;
}
SrsNtp::~SrsNtp()
{
}
// Build an SrsNtp from a UTC time in milliseconds since epoch.
// The 64-bit NTP value packs seconds in the high 32 bits and the
// fractional part (ms remainder scaled by 2^32) in the low 32 bits.
SrsNtp SrsNtp::from_time_ms(uint64_t ms)
{
    SrsNtp v;
    v.system_ms_ = ms;
    v.ntp_second_ = ms / 1000;
    v.ntp_fractions_ = ((ms % 1000) / 1000.0) * kMagicNtpFractionalUnit;
    v.ntp_ = (static_cast<uint64_t>(v.ntp_second_) << 32) | v.ntp_fractions_;
    return v;
}
// Decode a 64-bit NTP timestamp back into an SrsNtp, recovering the
// system time in milliseconds (fractions are rescaled from 1/2^32 units).
SrsNtp SrsNtp::to_time_ms(uint64_t ntp)
{
    SrsNtp v;
    v.ntp_ = ntp;
    v.ntp_second_ = static_cast<uint32_t>(ntp >> 32);
    v.ntp_fractions_ = static_cast<uint32_t>(ntp);
    uint64_t whole_ms = static_cast<uint64_t>(v.ntp_second_) * 1000;
    double frac_ms = static_cast<uint64_t>(v.ntp_fractions_) * 1000.0 / kMagicNtpFractionalUnit;
    v.system_ms_ = whole_ms + frac_ms;
    return v;
}
SrsRtcConsumer::SrsRtcConsumer(SrsRtcStream* s)
{
source = s;
@ -254,6 +291,7 @@ SrsRtcStream::SrsRtcStream()
#else
bridger_ = new SrsRtcDummyBridger();
#endif
stream_desc_ = NULL;
}
SrsRtcStream::~SrsRtcStream()
@ -264,6 +302,7 @@ SrsRtcStream::~SrsRtcStream()
srs_freep(req);
srs_freep(bridger_);
srs_freep(stream_desc_);
}
srs_error_t SrsRtcStream::initialize(SrsRequest* r)
@ -423,6 +462,36 @@ srs_error_t SrsRtcStream::on_rtp(SrsRtpPacket2* pkt)
return err;
}
// Replace the stream description. The description is deep-copied, so the
// caller keeps ownership of stream_desc. Passing NULL clears the current
// description instead of crashing on the copy() call.
void SrsRtcStream::set_stream_desc(SrsRtcStreamDescription* stream_desc)
{
    // srs_freep also resets stream_desc_ to NULL.
    srs_freep(stream_desc_);

    if (stream_desc) {
        stream_desc_ = stream_desc->copy();
    }
}
std::vector<SrsRtcTrackDescription*> SrsRtcStream::get_track_desc(std::string type, std::string media_name)
{
std::vector<SrsRtcTrackDescription*> track_descs;
if (!stream_desc_) {
return track_descs;
}
if (type == "audio") {
if (stream_desc_->audio_track_desc_->media_->name_ == media_name) {
track_descs.push_back(stream_desc_->audio_track_desc_);
}
}
if (type == "video") {
std::vector<SrsRtcTrackDescription*>::iterator it = stream_desc_->video_track_descs_.begin();
while (it != stream_desc_->video_track_descs_.end() ){
track_descs.push_back(*it);
++it;
}
}
return track_descs;
}
#ifdef SRS_FFMPEG_FIT
SrsRtcFromRtmpBridger::SrsRtcFromRtmpBridger(SrsRtcStream* source)
{
@ -975,3 +1044,672 @@ void SrsRtcDummyBridger::on_unpublish()
{
}
// NOTE(review): the default ctor leaves pt_/sample_/type_ unset here —
// presumably in-class initializers or callers fill them; confirm in header.
SrsCodecPayload::SrsCodecPayload()
{
}
// Construct a payload with its RTP payload type, codec name and clock rate.
SrsCodecPayload::SrsCodecPayload(uint8_t pt, std::string encode_name, int sample)
{
    pt_ = pt;
    name_ = encode_name;
    sample_ = sample;
}
SrsCodecPayload::~SrsCodecPayload()
{
}
// Deep-copy this payload description; the caller owns the returned object.
SrsCodecPayload* SrsCodecPayload::copy()
{
    SrsCodecPayload* dup = new SrsCodecPayload();

    dup->type_ = type_;
    dup->pt_ = pt_;
    dup->name_ = name_;
    dup->sample_ = sample_;
    dup->rtcp_fbs_ = rtcp_fbs_;

    return dup;
}
// Convert this payload description into the SDP media payload type form.
SrsMediaPayloadType SrsCodecPayload::generate_media_payload_type()
{
    SrsMediaPayloadType mpt(pt_);

    mpt.encoding_name_ = name_;
    mpt.clock_rate_ = sample_;
    mpt.rtcp_fb_ = rtcp_fbs_;

    return mpt;
}
SrsVideoPayload::SrsVideoPayload()
{
}
// Construct a video payload; H.264 fmtp parameters start empty and are
// filled by set_h264_param_desc() when parsing the SDP.
SrsVideoPayload::SrsVideoPayload(uint8_t pt, std::string encode_name, int sample)
    :SrsCodecPayload(pt, encode_name, sample)
{
    h264_param_.profile_level_id = "";
    h264_param_.packetization_mode = "";
    h264_param_.level_asymmerty_allow = "";
}
SrsVideoPayload::~SrsVideoPayload()
{
}
// Deep-copy this video payload, including the H.264 fmtp parameters.
// The caller owns the returned object.
SrsVideoPayload* SrsVideoPayload::copy()
{
    SrsVideoPayload* dup = new SrsVideoPayload();

    dup->type_ = type_;
    dup->pt_ = pt_;
    dup->name_ = name_;
    dup->sample_ = sample_;
    dup->rtcp_fbs_ = rtcp_fbs_;
    dup->h264_param_ = h264_param_;

    return dup;
}
// Convert this payload to the SDP media payload type, serializing the
// H.264 parameters into the fmtp line.
SrsMediaPayloadType SrsVideoPayload::generate_media_payload_type()
{
    SrsMediaPayloadType media_payload_type(pt_);

    media_payload_type.encoding_name_ = name_;
    media_payload_type.clock_rate_ = sample_;
    media_payload_type.rtcp_fb_ = rtcp_fbs_;

    // Join non-empty parameters with ';'. Tracking the separator fixes the
    // original defect where an empty first parameter produced a malformed
    // fmtp with a leading ';' (e.g. ";packetization-mode=1").
    std::ostringstream format_specific_param;
    const char* sep = "";
    if (!h264_param_.level_asymmerty_allow.empty()) {
        format_specific_param << sep << "level-asymmetry-allowed=" << h264_param_.level_asymmerty_allow;
        sep = ";";
    }
    if (!h264_param_.packetization_mode.empty()) {
        format_specific_param << sep << "packetization-mode=" << h264_param_.packetization_mode;
        sep = ";";
    }
    if (!h264_param_.profile_level_id.empty()) {
        format_specific_param << sep << "profile-level-id=" << h264_param_.profile_level_id;
    }

    media_payload_type.format_specific_param_ = format_specific_param.str();

    return media_payload_type;
}
// Parse an H.264 fmtp string, e.g.
// "level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f",
// into h264_param_. Unknown or malformed key/value pairs are rejected.
srs_error_t SrsVideoPayload::set_h264_param_desc(std::string fmtp)
{
    srs_error_t err = srs_success;

    std::vector<std::string> attributes = split_str(fmtp, ";");
    for (size_t i = 0; i < attributes.size(); ++i) {
        std::vector<std::string> kv = split_str(attributes[i], "=");
        if (kv.size() != 2) {
            return srs_error_new(ERROR_RTC_SDP_DECODE, "invalid h264 param=%s", attributes[i].c_str());
        }

        const std::string& key = kv[0];
        const std::string& value = kv[1];

        if (key == "profile-level-id") {
            h264_param_.profile_level_id = value;
        } else if (key == "packetization-mode") {
            // 6.3. Non-Interleaved Mode
            // This mode is in use when the value of the OPTIONAL packetization-mode
            // media type parameter is equal to 1. This mode SHOULD be supported.
            // It is primarily intended for low-delay applications. Only single NAL
            // unit packets, STAP-As, and FU-As MAY be used in this mode. STAP-Bs,
            // MTAPs, and FU-Bs MUST NOT be used. The transmission order of NAL
            // units MUST comply with the NAL unit decoding order.
            // @see https://tools.ietf.org/html/rfc6184#section-6.3
            h264_param_.packetization_mode = value;
        } else if (key == "level-asymmetry-allowed") {
            h264_param_.level_asymmerty_allow = value;
        } else {
            return srs_error_new(ERROR_RTC_SDP_DECODE, "invalid h264 param=%s", key.c_str());
        }
    }

    return err;
}
SrsAudioPayload::SrsAudioPayload()
{
}
// Construct an audio payload with channel count; Opus fmtp options default
// to off and are filled by set_opus_param_desc() when parsing the SDP.
SrsAudioPayload::SrsAudioPayload(uint8_t pt, std::string encode_name, int sample, int channel)
    :SrsCodecPayload(pt, encode_name, sample)
{
    channel_ = channel;
    opus_param_.minptime = 0;
    opus_param_.use_inband_fec = false;
    opus_param_.usedtx = false;
}
SrsAudioPayload::~SrsAudioPayload()
{
}
// Deep-copy this audio payload, including channel count and Opus options.
// The caller owns the returned object.
SrsAudioPayload* SrsAudioPayload::copy()
{
    SrsAudioPayload* dup = new SrsAudioPayload();

    dup->type_ = type_;
    dup->pt_ = pt_;
    dup->name_ = name_;
    dup->sample_ = sample_;
    dup->rtcp_fbs_ = rtcp_fbs_;
    dup->channel_ = channel_;
    dup->opus_param_ = opus_param_;

    return dup;
}
// Convert this payload to the SDP media payload type, serializing the
// Opus parameters into the fmtp line.
SrsMediaPayloadType SrsAudioPayload::generate_media_payload_type()
{
    SrsMediaPayloadType media_payload_type(pt_);

    media_payload_type.encoding_name_ = name_;
    media_payload_type.clock_rate_ = sample_;
    media_payload_type.encoding_param_ = srs_int2str(channel_);
    media_payload_type.rtcp_fb_ = rtcp_fbs_;

    // Join the enabled options with ';'. Tracking the separator fixes the
    // original defect where minptime==0 produced a malformed fmtp with a
    // leading ';' (e.g. ";useinbandfec=1").
    std::ostringstream format_specific_param;
    const char* sep = "";
    if (opus_param_.minptime) {
        format_specific_param << sep << "minptime=" << opus_param_.minptime;
        sep = ";";
    }
    if (opus_param_.use_inband_fec) {
        format_specific_param << sep << "useinbandfec=1";
        sep = ";";
    }
    if (opus_param_.usedtx) {
        format_specific_param << sep << "usedtx=1";
    }

    media_payload_type.format_specific_param_ = format_specific_param.str();

    return media_payload_type;
}
// Parse an Opus fmtp string, e.g. "minptime=10;useinbandfec=1;usedtx=1",
// into opus_param_. Unknown keys are ignored (unlike the H.264 parser),
// but a token without '=' is an error.
srs_error_t SrsAudioPayload::set_opus_param_desc(std::string fmtp)
{
    srs_error_t err = srs_success;

    std::vector<std::string> attributes = split_str(fmtp, ";");
    for (size_t i = 0; i < attributes.size(); ++i) {
        std::vector<std::string> kv = split_str(attributes[i], "=");
        if (kv.size() != 2) {
            return srs_error_new(ERROR_RTC_SDP_DECODE, "invalid opus param=%s", attributes[i].c_str());
        }

        if (kv[0] == "minptime") {
            opus_param_.minptime = (int)::atol(kv[1].c_str());
        } else if (kv[0] == "useinbandfec") {
            opus_param_.use_inband_fec = (kv[1] == "1");
        } else if (kv[0] == "usedtx") {
            opus_param_.usedtx = (kv[1] == "1");
        }
    }

    return err;
}
// All codec payload pointers start NULL; once set, this object owns them
// and releases them in the destructor.
SrsRtcTrackDescription::SrsRtcTrackDescription()
{
    ssrc_ = 0;
    rtx_ssrc_ = 0;
    fec_ssrc_ = 0;
    is_active_ = true;
    media_ = NULL;
    red_ = NULL;
    rtx_ = NULL;
    ulpfec_ = NULL;
    rsfec_ = NULL;
}
// Release the owned codec payload descriptions.
SrsRtcTrackDescription::~SrsRtcTrackDescription()
{
    srs_freep(media_);
    srs_freep(red_);
    srs_freep(rtx_);
    srs_freep(ulpfec_);
    srs_freep(rsfec_);
}
// Whether ssrc belongs to this track (primary, RTX or FEC stream).
bool SrsRtcTrackDescription::has_ssrc(uint32_t ssrc)
{
    return ssrc == ssrc_ || ssrc == rtx_ssrc_ || ssrc == fec_ssrc_;
}
// Register an RTP header extension mapping (extmap id -> uri).
// An existing id is overwritten.
void SrsRtcTrackDescription::add_rtp_extension_desc(int id, std::string uri)
{
    extmaps_[id] = uri;
}
// Set the SDP direction, e.g. "sendonly"/"recvonly"/"sendrecv" — presumably;
// the valid values are decided by callers, confirm at the call sites.
void SrsRtcTrackDescription::set_direction(std::string direction)
{
    direction_ = direction;
}
// Take ownership of payload as the media codec description. The previous
// payload (if any) is released first, so calling this twice no longer
// leaks — consistent with SrsRtcStream::set_stream_desc().
void SrsRtcTrackDescription::set_codec_payload(SrsCodecPayload* payload)
{
    srs_freep(media_);
    media_ = payload;
}
void SrsRtcTrackDescription::create_auxiliary_payload(const std::vector<SrsMediaPayloadType> payloads)
{
if (!payloads.size()) {
return;
}
SrsMediaPayloadType payload = payloads.at(0);
if (payload.encoding_name_ == "red"){
srs_freep(red_);
red_ = new SrsCodecPayload(payload.payload_type_, "red", payload.clock_rate_);
} else if (payload.encoding_name_ == "rtx") {
srs_freep(rtx_);
rtx_ = new SrsCodecPayload(payload.payload_type_, "rtx", payload.clock_rate_);
} else if (payload.encoding_name_ == "ulpfec") {
srs_freep(ulpfec_);
ulpfec_ = new SrsCodecPayload(payload.payload_type_, "ulpfec", payload.clock_rate_);
} else if (payload.encoding_name_ == "rsfec") {
srs_freep(rsfec_);
rsfec_ = new SrsCodecPayload(payload.payload_type_, "rsfec", payload.clock_rate_);
}
}
// Set the SSRC of the RTX (retransmission) stream of this track.
void SrsRtcTrackDescription::set_rtx_ssrc(uint32_t ssrc)
{
    rtx_ssrc_ = ssrc;
}
// Set the SSRC of the FEC stream of this track.
void SrsRtcTrackDescription::set_fec_ssrc(uint32_t ssrc)
{
    fec_ssrc_ = ssrc;
}
// Set the SDP media identification (a=mid) of this track.
void SrsRtcTrackDescription::set_mid(std::string mid)
{
    mid_ = mid;
}
// Reverse-lookup the extmap id registered for uri; -1 when not found.
int SrsRtcTrackDescription::get_rtp_extension_id(std::string uri)
{
    std::map<int, std::string>::iterator it;
    for (it = extmaps_.begin(); it != extmaps_.end(); ++it) {
        if (it->second == uri) {
            return it->first;
        }
    }

    return -1;
}
// Deep-copy this track description; owned codec payloads are cloned
// (NULL stays NULL). The caller owns the returned object.
SrsRtcTrackDescription* SrsRtcTrackDescription::copy()
{
    SrsRtcTrackDescription* dup = new SrsRtcTrackDescription();

    // Plain value fields.
    dup->type_ = type_;
    dup->id_ = id_;
    dup->ssrc_ = ssrc_;
    dup->fec_ssrc_ = fec_ssrc_;
    dup->rtx_ssrc_ = rtx_ssrc_;
    dup->extmaps_ = extmaps_;
    dup->direction_ = direction_;
    dup->mid_ = mid_;
    dup->is_active_ = is_active_;

    // Owned payloads are deep-copied.
    dup->media_ = media_ ? media_->copy() : NULL;
    dup->red_ = red_ ? red_->copy() : NULL;
    dup->rtx_ = rtx_ ? rtx_->copy() : NULL;
    dup->ulpfec_ = ulpfec_ ? ulpfec_->copy() : NULL;
    dup->rsfec_ = rsfec_ ? rsfec_->copy() : NULL;

    return dup;
}
// The audio track starts NULL (a stream may be video-only); this object
// owns the audio and video track descriptions once set.
SrsRtcStreamDescription::SrsRtcStreamDescription()
{
    audio_track_desc_ = NULL;
}
// Release all owned track descriptions.
SrsRtcStreamDescription::~SrsRtcStreamDescription()
{
    srs_freep(audio_track_desc_);

    // size_t index avoids the signed/unsigned comparison of the original.
    for (size_t i = 0; i < video_track_descs_.size(); ++i) {
        srs_freep(video_track_descs_.at(i));
    }
    video_track_descs_.clear();
}
// Deep-copy the stream description, cloning the audio track (when present)
// and every video track. The caller owns the returned object.
SrsRtcStreamDescription* SrsRtcStreamDescription::copy()
{
    SrsRtcStreamDescription* stream_desc = new SrsRtcStreamDescription();

    if (audio_track_desc_) {
        stream_desc->audio_track_desc_ = audio_track_desc_->copy();
    }

    // size_t index avoids the signed/unsigned comparison of the original.
    for (size_t i = 0; i < video_track_descs_.size(); ++i) {
        stream_desc->video_track_descs_.push_back(video_track_descs_.at(i)->copy());
    }

    return stream_desc;
}
// Find the track (audio or video) that owns ssrc; NULL when no track matches.
SrsRtcTrackDescription* SrsRtcStreamDescription::find_track_description_by_ssrc(uint32_t ssrc)
{
    // The audio track may be NULL for a video-only stream (the constructor
    // initializes it to NULL), so guard before dereferencing.
    if (audio_track_desc_ && audio_track_desc_->has_ssrc(ssrc)) {
        return audio_track_desc_;
    }

    for (size_t i = 0; i < video_track_descs_.size(); ++i) {
        if (video_track_descs_.at(i)->has_ssrc(ssrc)) {
            return video_track_descs_.at(i);
        }
    }

    return NULL;
}
// Empty interface constructor/destructor for RTC track implementations.
ISrsRtcTrack::ISrsRtcTrack()
{
}
ISrsRtcTrack::~ISrsRtcTrack()
{
}
// Build a receiving track over the session. The track description is
// deep-copied, so the caller keeps ownership of track_desc.
SrsRtcRecvTrack::SrsRtcRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc, bool is_audio)
{
    session_ = session;
    track_desc_ = track_desc->copy();

    // Audio keeps a much smaller ring/NACK window than video.
    int queue_size = is_audio ? 100 : 1000;
    rtp_queue_ = new SrsRtpRingBuffer(queue_size);
    nack_receiver_ = new SrsRtpNackForReceiver(rtp_queue_, queue_size * 2 / 3);
}
// Release the owned RTP ring buffer, NACK receiver and track description.
SrsRtcRecvTrack::~SrsRtcRecvTrack()
{
    srs_freep(rtp_queue_);
    srs_freep(nack_receiver_);
    srs_freep(track_desc_);
}
// Whether ssrc belongs to this receiving track (primary, RTX or FEC).
bool SrsRtcRecvTrack::has_ssrc(uint32_t ssrc)
{
    return track_desc_ ? track_desc_->has_ssrc(ssrc) : false;
}
// Propagate the measured RTT to the NACK receiver, which uses it to pace
// retransmission requests.
void SrsRtcRecvTrack::update_rtt(int rtt)
{
    if (!nack_receiver_) {
        return;
    }
    nack_receiver_->update_rtt(rtt);
}
// Record the NTP time of the latest RTCP sender report and the local time
// it arrived, for later use when building receiver reports.
void SrsRtcRecvTrack::update_send_report_time(const SrsNtp& ntp)
{
    last_sender_report_ntp = ntp;
    // Fixed: the original line ended with a stray double semicolon.
    last_sender_report_sys_time = srs_update_system_time();
}
// Send an RTCP receiver report for this track through the session.
// Both pointers are now checked: the original guarded only session_ here
// while dereferencing track_desc_ (and the inverse in send_rtcp_xr_rrtr).
srs_error_t SrsRtcRecvTrack::send_rtcp_rr()
{
    srs_error_t err = srs_success;

    if (session_ && track_desc_) {
        return session_->send_rtcp_rr(track_desc_->ssrc_, rtp_queue_, last_sender_report_sys_time, last_sender_report_ntp);
    }

    return err;
}
// Send an RTCP XR RRTR (receiver reference time report) for this track.
srs_error_t SrsRtcRecvTrack::send_rtcp_xr_rrtr()
{
    srs_error_t err = srs_success;

    if (session_ && track_desc_) {
        return session_->send_rtcp_xr_rrtr(track_desc_->ssrc_);
    }

    return err;
}
// NACK bookkeeping for a received RTP packet: mark the sequence as arrived,
// queue NACKs for any gap the new sequence reveals, cache a copy of the
// packet for retransmission, then flush pending NACK requests to the peer.
// The statement order matters (timeout purge -> find -> update -> insert).
srs_error_t SrsRtcRecvTrack::on_nack(SrsRtpPacket2* pkt)
{
    srs_error_t err = srs_success;
    uint32_t ssrc = pkt->header.get_ssrc();
    uint16_t seq = pkt->header.get_sequence();
    // TODO: check whether is necessary?
    nack_receiver_->remove_timeout_packets();
    SrsRtpNackInfo* nack_info = nack_receiver_->find(seq);
    if (nack_info) {
        // seq had been received.
        nack_receiver_->remove(seq);
        return err;
    }
    // insert check nack list
    uint16_t nack_first = 0, nack_last = 0;
    if (!rtp_queue_->update(seq, nack_first, nack_last)) {
        srs_warn("too old seq %u, range [%u, %u]", seq, rtp_queue_->begin, rtp_queue_->end);
    }
    // A positive distance presumably marks [nack_first, nack_last) as the
    // missing range to request — confirm against SrsRtpRingBuffer::update.
    if (srs_rtp_seq_distance(nack_first, nack_last) > 0) {
        srs_trace("update seq=%u, nack range [%u, %u]", seq, nack_first, nack_last);
        nack_receiver_->insert(nack_first, nack_last);
        nack_receiver_->check_queue_size();
    }
    // insert into video_queue and audio_queue
    rtp_queue_->set(seq, pkt->copy());
    // send_nack
    session_->check_send_nacks(nack_receiver_, ssrc);
    return err;
}
// Base-class hook: subclasses (audio/video) override this to consume RTP;
// the default implementation does nothing.
srs_error_t SrsRtcRecvTrack::on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt)
{
    return srs_success;
}
// Audio receiving track: constructed with is_audio=true, which selects the
// small (100-packet) ring buffer in the base constructor.
SrsRtcAudioRecvTrack::SrsRtcAudioRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
    : SrsRtcRecvTrack(session, track_desc, true)
{
}
SrsRtcAudioRecvTrack::~SrsRtcAudioRecvTrack()
{
}
// Consume a received audio RTP packet: feed it to the stream source and
// run NACK bookkeeping on it.
srs_error_t SrsRtcAudioRecvTrack::on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt)
{
    srs_error_t err = srs_success;

    // TODO: FIXME: Demux by payload type first — RED, RTX retransmission
    // and FEC packets should be restored/processed before delivery.

    // Deliver to the stream source, which fans out to consumers.
    if (source && (err = source->on_rtp(pkt)) != srs_success) {
        return srs_error_wrap(err, "source on rtp");
    }

    // For NACK to handle packet.
    if ((err = on_nack(pkt)) != srs_success) {
        return srs_error_wrap(err, "on nack");
    }

    return err;
}
// Video receiving track: constructed with is_audio=false, which selects the
// large (1000-packet) ring buffer in the base constructor.
SrsRtcVideoRecvTrack::SrsRtcVideoRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
    : SrsRtcRecvTrack(session, track_desc, false)
{
    request_key_frame_ = false;
}
SrsRtcVideoRecvTrack::~SrsRtcVideoRecvTrack()
{
}
// Consume a received video RTP packet: tag it as video, send a PLI if a
// keyframe was requested, feed the source and run NACK bookkeeping.
srs_error_t SrsRtcVideoRecvTrack::on_rtp(SrsRtcStream* source, SrsRtpPacket2* pkt)
{
    srs_error_t err = srs_success;

    pkt->frame_type = SrsFrameTypeVideo;

    // TODO: FIXME: add rtp process
    if (request_key_frame_) {
        // TODO: FIXME: add coroutine to request key frame.
        request_key_frame_ = false;

        // TODO: FIXME: Check error.
        session_->send_rtcp_fb_pli(track_desc_->ssrc_);
    }

    // Deliver to the stream source, which fans out to consumers.
    if (source && (err = source->on_rtp(pkt)) != srs_success) {
        return srs_error_wrap(err, "source on rtp");
    }

    // For NACK to handle packet.
    if ((err = on_nack(pkt)) != srs_success) {
        return srs_error_wrap(err, "on nack");
    }

    return err;
}
// Mark that a keyframe is needed; the flag is consumed (and a PLI sent)
// on the next on_rtp() call.
void SrsRtcVideoRecvTrack::request_keyframe()
{
    request_key_frame_ = true;
}
// Build a sending track over the session. The track description is
// deep-copied, so the caller keeps ownership of track_desc.
SrsRtcSendTrack::SrsRtcSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc, bool is_audio)
{
    session_ = session;
    track_desc_ = track_desc->copy();

    // Audio uses a much smaller ARQ ring than video.
    rtp_queue_ = new SrsRtpRingBuffer(is_audio ? 100 : 1000);
}
// Release the owned RTP ring buffer and track description.
SrsRtcSendTrack::~SrsRtcSendTrack()
{
    srs_freep(rtp_queue_);
    srs_freep(track_desc_);
}
// Whether ssrc belongs to this sending track (primary, RTX or FEC).
bool SrsRtcSendTrack::has_ssrc(uint32_t ssrc)
{
    return track_desc_ ? track_desc_->has_ssrc(ssrc) : false;
}
// Fetch a previously sent packet by sequence number for retransmission;
// NULL when not cached (or when there is no queue).
SrsRtpPacket2* SrsRtcSendTrack::fetch_rtp_packet(uint16_t seq)
{
    if (!rtp_queue_) {
        return NULL;
    }
    return rtp_queue_->at(seq);
}
// Base-class hook: subclasses rewrite the RTP header and queue the packet;
// the default implementation does nothing.
srs_error_t SrsRtcSendTrack::on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt)
{
    return srs_success;
}
// Base-class RTCP hook: no-op by default.
srs_error_t SrsRtcSendTrack::on_rtcp(SrsRtpPacket2* pkt)
{
    return srs_success;
}
// Audio sending track: constructed with is_audio=true, which selects the
// small (100-packet) ARQ ring in the base constructor.
SrsRtcAudioSendTrack::SrsRtcAudioSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
    : SrsRtcSendTrack(session, track_desc, true)
{
}
SrsRtcAudioSendTrack::~SrsRtcAudioSendTrack()
{
}
// Prepare an audio RTP packet for sending: stamp this track's SSRC and
// payload type, cache a copy for NACK/ARQ, and append it to send_packets.
srs_error_t SrsRtcAudioSendTrack::on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt)
{
    srs_error_t err = srs_success;

    // Rewrite the header for this track before sending.
    pkt->header.set_ssrc(track_desc_->ssrc_);
    pkt->header.set_payload_type(track_desc_->media_->pt_);

    // Put rtp packet to NACK/ARQ queue
    SrsRtpPacket2* nack = pkt->copy();
    rtp_queue_->set(nack->header.get_sequence(), nack);

    send_packets.push_back(pkt);

    return err;
}
// RTCP feedback for the audio sending track; processing is not
// implemented yet.
srs_error_t SrsRtcAudioSendTrack::on_rtcp(SrsRtpPacket2* pkt)
{
    srs_error_t err = srs_success;
    // process rtcp
    return err;
}
// Video sending track: constructed with is_audio=false, which selects the
// large (1000-packet) ARQ ring in the base constructor.
SrsRtcVideoSendTrack::SrsRtcVideoSendTrack(SrsRtcConnection* session, SrsRtcTrackDescription* track_desc)
    : SrsRtcSendTrack(session, track_desc, false)
{
}
SrsRtcVideoSendTrack::~SrsRtcVideoSendTrack()
{
}
// Prepare a video RTP packet for sending: stamp this track's SSRC and
// payload type, cache a copy for NACK/ARQ, and append it to send_packets.
srs_error_t SrsRtcVideoSendTrack::on_rtp(std::vector<SrsRtpPacket2*>& send_packets, SrsRtpPacket2* pkt)
{
    srs_error_t err = srs_success;

    // Rewrite the header for this track before sending.
    pkt->header.set_ssrc(track_desc_->ssrc_);
    pkt->header.set_payload_type(track_desc_->media_->pt_);

    // Put rtp packet to NACK/ARQ queue
    SrsRtpPacket2* nack = pkt->copy();
    rtp_queue_->set(nack->header.get_sequence(), nack);

    send_packets.push_back(pkt);

    return err;
}
// RTCP feedback for the video sending track; processing is not
// implemented yet.
srs_error_t SrsRtcVideoSendTrack::on_rtcp(SrsRtpPacket2* pkt)
{
    srs_error_t err = srs_success;
    // process rtcp
    return err;
}