
For #307, package video RTP packets when sending messages

This commit is contained in:
winlin 2020-04-11 21:03:37 +08:00
parent 8121f9ab4e
commit a2fdf0d3c7
8 changed files with 236 additions and 200 deletions
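At a glance, this commit moves H.264 RTP packetization from the publish path (SrsRtpH264Muxer::frame_to_packet, which attached rtp_packets to every shared message) to the send path (SrsRtcSenderThread::send_messages), so packets are built as messages are sent. Below is a minimal sketch of the new video flow, not part of the commit: the types are simplified stand-ins, and only the behavior visible in the diff (STAP-A before IDR, single NALU vs FU-A by size, marker bit on the frame's last packet, skipping flagged bframes) is taken from the commit.

#include <vector>

// Simplified stand-ins for SRS types; not the real SRS API.
struct Sample { const char* bytes; int size; bool bframe; };
struct RtpPacket { std::vector<char> payload; bool marker = false; };

const int kRtpMaxPayloadSize = 1200; // assumed value

// Mirrors the shape of SrsRtcSenderThread::send_messages() for video.
std::vector<RtpPacket> packetize_video(const std::vector<Sample>& samples, bool has_idr)
{
    std::vector<RtpPacket> pkts;
    // For each IDR, append SPS/PPS first, packaged in a STAP-A.
    if (has_idr) {
        pkts.push_back(RtpPacket{}); // STAP-A with cached SPS/PPS (see packet_stap_a)
    }
    for (const Sample& s : samples) {
        if (s.bframe) continue; // bframes may be flagged for discard
        if (s.size <= kRtpMaxPayloadSize) {
            pkts.push_back(RtpPacket{{s.bytes, s.bytes + s.size}}); // single NALU
        } else {
            // fragment into FU-A packets (see packet_fu_a)
        }
    }
    // Marker bit only on the last packet of the frame.
    if (!pkts.empty()) pkts.back().marker = true;
    return pkts;
}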

View file

@@ -89,7 +89,6 @@ srs_error_t aac_raw_append_adts_header(SrsSharedPtrMessage* shared_audio, SrsFor
SrsRtpH264Muxer::SrsRtpH264Muxer()
{
sequence = 0;
discard_bframe = false;
}
@@ -97,28 +96,16 @@ SrsRtpH264Muxer::~SrsRtpH264Muxer()
{
}
srs_error_t SrsRtpH264Muxer::frame_to_packet(SrsSharedPtrMessage* shared_frame, SrsFormat* format)
srs_error_t SrsRtpH264Muxer::filter(SrsSharedPtrMessage* shared_frame, SrsFormat* format)
{
srs_error_t err = srs_success;
if (format->is_avc_sequence_header()) {
// It is ok when size is 0, @see http://www.cplusplus.com/reference/string/string/assign/
sps.assign(format->vcodec->sequenceParameterSetNALUnit.data(), format->vcodec->sequenceParameterSetNALUnit.size());
pps.assign(format->vcodec->pictureParameterSetNALUnit.data(), format->vcodec->pictureParameterSetNALUnit.size());
// only collect SPS/PPS.
return err;
}
vector<SrsRtpSharedPacket*> rtp_packets;
// Well, for each IDR, we append a SPS/PPS before it, which is packaged in STAP-A.
// If IDR, we will insert SPS/PPS before IDR frame.
if (format->video && format->video->has_idr) {
if ((err = packet_stap_a(sps, pps, shared_frame, rtp_packets)) != srs_success) {
return srs_error_wrap(err, "packet stap-a");
}
shared_frame->set_has_idr(true);
}
// Update samples to shared frame.
for (int i = 0; i < format->video->nb_samples; ++i) {
SrsSample* sample = &format->video->samples[i];
@@ -132,128 +119,13 @@ srs_error_t SrsRtpH264Muxer::frame_to_packet(SrsSharedPtrMessage* shared_frame,
continue;
}
}
if (sample->size <= kRtpMaxPayloadSize) {
if ((err = packet_single_nalu(shared_frame, format, sample, rtp_packets)) != srs_success) {
return srs_error_wrap(err, "packet single nalu");
}
} else {
if ((err = packet_fu_a(shared_frame, format, sample, rtp_packets)) != srs_success) {
return srs_error_wrap(err, "packet fu-a");
}
}
}
if (!rtp_packets.empty()) {
// At the end of the frame, set marker bit.
// One frame may have multiple NALUs. Set the marker bit on the last NALU of the frame, not at the end of each NALU.
if ((err = rtp_packets.back()->modify_rtp_header_marker(true)) != srs_success) {
return srs_error_wrap(err, "set marker");
}
if (format->video->nb_samples <= 0) {
return err;
}
shared_frame->set_rtp_packets(rtp_packets);
return err;
}
srs_error_t SrsRtpH264Muxer::packet_fu_a(SrsSharedPtrMessage* shared_frame, SrsFormat* format, SrsSample* sample, vector<SrsRtpSharedPacket*>& rtp_packets)
{
srs_error_t err = srs_success;
char* p = sample->bytes + 1;
int nb_left = sample->size - 1;
uint8_t header = sample->bytes[0];
uint8_t nal_type = header & kNalTypeMask;
int num_of_packet = (sample->size - 1 + kRtpMaxPayloadSize) / kRtpMaxPayloadSize;
for (int i = 0; i < num_of_packet; ++i) {
char buf[kRtpPacketSize];
SrsBuffer* stream = new SrsBuffer(buf, kRtpPacketSize);
SrsAutoFree(SrsBuffer, stream);
int packet_size = min(nb_left, kRtpMaxPayloadSize);
// FU indicator
uint8_t fu_indicate = kFuA;
fu_indicate |= (header & (~kNalTypeMask));
stream->write_1bytes(fu_indicate);
uint8_t fu_header = nal_type;
if (i == 0)
fu_header |= kStart;
if (i == num_of_packet - 1)
fu_header |= kEnd;
stream->write_1bytes(fu_header);
stream->write_bytes(p, packet_size);
p += packet_size;
nb_left -= packet_size;
srs_verbose("rtp fu-a nalu, size=%u, seq=%u, timestamp=%lu", sample->size, sequence, (shared_frame->timestamp * 90));
SrsRtpSharedPacket* packet = new SrsRtpSharedPacket();
if ((err = packet->create((shared_frame->timestamp * 90), sequence++, kVideoSSRC, kH264PayloadType, stream->data(), stream->pos())) != srs_success) {
return srs_error_wrap(err, "rtp packet encode");
}
rtp_packets.push_back(packet);
}
return err;
}
// Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6
srs_error_t SrsRtpH264Muxer::packet_single_nalu(SrsSharedPtrMessage* shared_frame, SrsFormat* format, SrsSample* sample, vector<SrsRtpSharedPacket*>& rtp_packets)
{
srs_error_t err = srs_success;
srs_verbose("rtp single nalu, size=%u, seq=%u, timestamp=%lu", sample->size, sequence, (shared_frame->timestamp * 90));
SrsRtpSharedPacket* packet = new SrsRtpSharedPacket();
if ((err = packet->create((shared_frame->timestamp * 90), sequence++, kVideoSSRC, kH264PayloadType, sample->bytes, sample->size)) != srs_success) {
return srs_error_wrap(err, "rtp packet encode");
}
rtp_packets.push_back(packet);
return err;
}
srs_error_t SrsRtpH264Muxer::packet_stap_a(const string &sps, const string& pps, SrsSharedPtrMessage* shared_frame, vector<SrsRtpSharedPacket*>& rtp_packets)
{
srs_error_t err = srs_success;
if (sps.empty() || pps.empty()) {
return srs_error_new(ERROR_RTC_RTP_MUXER, "sps/pps empty");
}
uint8_t header = sps[0];
uint8_t nal_type = header & kNalTypeMask;
char buf[kRtpPacketSize];
SrsBuffer* stream = new SrsBuffer(buf, kRtpPacketSize);
SrsAutoFree(SrsBuffer, stream);
// stap-a header
uint8_t stap_a_header = kStapA;
stap_a_header |= (nal_type & (~kNalTypeMask));
stream->write_1bytes(stap_a_header);
stream->write_2bytes(sps.size());
stream->write_bytes((char*)sps.data(), sps.size());
stream->write_2bytes(pps.size());
stream->write_bytes((char*)pps.data(), pps.size());
srs_verbose("rtp stap-a nalu, size=%u, seq=%u, timestamp=%lu", (sps.size() + pps.size()), sequence, (shared_frame->timestamp * 90));
SrsRtpSharedPacket* packet = new SrsRtpSharedPacket();
if ((err = packet->create((shared_frame->timestamp * 90), sequence++, kVideoSSRC, kH264PayloadType, stream->data(), stream->pos())) != srs_success) {
return srs_error_wrap(err, "rtp packet encode");
}
rtp_packets.push_back(packet);
shared_frame->set_samples(format->video->samples, format->video->nb_samples);
return err;
}
@@ -490,5 +362,5 @@ srs_error_t SrsRtc::on_video(SrsSharedPtrMessage* shared_video, SrsFormat* forma
// ignore info frame,
// @see https://github.com/ossrs/srs/issues/288#issuecomment-69863909
srs_assert(format->video);
return rtp_h264_muxer->frame_to_packet(shared_video, format);
return rtp_h264_muxer->filter(shared_video, format);
}
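A side note on the timestamps used throughout both the old and new packetizers: shared_frame->timestamp is an RTMP timestamp in milliseconds, while RFC 6184 requires a 90 kHz RTP clock for H.264, hence the * 90 (90000 ticks per second / 1000 ms per second). A tiny self-contained illustration:

#include <cstdint>
#include <cstdio>

// RTMP timestamps are milliseconds; H.264 over RTP uses a 90 kHz clock
// (RFC 6184), so one millisecond is 90 RTP ticks.
static uint32_t rtmp_ms_to_rtp_ts(int64_t timestamp_ms)
{
    return (uint32_t)(timestamp_ms * 90);
}

int main()
{
    printf("%u\n", rtmp_ms_to_rtp_ts(1000)); // 1s of media -> 90000 ticks
    return 0;
}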

View file

@@ -69,21 +69,13 @@ const uint32_t kVideoSSRC = 2;
// TODO: Define interface class like ISrsRtpMuxer
class SrsRtpH264Muxer
{
private:
uint16_t sequence;
std::string sps;
std::string pps;
public:
bool discard_bframe;
public:
SrsRtpH264Muxer();
virtual ~SrsRtpH264Muxer();
public:
srs_error_t frame_to_packet(SrsSharedPtrMessage* shared_video, SrsFormat* format);
private:
srs_error_t packet_fu_a(SrsSharedPtrMessage* shared_frame, SrsFormat* format, SrsSample* sample, std::vector<SrsRtpSharedPacket*>& rtp_packets);
srs_error_t packet_single_nalu(SrsSharedPtrMessage* shared_frame, SrsFormat* format, SrsSample* sample, std::vector<SrsRtpSharedPacket*>& rtp_packets);
srs_error_t packet_stap_a(const std::string &sps, const std::string& pps, SrsSharedPtrMessage* shared_frame, std::vector<SrsRtpSharedPacket*>& rtp_packets);
srs_error_t filter(SrsSharedPtrMessage* shared_video, SrsFormat* format);
};
// TODO: FIXME: It's not a muxer, but a transcoder.

View file

@@ -445,6 +445,8 @@ SrsRtcSenderThread::SrsRtcSenderThread(SrsRtcSession* s, SrsUdpMuxSocket* u, int
audio_timestamp = 0;
audio_sequence = 0;
video_sequence = 0;
}
SrsRtcSenderThread::~SrsRtcSenderThread()
@@ -557,7 +559,14 @@ srs_error_t SrsRtcSenderThread::cycle()
int nn = 0;
int nn_rtp_pkts = 0;
send_and_free_messages(msgs.msgs, msg_count, sendonly_ukt, &nn, &nn_rtp_pkts);
if ((err = send_messages(source, msgs.msgs, msg_count, sendonly_ukt, &nn, &nn_rtp_pkts)) != srs_success) {
srs_warn("send err %s", srs_error_summary(err).c_str()); srs_error_reset(err);
}
for (int i = 0; i < msg_count; i++) {
SrsSharedPtrMessage* msg = msgs.msgs[i];
srs_freep(msg);
}
pprint->elapse();
if (pprint->can_print()) {
@@ -576,12 +585,14 @@ void SrsRtcSenderThread::update_sendonly_socket(SrsUdpMuxSocket* skt)
sendonly_ukt = skt->copy_sendonly();
}
void SrsRtcSenderThread::send_and_free_messages(SrsSharedPtrMessage** msgs, int nb_msgs, SrsUdpMuxSocket* skt, int* pnn, int* pnn_rtp_pkts)
{
srs_error_t SrsRtcSenderThread::send_messages(
SrsSource* source, SrsSharedPtrMessage** msgs, int nb_msgs,
SrsUdpMuxSocket* skt, int* pnn, int* pnn_rtp_pkts
) {
srs_error_t err = srs_success;
if (!rtc_session->dtls_session) {
return;
return err;
}
for (int i = 0; i < nb_msgs; i++) {
@@ -589,41 +600,69 @@ void SrsRtcSenderThread::send_and_free_messages(SrsSharedPtrMessage** msgs, int
bool is_video = msg->is_video();
bool is_audio = msg->is_audio();
if (is_audio) {
// Package opus packets to RTP packets.
vector<SrsRtpSharedPacket*> rtp_packets;
// Package opus packets to RTP packets.
vector<SrsRtpSharedPacket*> rtp_packets;
if (is_audio) {
for (int i = 0; i < msg->nn_extra_payloads(); i++) {
SrsSample* sample = msg->extra_payloads() + i;
if ((err = packet_opus(msg, sample, rtp_packets)) != srs_success) {
srs_warn("packet opus err %s", srs_error_summary(err).c_str()); srs_error_reset(err);
return srs_error_wrap(err, "opus package");
}
}
} else {
for (int i = 0; i < msg->nn_samples(); i++) {
SrsSample* sample = msg->samples() + i;
// We always skip samples flagged as bframe here; unless configured
// to discard bframes, the flag is never set.
if (sample->bframe) {
continue;
}
// Well, for each IDR, we append a SPS/PPS before it, which is packaged in STAP-A.
if (msg->has_idr()) {
if ((err = packet_stap_a(source, msg, rtp_packets)) != srs_success) {
return srs_error_wrap(err, "packet stap-a");
}
}
if (sample->size <= kRtpMaxPayloadSize) {
if ((err = packet_single_nalu(msg, sample, rtp_packets)) != srs_success) {
return srs_error_wrap(err, "packet single nalu");
}
} else {
if ((err = packet_fu_a(msg, sample, rtp_packets)) != srs_success) {
return srs_error_wrap(err, "packet fu-a");
}
}
}
int nn_rtp_pkts = (int)rtp_packets.size();
for (int j = 0; j < nn_rtp_pkts; j++) {
SrsRtpSharedPacket* pkt = rtp_packets[j];
send_and_free_message(msg, is_video, is_audio, pkt, skt);
if (!rtp_packets.empty()) {
// At the end of the frame, set marker bit.
// One frame may have multiple NALUs. Set the marker bit on the last NALU of the frame, not at the end of each NALU.
if ((err = rtp_packets.back()->modify_rtp_header_marker(true)) != srs_success) {
return srs_error_wrap(err, "set marker");
}
}
*pnn += msg->size;
*pnn_rtp_pkts += nn_rtp_pkts;
} else {
int nn_rtp_pkts = (int)msg->rtp_packets.size();
for (int j = 0; j < nn_rtp_pkts; j++) {
SrsRtpSharedPacket* pkt = msg->rtp_packets[j];
send_and_free_message(msg, is_video, is_audio, pkt, skt);
}
*pnn += msg->size;
*pnn_rtp_pkts += nn_rtp_pkts;
}
srs_freep(msg);
int nn_rtp_pkts = (int)rtp_packets.size();
for (int j = 0; j < nn_rtp_pkts; j++) {
SrsRtpSharedPacket* pkt = rtp_packets[j];
if ((err = send_message(msg, is_video, is_audio, pkt, skt)) != srs_success) {
return srs_error_wrap(err, "send message");
}
}
*pnn += msg->size;
*pnn_rtp_pkts += nn_rtp_pkts;
}
return err;
}
void SrsRtcSenderThread::send_and_free_message(SrsSharedPtrMessage* msg, bool is_video, bool is_audio, SrsRtpSharedPacket* pkt, SrsUdpMuxSocket* skt)
srs_error_t SrsRtcSenderThread::send_message(SrsSharedPtrMessage* msg, bool is_video, bool is_audio, SrsRtpSharedPacket* pkt, SrsUdpMuxSocket* skt)
{
srs_error_t err = srs_success;
@@ -644,8 +683,7 @@ void SrsRtcSenderThread::send_and_free_message(SrsSharedPtrMessage* msg, bool is
if (rtc_session->encrypt) {
if ((err = rtc_session->dtls_session->protect_rtp(buf, pkt->payload, length)) != srs_success) {
srs_warn("srtp err %s", srs_error_desc(err).c_str()); srs_freep(err); srs_freepa(buf);
return;
return srs_error_wrap(err, "srtp protect");
}
} else {
memcpy(buf, pkt->payload, length);
@@ -660,6 +698,7 @@ void SrsRtcSenderThread::send_and_free_message(SrsSharedPtrMessage* msg, bool is
mhdr->msg_len = 0;
rtc_session->rtc_server->sendmmsg(skt->stfd(), mhdr);
return err;
}
srs_error_t SrsRtcSenderThread::packet_opus(SrsSharedPtrMessage* shared_frame, SrsSample* sample, std::vector<SrsRtpSharedPacket*>& rtp_packets)
@@ -680,6 +719,119 @@ srs_error_t SrsRtcSenderThread::packet_opus(SrsSharedPtrMessage* shared_frame, S
return err;
}
srs_error_t SrsRtcSenderThread::packet_fu_a(SrsSharedPtrMessage* shared_frame, SrsSample* sample, vector<SrsRtpSharedPacket*>& rtp_packets)
{
srs_error_t err = srs_success;
char* p = sample->bytes + 1;
int nb_left = sample->size - 1;
uint8_t header = sample->bytes[0];
uint8_t nal_type = header & kNalTypeMask;
int num_of_packet = (sample->size - 1 + kRtpMaxPayloadSize) / kRtpMaxPayloadSize;
for (int i = 0; i < num_of_packet; ++i) {
char buf[kRtpPacketSize];
SrsBuffer* stream = new SrsBuffer(buf, kRtpPacketSize);
SrsAutoFree(SrsBuffer, stream);
int packet_size = min(nb_left, kRtpMaxPayloadSize);
// FU indicator
uint8_t fu_indicate = kFuA;
fu_indicate |= (header & (~kNalTypeMask));
stream->write_1bytes(fu_indicate);
uint8_t fu_header = nal_type;
if (i == 0)
fu_header |= kStart;
if (i == num_of_packet - 1)
fu_header |= kEnd;
stream->write_1bytes(fu_header);
stream->write_bytes(p, packet_size);
p += packet_size;
nb_left -= packet_size;
srs_verbose("rtp fu-a nalu, size=%u, seq=%u, timestamp=%lu", sample->size, video_sequence, (shared_frame->timestamp * 90));
SrsRtpSharedPacket* packet = new SrsRtpSharedPacket();
if ((err = packet->create((shared_frame->timestamp * 90), video_sequence++, kVideoSSRC, kH264PayloadType, stream->data(), stream->pos())) != srs_success) {
return srs_error_wrap(err, "rtp packet encode");
}
rtp_packets.push_back(packet);
}
return err;
}
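The fragment count above deserves a close look. With nb_left = size - 1 (the NALU header byte is replaced by the FU indicator and FU header), (nb_left + kRtpMaxPayloadSize) / kRtpMaxPayloadSize equals floor(nb_left / max) + 1: this is ceiling division except when nb_left is an exact multiple of kRtpMaxPayloadSize, where it yields one extra, empty fragment (the usual ceiling form is (nb_left + max - 1) / max). A self-contained sketch of the bit layout follows; the constant values (kFuA = 28, kStart = 0x80, kEnd = 0x40, kNalTypeMask = 0x1F, kRtpMaxPayloadSize = 1200) are assumptions matching RFC 6184, not copied from the SRS headers:

#include <cstdint>
#include <cstdio>

// Assumed values matching RFC 6184: FU-A NAL type 28, S bit, E bit.
const uint8_t kFuA = 28;
const uint8_t kStart = 0x80;
const uint8_t kEnd = 0x40;
const uint8_t kNalTypeMask = 0x1F;
const int kRtpMaxPayloadSize = 1200; // assumed payload budget

int main()
{
    uint8_t header = 0x65; // IDR slice: F=0, NRI=3, type=5
    int nalu_size = 3000;

    int nb_left = nalu_size - 1;
    int num_of_packet = (nb_left + kRtpMaxPayloadSize) / kRtpMaxPayloadSize;
    printf("fragments: %d\n", num_of_packet); // (2999 + 1200) / 1200 = 3

    // FU indicator: F/NRI copied from the NALU header, type forced to 28.
    uint8_t fu_indicate = kFuA | (uint8_t)(header & ~kNalTypeMask); // 0x7C
    for (int i = 0; i < num_of_packet; ++i) {
        // FU header: original NALU type, plus start/end flags.
        uint8_t fu_header = header & kNalTypeMask;
        if (i == 0) fu_header |= kStart;
        if (i == num_of_packet - 1) fu_header |= kEnd;
        printf("pkt %d: indicator=0x%02X header=0x%02X\n", i, fu_indicate, fu_header);
    }
    return 0;
}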
// Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6
srs_error_t SrsRtcSenderThread::packet_single_nalu(SrsSharedPtrMessage* shared_frame, SrsSample* sample, vector<SrsRtpSharedPacket*>& rtp_packets)
{
srs_error_t err = srs_success;
srs_verbose("rtp single nalu, size=%u, seq=%u, timestamp=%lu", sample->size, video_sequence, (shared_frame->timestamp * 90));
SrsRtpSharedPacket* packet = new SrsRtpSharedPacket();
if ((err = packet->create((shared_frame->timestamp * 90), video_sequence++, kVideoSSRC, kH264PayloadType, sample->bytes, sample->size)) != srs_success) {
return srs_error_wrap(err, "rtp packet encode");
}
rtp_packets.push_back(packet);
return err;
}
srs_error_t SrsRtcSenderThread::packet_stap_a(SrsSource* source, SrsSharedPtrMessage* shared_frame, vector<SrsRtpSharedPacket*>& rtp_packets)
{
srs_error_t err = srs_success;
SrsMetaCache* meta = source->cached_meta();
if (!meta) {
return err;
}
SrsFormat* format = meta->vsh_format();
if (!format || !format->vcodec) {
return err;
}
const vector<char>& sps = format->vcodec->sequenceParameterSetNALUnit;
const vector<char>& pps = format->vcodec->pictureParameterSetNALUnit;
if (sps.empty() || pps.empty()) {
return srs_error_new(ERROR_RTC_RTP_MUXER, "sps/pps empty");
}
uint8_t header = sps[0];
uint8_t nal_type = header & kNalTypeMask;
char buf[kRtpPacketSize];
SrsBuffer* stream = new SrsBuffer(buf, kRtpPacketSize);
SrsAutoFree(SrsBuffer, stream);
// stap-a header
uint8_t stap_a_header = kStapA;
stap_a_header |= (nal_type & (~kNalTypeMask));
stream->write_1bytes(stap_a_header);
stream->write_2bytes(sps.size());
stream->write_bytes((char*)sps.data(), sps.size());
stream->write_2bytes(pps.size());
stream->write_bytes((char*)pps.data(), pps.size());
srs_verbose("rtp stap-a nalu, size=%u, seq=%u, timestamp=%lu", (sps.size() + pps.size()), video_sequence, (shared_frame->timestamp * 90));
SrsRtpSharedPacket* packet = new SrsRtpSharedPacket();
if ((err = packet->create((shared_frame->timestamp * 90), video_sequence++, kVideoSSRC, kH264PayloadType, stream->data(), stream->pos())) != srs_success) {
return srs_error_wrap(err, "rtp packet encode");
}
rtp_packets.push_back(packet);
return err;
}
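packet_stap_a() builds the RFC 6184 section 5.7.1 aggregation layout: one STAP-A NAL header (type 24), then, for each aggregated NALU, a 16-bit big-endian size followed by the unit itself. Note that the code above ORs in nal_type & ~kNalTypeMask, which is always zero because nal_type was already masked down to the type bits; copying F/NRI into the STAP-A header would require header & ~kNalTypeMask, as the FU-A path does. A minimal sketch of the buffer layout, with kStapA = 24 assumed to match the RFC rather than taken from the SRS headers:

#include <cstdint>
#include <vector>

// RFC 6184 5.7.1: STAP-A = [NAL header, type 24][2-byte size][NALU]...
const uint8_t kStapA = 24;          // assumed value
const uint8_t kNalTypeMask = 0x1F;

std::vector<uint8_t> build_stap_a(const std::vector<uint8_t>& sps,
                                  const std::vector<uint8_t>& pps)
{
    std::vector<uint8_t> buf;
    // F/NRI are copied from the first aggregated NALU (the SPS).
    buf.push_back(kStapA | (uint8_t)(sps[0] & ~kNalTypeMask));
    for (const std::vector<uint8_t>* nalu : {&sps, &pps}) {
        buf.push_back((uint8_t)(nalu->size() >> 8));   // big-endian size
        buf.push_back((uint8_t)(nalu->size() & 0xFF));
        buf.insert(buf.end(), nalu->begin(), nalu->end());
    }
    return buf;
}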
SrsRtcSession::SrsRtcSession(SrsRtcServer* rtc_svr, const SrsRequest& req, const std::string& un, int context_id)
{
rtc_server = rtc_svr;

View file

@@ -130,6 +130,8 @@ private:
// TODO: FIXME: How to handle timestamp overflow?
uint32_t audio_timestamp;
uint16_t audio_sequence;
private:
uint16_t video_sequence;
public:
SrsUdpMuxSocket* sendonly_ukt;
public:
@@ -148,10 +150,14 @@ public:
public:
void update_sendonly_socket(SrsUdpMuxSocket* skt);
private:
void send_and_free_messages(SrsSharedPtrMessage** msgs, int nb_msgs, SrsUdpMuxSocket* skt, int* pnn, int* pnn_rtp_pkts);
void send_and_free_message(SrsSharedPtrMessage* msg, bool is_video, bool is_audio, SrsRtpSharedPacket* pkt, SrsUdpMuxSocket* skt);
srs_error_t send_messages(SrsSource* source, SrsSharedPtrMessage** msgs, int nb_msgs, SrsUdpMuxSocket* skt, int* pnn, int* pnn_rtp_pkts);
srs_error_t send_message(SrsSharedPtrMessage* msg, bool is_video, bool is_audio, SrsRtpSharedPacket* pkt, SrsUdpMuxSocket* skt);
private:
srs_error_t packet_opus(SrsSharedPtrMessage* shared_frame, SrsSample* sample, std::vector<SrsRtpSharedPacket*>& rtp_packets);
private:
srs_error_t packet_fu_a(SrsSharedPtrMessage* shared_frame, SrsSample* sample, std::vector<SrsRtpSharedPacket*>& rtp_packets);
srs_error_t packet_single_nalu(SrsSharedPtrMessage* shared_frame, SrsSample* sample, std::vector<SrsRtpSharedPacket*>& rtp_packets);
srs_error_t packet_stap_a(SrsSource* source, SrsSharedPtrMessage* shared_frame, std::vector<SrsRtpSharedPacket*>& rtp_packets);
};
class SrsRtcSession

View file

@@ -1143,7 +1143,8 @@ srs_error_t SrsOriginHub::on_video(SrsSharedPtrMessage* shared_video, bool is_se
// TODO: FIXME: Refactor to move to rtp?
// Save the RTP packets so find_rtp_packet() can RTX or restore them.
source->rtp_queue->push(msg->rtp_packets);
// TODO: FIXME: Remove dead code.
//source->rtp_queue->push(msg->rtp_packets);
#endif
if ((err = hls->on_video(msg, format)) != srs_success) {
@@ -2718,4 +2719,9 @@ SrsRtpSharedPacket* SrsSource::find_rtp_packet(const uint16_t& seq)
{
return rtp_queue->find(seq);
}
SrsMetaCache* SrsSource::cached_meta()
{
return meta;
}
#endif

View file

@@ -327,6 +327,7 @@ public:
#ifdef SRS_AUTO_RTC
// To find the RTP packet for RTX or to restore it.
// TODO: FIXME: Should queue RTP packets in connection level.
class SrsRtpPacketQueue
{
private:
@@ -634,6 +635,8 @@ public:
#ifdef SRS_AUTO_RTC
// Find rtp packet by sequence
SrsRtpSharedPacket* find_rtp_packet(const uint16_t& seq);
// Get the cached meta, such as the sps/pps.
SrsMetaCache* cached_meta();
#endif
};

View file

@@ -211,11 +211,14 @@ SrsSharedPtrMessage::SrsSharedPtrPayload::SrsSharedPtrPayload()
size = 0;
shared_count = 0;
#ifdef SRS_AUTO_RTC
samples = NULL;
nb_samples = 0;
nn_samples = 0;
has_idr = false;
extra_payloads = NULL;
nn_extra_payloads = 0;
#endif
}
SrsSharedPtrMessage::SrsSharedPtrPayload::~SrsSharedPtrPayload()
@@ -226,12 +229,14 @@ SrsSharedPtrMessage::SrsSharedPtrPayload::~SrsSharedPtrPayload()
srs_freepa(payload);
srs_freepa(samples);
#ifdef SRS_AUTO_RTC
for (int i = 0; i < nn_extra_payloads; i++) {
SrsSample* p = extra_payloads + i;
srs_freep(p->bytes);
}
srs_freepa(extra_payloads);
nn_extra_payloads = 0;
#endif
}
SrsSharedPtrMessage::SrsSharedPtrMessage() : timestamp(0), stream_id(0), size(0), payload(NULL)
@@ -248,12 +253,6 @@ SrsSharedPtrMessage::~SrsSharedPtrMessage()
ptr->shared_count--;
}
}
#ifdef SRS_AUTO_RTC
for (int i = 0; i < (int)rtp_packets.size(); ++i) {
srs_freep(rtp_packets[i]);
}
#endif
}
srs_error_t SrsSharedPtrMessage::create(SrsCommonMessage* msg)
@@ -372,21 +371,10 @@ SrsSharedPtrMessage* SrsSharedPtrMessage::copy()
copy->payload = ptr->payload;
copy->size = ptr->size;
#ifdef SRS_AUTO_RTC
for (int i = 0; i < (int)rtp_packets.size(); ++i) {
copy->rtp_packets.push_back(rtp_packets[i]->copy());
}
#endif
return copy;
}
#ifdef SRS_AUTO_RTC
void SrsSharedPtrMessage::set_rtp_packets(const std::vector<SrsRtpSharedPacket*>& pkts)
{
rtp_packets = pkts;
}
void SrsSharedPtrMessage::set_extra_payloads(SrsSample* payloads, int nn_payloads)
{
srs_assert(nn_payloads);
@@ -397,6 +385,17 @@ void SrsSharedPtrMessage::set_extra_payloads(SrsSample* payloads, int nn_payload
ptr->extra_payloads = new SrsSample[nn_payloads];
memcpy(ptr->extra_payloads, payloads, nn_payloads * sizeof(SrsSample));
}
void SrsSharedPtrMessage::set_samples(SrsSample* samples, int nn_samples)
{
srs_assert(nn_samples);
srs_assert(!ptr->samples);
ptr->nn_samples = nn_samples;
ptr->samples = new SrsSample[nn_samples];
memcpy(ptr->samples, samples, nn_samples * sizeof(SrsSample));
}
#endif
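Both set_extra_payloads() and set_samples() above memcpy an array of SrsSample descriptors into the shared payload object, so every copy() of the message shares a single descriptor array. The destructor earlier in this file frees the bytes of extra payloads (which own transcoded opus data) but not of samples (which only point into the message payload). The memcpy is safe because SrsSample is a plain pointer-plus-size view; a small sketch under that assumption, with a stand-in type:

#include <cstring>

// Assumed shape of SrsSample: a trivially copyable view into a payload.
struct SrsSampleView { char* bytes; int size; bool bframe; };

// Copying descriptors with memcpy is fine for trivially copyable types;
// the bytes pointers still reference the original payload, which must
// outlive the copies (in SRS, both live in SrsSharedPtrPayload).
SrsSampleView* copy_samples(const SrsSampleView* samples, int nn)
{
    SrsSampleView* out = new SrsSampleView[nn];
    memcpy(out, samples, nn * sizeof(SrsSampleView));
    return out;
}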
SrsFlvTransmuxer::SrsFlvTransmuxer()

View file

@@ -288,9 +288,6 @@ public:
// @remark Not all message payloads can be decoded to a packet; for example,
// video/audio packets use raw bytes, not a video/audio packet structure.
char* payload;
#ifdef SRS_AUTO_RTC
std::vector<SrsRtpSharedPacket*> rtp_packets;
#endif
private:
class SrsSharedPtrPayload
@@ -305,15 +302,19 @@ private:
int size;
// The reference count
int shared_count;
#ifdef SRS_AUTO_RTC
public:
// For RTC video, we need to know the NALU structures,
// because RTP STAP-A and FU-A packetization are based on NALUs.
SrsSample* samples;
int nb_samples;
int nn_samples;
// For RTC video, whether the NALUs contain an IDR frame.
bool has_idr;
// For RTC audio, we may need to transcode AAC to opus,
// so there may be extra payloads, transformed from the original payload.
SrsSample* extra_payloads;
int nn_extra_payloads;
#endif
public:
SrsSharedPtrPayload();
virtual ~SrsSharedPtrPayload();
@@ -357,13 +358,18 @@ public:
virtual SrsSharedPtrMessage* copy();
public:
#ifdef SRS_AUTO_RTC
virtual void set_rtp_packets(const std::vector<SrsRtpSharedPacket*>& pkts);
// Set extra samples; for example, when we transcode an AAC audio packet to OPUS,
// we may get more than one OPUS packet, and we store these OPUS packets in the extra payloads.
void set_extra_payloads(SrsSample* payloads, int nn_payloads);
// Get the extra payloads and their count.
int nn_extra_payloads() { return ptr->nn_extra_payloads; }
SrsSample* extra_payloads() { return ptr->extra_payloads; }
// Whether the samples contain an IDR frame.
bool has_idr() { return ptr->has_idr; }
void set_has_idr(bool v) { ptr->has_idr = v; }
// Set samples; each sample points into the message payload.
void set_samples(SrsSample* samples, int nn_samples);
int nn_samples() { return ptr->nn_samples; }
SrsSample* samples() { return ptr->samples; }
#endif
};