
add ts demux

This commit is contained in:
runner365 2020-01-17 19:43:54 +08:00
parent 1481928b53
commit 7b9e3ecdc4
13 changed files with 1058 additions and 15 deletions


@@ -0,0 +1,427 @@
#include "srt_to_rtmp.hpp"
#include <srs_kernel_log.hpp>
#include <srs_kernel_error.hpp>
#include <srs_kernel_buffer.hpp>
#include <srs_app_rtmp_conn.hpp>
#include <srs_app_config.hpp>
#include <srs_kernel_stream.hpp>
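// srt2rtmp: bridge srt input to rtmp output.
// A dedicated worker thread consumes raw mpegts packets pushed by the srt
// side and dispatches them, keyed by key_path, to per-stream rtmp_client
// instances. Intended usage, as suggested by the methods below:
//   start();                                    // spawn the worker thread
//   insert_data_message(ts_data, ts_len, path); // queue one ts packet (thread safe)
//   stop();                                     // wake up and join the worker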
srt2rtmp::srt2rtmp():_run_flag(false) {
}
srt2rtmp::~srt2rtmp() {
}
void srt2rtmp::start() {
_run_flag = true;
_thread_ptr = std::make_shared<std::thread>(&srt2rtmp::on_work, this);
return;
}
void srt2rtmp::stop() {
std::unique_lock<std::mutex> locker(_mutex);
_run_flag = false;
_notify_cond.notify_all();//wake up the worker blocked in get_data_message
locker.unlock();//don't hold the lock while joining
if (_thread_ptr && _thread_ptr->joinable()) {
_thread_ptr->join();
}
return;
}
void srt2rtmp::insert_data_message(unsigned char* data_p, unsigned int len, const std::string& key_path) {
std::unique_lock<std::mutex> locker(_mutex);
if (!_run_flag) {
return;
}
SRT_DATA_MSG_PTR msg_ptr = std::make_shared<SRT_DATA_MSG>(data_p, len, key_path);
_msg_queue.push(msg_ptr);
_notify_cond.notify_one();
return;
}
SRT_DATA_MSG_PTR srt2rtmp::get_data_message() {
std::unique_lock<std::mutex> locker(_mutex);
while (_msg_queue.empty() && _run_flag) {
_notify_cond.wait(locker);
}
if (_msg_queue.empty()) {//woken up by stop() with nothing queued
return nullptr;
}
SRT_DATA_MSG_PTR msg_ptr = _msg_queue.front();
_msg_queue.pop();
return msg_ptr;
}
void srt2rtmp::on_work() {
while(_run_flag) {
SRT_DATA_MSG_PTR msg_ptr = get_data_message();
if (!msg_ptr) {
continue;
}
handle_ts_data(msg_ptr);
}
}
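// look up (or create on first use) the rtmp_client that owns this key_path
// and hand the ts packet over to it.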
void srt2rtmp::handle_ts_data(SRT_DATA_MSG_PTR data_ptr) {
RTMP_CLIENT_PTR rtmp_ptr;
auto iter = _rtmp_client_map.find(data_ptr->get_path());
if (iter == _rtmp_client_map.end()) {
srs_trace("new rtmp client for srt upstream, key_path:%s", data_ptr->get_path().c_str());
rtmp_ptr = std::make_shared<rtmp_client>(data_ptr->get_path());
_rtmp_client_map.insert(std::make_pair(data_ptr->get_path(), rtmp_ptr));
} else {
rtmp_ptr = iter->second;
}
rtmp_ptr->receive_ts_data(data_ptr);
return;
}
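// rtmp_client: demux the ts stream of one srt publisher and republish it over rtmp.
// The SrsTsContext callback below splits each pes payload into h264 annexb nalus
// or aac adts frames, remuxes them into flv tags and sends them to
// rtmp://127.0.0.1/<key_path> through SrsSimpleRtmpClient.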
rtmp_client::rtmp_client(std::string key_path):_key_path(key_path) {
_ts_ctx_ptr = std::make_shared<SrsTsContext>();
_avc_ptr = std::make_shared<SrsRawH264Stream>();
_aac_ptr = std::make_shared<SrsRawAacStream>();
char url_sz[128];
snprintf(url_sz, sizeof(url_sz), "rtmp://127.0.0.1/%s", key_path.c_str());//avoid overflow for long key_path
_url = url_sz;
_h264_sps_changed = false;
_h264_pps_changed = false;
_h264_sps_pps_sent = false;
srs_trace("rtmp client construct url:%s", url_sz);
}
rtmp_client::~rtmp_client() {
}
void rtmp_client::close() {
if (!_rtmp_conn_ptr) {
return;
}
_rtmp_conn_ptr->close();
_rtmp_conn_ptr = nullptr;
}
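// lazy connect: called before every packet write, it only creates the rtmp
// connection and publish stream on the first call.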
srs_error_t rtmp_client::connect() {
srs_error_t err = srs_success;
srs_utime_t cto = SRS_CONSTS_RTMP_TIMEOUT;
srs_utime_t sto = SRS_CONSTS_RTMP_PULSE;
if (_rtmp_conn_ptr.get() != nullptr) {
// already connected and publishing, nothing more to do.
return err;
}
_rtmp_conn_ptr = std::make_shared<SrsSimpleRtmpClient>(_url, cto, sto);
if ((err = _rtmp_conn_ptr->connect()) != srs_success) {
close();
return srs_error_wrap(err, "connect %s failed, cto=%dms, sto=%dms.",
_url.c_str(), srsu2msi(cto), srsu2msi(sto));
}
if ((err = _rtmp_conn_ptr->publish(SRS_CONSTS_RTMP_PROTOCOL_CHUNK_SIZE)) != srs_success) {
close();
return srs_error_wrap(err, "publish error, url:%s", _url.c_str());
}
return err;
}
void rtmp_client::receive_ts_data(SRT_DATA_MSG_PTR data_ptr) {
SrsBuffer buffer((char*)data_ptr->get_data(), data_ptr->data_len());//stack buffer, no leak
// dump the raw ts stream to a local file for debugging.
FILE* file_p = fopen("1.ts", "ab+");
if (file_p) {
fwrite(data_ptr->get_data(), data_ptr->data_len(), 1, file_p);
fclose(file_p);
}
//srs_trace_data((char*)data_ptr->get_data(), data_ptr->data_len(), "receive ts data");
srs_error_t err = _ts_ctx_ptr->decode(&buffer, this);//on_ts_message is the decode callback
if (err != srs_success) {
srs_error("ts demux error:%s", srs_error_desc(err).c_str());
srs_freep(err);
}
return;
}
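// mux the cached sps/pps into an avc sequence header and send it as a
// keyframe flv video tag.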
srs_error_t rtmp_client::write_h264_sps_pps(uint32_t dts, uint32_t pts) {
srs_error_t err = srs_success;
// TODO: FIXME: there exists a bug, see the following comments.
// When the sps or pps changes, update the sequence header;
// the pps may stay the same while the sps changes,
// so we must check on every parsed video ts message frame.
if (!_h264_sps_changed || !_h264_pps_changed) {
return err;
}
// h264 raw to h264 packet.
std::string sh;
if ((err = _avc_ptr->mux_sequence_header(_h264_sps, _h264_pps, dts, pts, sh)) != srs_success) {
return srs_error_wrap(err, "mux sequence header");
}
// h264 packet to flv packet.
int8_t frame_type = SrsVideoAvcFrameTypeKeyFrame;
int8_t avc_packet_type = SrsVideoAvcFrameTraitSequenceHeader;
char* flv = NULL;
int nb_flv = 0;
if ((err = _avc_ptr->mux_avc2flv(sh, frame_type, avc_packet_type, dts, pts, &flv, &nb_flv)) != srs_success) {
return srs_error_wrap(err, "avc to flv");
}
// the timestamp in rtmp message header is dts.
uint32_t timestamp = dts;
if ((err = rtmp_write_packet(SrsFrameTypeVideo, timestamp, flv, nb_flv)) != srs_success) {
return srs_error_wrap(err, "write packet");
}
// reset sps and pps.
_h264_sps_changed = false;
_h264_pps_changed = false;
_h264_sps_pps_sent = true;
return err;
}
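// wrap one annexb nalu into an flv video tag; frames are dropped until the
// sps/pps sequence header has been sent.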
srs_error_t rtmp_client::write_h264_ipb_frame(char* frame, int frame_size, uint32_t dts, uint32_t pts) {
srs_error_t err = srs_success;
// when sps or pps not sent, ignore the packet.
// @see https://github.com/ossrs/srs/issues/203
if (!_h264_sps_pps_sent) {
return srs_error_new(ERROR_H264_DROP_BEFORE_SPS_PPS, "drop sps/pps");
}
// 5bits, 7.3.1 NAL unit syntax,
// ISO_IEC_14496-10-AVC-2003.pdf, page 44.
// 7: SPS, 8: PPS, 5: I Frame, 1: P Frame
SrsAvcNaluType nal_unit_type = (SrsAvcNaluType)(frame[0] & 0x1f);
// for IDR frame, the frame is keyframe.
SrsVideoAvcFrameType frame_type = SrsVideoAvcFrameTypeInterFrame;
if (nal_unit_type == SrsAvcNaluTypeIDR) {
frame_type = SrsVideoAvcFrameTypeKeyFrame;
}
std::string ibp;
if ((err = _avc_ptr->mux_ipb_frame(frame, frame_size, ibp)) != srs_success) {
return srs_error_wrap(err, "mux frame");
}
int8_t avc_packet_type = SrsVideoAvcFrameTraitNALU;
char* flv = NULL;
int nb_flv = 0;
if ((err = _avc_ptr->mux_avc2flv(ibp, frame_type, avc_packet_type, dts, pts, &flv, &nb_flv)) != srs_success) {
return srs_error_wrap(err, "mux avc to flv");
}
// the timestamp in rtmp message header is dts.
uint32_t timestamp = dts;
return rtmp_write_packet(SrsFrameTypeVideo, timestamp, flv, nb_flv);
}
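// wrap one aac frame (or the AudioSpecificConfig) into an flv audio tag and send it.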
srs_error_t rtmp_client::write_audio_raw_frame(char* frame, int frame_size, SrsRawAacStreamCodec* codec, uint32_t dts) {
srs_error_t err = srs_success;
char* data = NULL;
int size = 0;
if ((err = _aac_ptr->mux_aac2flv(frame, frame_size, codec, dts, &data, &size)) != srs_success) {
return srs_error_wrap(err, "mux aac to flv");
}
return rtmp_write_packet(SrsFrameTypeAudio, dts, data, size);
}
srs_error_t rtmp_client::rtmp_write_packet(char type, uint32_t timestamp, char* data, int size) {
srs_error_t err = srs_success;
if ((err = connect()) != srs_success) {
return srs_error_wrap(err, "connect");
}
SrsSharedPtrMessage* msg = NULL;
if ((err = srs_rtmp_create_msg(type, timestamp, data, size, _rtmp_conn_ptr->sid(), &msg)) != srs_success) {
return srs_error_wrap(err, "create message");
}
srs_assert(msg);
// send out encoded msg.
if ((err = _rtmp_conn_ptr->send_and_free_message(msg)) != srs_success) {
close();
return srs_error_wrap(err, "send messages");
}
return err;
}
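// on_ts_video: walk the annexb stream of one ts message, cache sps/pps and
// refresh the sequence header when they change, then send every i/p/b nalu
// as a separate flv video tag.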
srs_error_t rtmp_client::on_ts_video(SrsTsMessage* msg, SrsBuffer* avs) {
srs_error_t err = srs_success;
// ensure rtmp connected.
if ((err = connect()) != srs_success) {
return srs_error_wrap(err, "connect");
}
// ts tbn(90kHz) to flv tbn(1kHz, in ms).
uint32_t dts = (uint32_t)(msg->dts / 90);
uint32_t pts = (uint32_t)(msg->pts / 90);//use the real pts, it differs from dts when b-frames exist
// send each frame.
while (!avs->empty()) {
char* frame = NULL;
int frame_size = 0;
if ((err = _avc_ptr->annexb_demux(avs, &frame, &frame_size)) != srs_success) {
return srs_error_wrap(err, "demux annexb");
}
// 5bits, 7.3.1 NAL unit syntax,
// ISO_IEC_14496-10-AVC-2003.pdf, page 44.
// 7: SPS, 8: PPS, 5: I Frame, 1: P Frame
SrsAvcNaluType nal_unit_type = (SrsAvcNaluType)(frame[0] & 0x1f);
// skip the aud(9) nalu; sps(7) and pps(8) are handled below.
if (nal_unit_type == SrsAvcNaluTypeAccessUnitDelimiter) {
continue;
}
// for sps
if (_avc_ptr->is_sps(frame, frame_size)) {
std::string sps;
if ((err = _avc_ptr->sps_demux(frame, frame_size, sps)) != srs_success) {
return srs_error_wrap(err, "demux sps");
}
if (_h264_sps == sps) {
continue;
}
_h264_sps_changed = true;
_h264_sps = sps;
if ((err = write_h264_sps_pps(dts, pts)) != srs_success) {
return srs_error_wrap(err, "write sps/pps");
}
continue;
}
// for pps
if (_avc_ptr->is_pps(frame, frame_size)) {
std::string pps;
if ((err = _avc_ptr->pps_demux(frame, frame_size, pps)) != srs_success) {
return srs_error_wrap(err, "demux pps");
}
if (_h264_pps == pps) {
continue;
}
_h264_pps_changed = true;
_h264_pps = pps;
if ((err = write_h264_sps_pps(dts, pts)) != srs_success) {
return srs_error_wrap(err, "write sps/pps");
}
continue;
}
// ibp frame.
// TODO: FIXME: we should group all frames to a rtmp/flv message from one ts message.
srs_info("mpegts: demux avc ibp frame size=%d, dts=%d", frame_size, dts);
if ((err = write_h264_ipb_frame(frame, frame_size, dts, pts)) != srs_success) {
return srs_error_wrap(err, "write frame");
}
}
return err;
}
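// on_ts_audio: walk the adts stream of one ts message, send the aac sequence
// header once, then send each raw aac frame as an flv audio tag.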
srs_error_t rtmp_client::on_ts_audio(SrsTsMessage* msg, SrsBuffer* avs) {
srs_error_t err = srs_success;
// ensure rtmp connected.
if ((err = connect()) != srs_success) {
return srs_error_wrap(err, "connect");
}
// ts tbn to flv tbn.
uint32_t dts = (uint32_t)(msg->dts / 90);
// send each frame.
while (!avs->empty()) {
char* frame = NULL;
int frame_size = 0;
SrsRawAacStreamCodec codec;
if ((err = _aac_ptr->adts_demux(avs, &frame, &frame_size, codec)) != srs_success) {
return srs_error_wrap(err, "demux adts");
}
// ignore invalid frames,
// * at least 1 byte is required for aac to decode the data.
if (frame_size <= 0) {
continue;
}
srs_info("mpegts: demux aac frame size=%d, dts=%d", frame_size, dts);
// generate sh.
if (_aac_specific_config.empty()) {
std::string sh;
if ((err = _aac_ptr->mux_sequence_header(&codec, sh)) != srs_success) {
return srs_error_wrap(err, "mux sequence header");
}
_aac_specific_config = sh;
codec.aac_packet_type = 0;
if ((err = write_audio_raw_frame((char*)sh.data(), (int)sh.length(), &codec, dts)) != srs_success) {
return srs_error_wrap(err, "write raw audio frame");
}
}
// audio raw data.
codec.aac_packet_type = 1;
if ((err = write_audio_raw_frame(frame, frame_size, &codec, dts)) != srs_success) {
return srs_error_wrap(err, "write audio raw frame");
}
}
return err;
}
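// on_ts_message: ISrsTsHandler callback invoked by SrsTsContext::decode() for
// every complete pes packet (see receive_ts_data).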
srs_error_t rtmp_client::on_ts_message(SrsTsMessage* msg) {
srs_error_t err = srs_success;
srs_trace("ts demux len:%d", msg->payload->length());
// When the audio SID is private stream 1, we use common audio.
// @see https://github.com/ossrs/srs/issues/740
if (msg->channel->apply == SrsTsPidApplyAudio && msg->sid == SrsTsPESStreamIdPrivateStream1) {
msg->sid = SrsTsPESStreamIdAudioCommon;
}
// when it is neither audio nor video, or not in adts/annexb format, it is not supported.
if (msg->stream_number() != 0) {
return srs_error_new(ERROR_STREAM_CASTER_TS_ES, "ts: unsupported stream format, sid=%#x(%s-%d)",
msg->sid, msg->is_audio()? "A":msg->is_video()? "V":"N", msg->stream_number());
}
// check supported codec
if (msg->channel->stream != SrsTsStreamVideoH264 && msg->channel->stream != SrsTsStreamAudioAAC) {
return srs_error_new(ERROR_STREAM_CASTER_TS_CODEC, "ts: unsupported stream codec=%d", msg->channel->stream);
}
// parse the stream.
SrsBuffer avs(msg->payload->bytes(), msg->payload->length());
// publish audio or video.
if (msg->channel->stream == SrsTsStreamVideoH264) {
if ((err = on_ts_video(msg, &avs)) != srs_success) {
return srs_error_wrap(err, "ts: consume video");
}
}
if (msg->channel->stream == SrsTsStreamAudioAAC) {
if ((err = on_ts_audio(msg, &avs)) != srs_success) {
return srs_error_wrap(err, "ts: consume audio");
}
}
return err;
}