
For #299, refine the codec to format-frame-sample chain.

This commit is contained in:
winlin 2017-02-12 18:18:18 +08:00
parent c4a510b834
commit d7458c4e72
18 changed files with 990 additions and 1150 deletions
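
A minimal sketch of the new format-frame-sample chain this commit introduces, assuming only the class and method names visible in the diffs below (SrsFormat, SrsAudioFrame, SrsHlsController); the helper feed_audio() and its flow are illustrative, not actual SRS call sites.

// Illustrative sketch only, not SRS code. Assumes the headers updated by this commit.
// SrsFormat parses a packet into a frame; the frame carries the samples and points
// to its codec; SrsHlsController consumes the frame and reaps TS segments.
int feed_audio(SrsFormat* format, SrsHlsController* controller, int64_t timestamp, char* data, int size)
{
    int ret = ERROR_SUCCESS;

    // Demux the FLV/RTMP audio payload into format->audio (SrsAudioFrame)
    // and format->acodec (SrsAudioCodec).
    if ((ret = format->on_audio(timestamp, data, size)) != ERROR_SUCCESS) {
        return ret;
    }

    // TS only carries AAC/MP3, as the HLS code in this commit also checks.
    if (!format->acodec || (format->acodec->id != SrsCodecAudioAAC && format->acodec->id != SrsCodecAudioMP3)) {
        return ret;
    }

    // The controller caches the frame to TS and flushes/reaps segments as needed.
    // HLS/TS uses the 90kHz timebase, hence timestamp * 90, as in the diff below.
    return controller->write_audio(format->audio, timestamp * 90);
}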

View file

@ -62,80 +62,14 @@ using namespace std;
// reset the piece id when deviation overflows this.
#define SRS_JUMP_WHEN_PIECE_DEVIATION 20
SrsHlsCacheWriter::SrsHlsCacheWriter(bool write_cache, bool write_file)
{
should_write_cache = write_cache;
should_write_file = write_file;
}
SrsHlsCacheWriter::~SrsHlsCacheWriter()
{
}
int SrsHlsCacheWriter::open(string file)
{
if (!should_write_file) {
return ERROR_SUCCESS;
}
return impl.open(file);
}
void SrsHlsCacheWriter::close()
{
if (!should_write_file) {
return;
}
impl.close();
}
bool SrsHlsCacheWriter::is_open()
{
if (!should_write_file) {
return true;
}
return impl.is_open();
}
int64_t SrsHlsCacheWriter::tellg()
{
if (!should_write_file) {
return 0;
}
return impl.tellg();
}
int SrsHlsCacheWriter::write(void* buf, size_t count, ssize_t* pnwrite)
{
if (should_write_cache) {
if (count > 0) {
data.append((char*)buf, count);
}
}
if (should_write_file) {
return impl.write(buf, count, pnwrite);
}
return ERROR_SUCCESS;
}
string SrsHlsCacheWriter::cache()
{
return data;
}
SrsHlsSegment::SrsHlsSegment(SrsTsContext* c, bool write_cache, bool write_file, SrsCodecAudio ac, SrsCodecVideo vc)
SrsHlsSegment::SrsHlsSegment(SrsTsContext* c, SrsCodecAudio ac, SrsCodecVideo vc)
{
duration = 0;
sequence_no = 0;
segment_start_dts = 0;
is_sequence_header = false;
writer = new SrsHlsCacheWriter(write_cache, write_file);
muxer = new SrsTSMuxer(writer, c, ac, vc);
writer = new SrsFileWriter();
muxer = new SrsTsMuxer(writer, c, ac, vc);
}
SrsHlsSegment::~SrsHlsSegment()
@ -290,8 +224,6 @@ SrsHlsMuxer::SrsHlsMuxer()
_sequence_no = 0;
current = NULL;
acodec = SrsCodecAudioReserved1;
should_write_cache = false;
should_write_file = true;
async = new SrsAsyncCallWorker();
context = new SrsTsContext();
}
@ -313,28 +245,26 @@ SrsHlsMuxer::~SrsHlsMuxer()
void SrsHlsMuxer::dispose()
{
if (should_write_file) {
std::vector<SrsHlsSegment*>::iterator it;
for (it = segments.begin(); it != segments.end(); ++it) {
SrsHlsSegment* segment = *it;
if (unlink(segment->full_path.c_str()) < 0) {
srs_warn("dispose unlink path failed, file=%s.", segment->full_path.c_str());
}
srs_freep(segment);
std::vector<SrsHlsSegment*>::iterator it;
for (it = segments.begin(); it != segments.end(); ++it) {
SrsHlsSegment* segment = *it;
if (unlink(segment->full_path.c_str()) < 0) {
srs_warn("dispose unlink path failed, file=%s.", segment->full_path.c_str());
}
segments.clear();
if (current) {
std::string path = current->full_path + ".tmp";
if (unlink(path.c_str()) < 0) {
srs_warn("dispose unlink path failed, file=%s", path.c_str());
}
srs_freep(current);
}
if (unlink(m3u8.c_str()) < 0) {
srs_warn("dispose unlink path failed. file=%s", m3u8.c_str());
srs_freep(segment);
}
segments.clear();
if (current) {
std::string path = current->full_path + ".tmp";
if (unlink(path.c_str()) < 0) {
srs_warn("dispose unlink path failed, file=%s", path.c_str());
}
srs_freep(current);
}
if (unlink(m3u8.c_str()) < 0) {
srs_warn("dispose unlink path failed. file=%s", m3u8.c_str());
}
// TODO: FIXME: support hls dispose in HTTP cache.
@ -407,13 +337,9 @@ int SrsHlsMuxer::update_config(SrsRequest* r, string entry_prefix,
// when update config, reset the history target duration.
max_td = (int)(fragment * _srs_config->get_hls_td_ratio(r->vhost));
// TODO: FIXME: refine better for SRS2 only support disk.
should_write_cache = false;
should_write_file = true;
// create m3u8 dir once.
m3u8_dir = srs_path_dirname(m3u8);
if (should_write_file && (ret = srs_create_dir_recursively(m3u8_dir)) != ERROR_SUCCESS) {
if ((ret = srs_create_dir_recursively(m3u8_dir)) != ERROR_SUCCESS) {
srs_error("create app dir %s failed. ret=%d", m3u8_dir.c_str(), ret);
return ret;
}
@ -468,7 +394,7 @@ int SrsHlsMuxer::segment_open(int64_t segment_start_dts)
}
// new segment.
current = new SrsHlsSegment(context, should_write_cache, should_write_file, default_acodec, default_vcodec);
current = new SrsHlsSegment(context, default_acodec, default_vcodec);
current->sequence_no = _sequence_no++;
current->segment_start_dts = segment_start_dts;
@ -540,7 +466,7 @@ int SrsHlsMuxer::segment_open(int64_t segment_start_dts)
// create dir recursively for hls.
std::string ts_dir = srs_path_dirname(current->full_path);
if (should_write_file && (ret = srs_create_dir_recursively(ts_dir)) != ERROR_SUCCESS) {
if ((ret = srs_create_dir_recursively(ts_dir)) != ERROR_SUCCESS) {
srs_error("create app dir %s failed. ret=%d", ts_dir.c_str(), ret);
return ret;
}
@ -735,7 +661,7 @@ int SrsHlsMuxer::segment_close(string log_desc)
// rename from tmp to real path
std::string tmp_file = full_path + ".tmp";
if (should_write_file && rename(tmp_file.c_str(), full_path.c_str()) < 0) {
if (rename(tmp_file.c_str(), full_path.c_str()) < 0) {
ret = ERROR_HLS_WRITE_FAILED;
srs_error("rename ts file failed, %s => %s. ret=%d",
tmp_file.c_str(), full_path.c_str(), ret);
@ -751,10 +677,8 @@ int SrsHlsMuxer::segment_close(string log_desc)
// rename from tmp to real path
std::string tmp_file = current->full_path + ".tmp";
if (should_write_file) {
if (unlink(tmp_file.c_str()) < 0) {
srs_warn("ignore unlink path failed, file=%s.", tmp_file.c_str());
}
if (unlink(tmp_file.c_str()) < 0) {
srs_warn("ignore unlink path failed, file=%s.", tmp_file.c_str());
}
srs_freep(current);
@ -788,10 +712,8 @@ int SrsHlsMuxer::segment_close(string log_desc)
for (int i = 0; i < (int)segment_to_remove.size(); i++) {
SrsHlsSegment* segment = segment_to_remove[i];
if (hls_cleanup && should_write_file) {
if (unlink(segment->full_path.c_str()) < 0) {
srs_warn("cleanup unlink path failed, file=%s.", segment->full_path.c_str());
}
if (hls_cleanup && unlink(segment->full_path.c_str()) < 0) {
srs_warn("cleanup unlink path failed, file=%s.", segment->full_path.c_str());
}
srs_freep(segment);
@ -818,7 +740,7 @@ int SrsHlsMuxer::refresh_m3u8()
std::string temp_m3u8 = m3u8 + ".temp";
if ((ret = _refresh_m3u8(temp_m3u8)) == ERROR_SUCCESS) {
if (should_write_file && rename(temp_m3u8.c_str(), m3u8.c_str()) < 0) {
if (rename(temp_m3u8.c_str(), m3u8.c_str()) < 0) {
ret = ERROR_HLS_WRITE_FAILED;
srs_error("rename m3u8 file failed. %s => %s, ret=%d", temp_m3u8.c_str(), m3u8.c_str(), ret);
}
@ -843,7 +765,7 @@ int SrsHlsMuxer::_refresh_m3u8(string m3u8_file)
return ret;
}
SrsHlsCacheWriter writer(should_write_cache, should_write_file);
SrsFileWriter writer;
if ((ret = writer.open(m3u8_file)) != ERROR_SUCCESS) {
srs_error("open m3u8 file %s failed. ret=%d", m3u8_file.c_str(), ret);
return ret;
@ -919,17 +841,54 @@ int SrsHlsMuxer::_refresh_m3u8(string m3u8_file)
return ret;
}
SrsHlsCache::SrsHlsCache()
SrsHlsController::SrsHlsController()
{
cache = new SrsTsCache();
ts = new SrsTsCache();
muxer = new SrsHlsMuxer();
}
SrsHlsCache::~SrsHlsCache()
SrsHlsController::~SrsHlsController()
{
srs_freep(cache);
srs_freep(muxer);
srs_freep(ts);
}
int SrsHlsCache::on_publish(SrsHlsMuxer* muxer, SrsRequest* req, int64_t segment_start_dts)
int SrsHlsController::initialize()
{
return muxer->initialize();
}
void SrsHlsController::dispose()
{
muxer->dispose();
}
int SrsHlsController::update_acodec(SrsCodecAudio ac)
{
return muxer->update_acodec(ac);
}
int SrsHlsController::sequence_no()
{
return muxer->sequence_no();
}
string SrsHlsController::ts_url()
{
return muxer->ts_url();
}
double SrsHlsController::duration()
{
return muxer->duration();
}
int SrsHlsController::deviation()
{
return muxer->deviation();
}
int SrsHlsController::on_publish(SrsRequest* req, int64_t segment_start_dts)
{
int ret = ERROR_SUCCESS;
@ -978,11 +937,11 @@ int SrsHlsCache::on_publish(SrsHlsMuxer* muxer, SrsRequest* req, int64_t segment
return ret;
}
int SrsHlsCache::on_unpublish(SrsHlsMuxer* muxer)
int SrsHlsController::on_unpublish()
{
int ret = ERROR_SUCCESS;
if ((ret = muxer->flush_audio(cache)) != ERROR_SUCCESS) {
if ((ret = muxer->flush_audio(ts)) != ERROR_SUCCESS) {
srs_error("m3u8 muxer flush audio failed. ret=%d", ret);
return ret;
}
@ -994,7 +953,7 @@ int SrsHlsCache::on_unpublish(SrsHlsMuxer* muxer)
return ret;
}
int SrsHlsCache::on_sequence_header(SrsHlsMuxer* muxer)
int SrsHlsController::on_sequence_header()
{
// TODO: support discontinuity for the same stream
// currently we reap and insert discontinuity when the encoder republishes,
@ -1005,12 +964,12 @@ int SrsHlsCache::on_sequence_header(SrsHlsMuxer* muxer)
return muxer->on_sequence_header();
}
int SrsHlsCache::write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t pts, SrsCodecSample* sample)
int SrsHlsController::write_audio(SrsAudioFrame* frame, int64_t pts)
{
int ret = ERROR_SUCCESS;
// write audio to cache.
if ((ret = cache->cache_audio(codec, pts, sample)) != ERROR_SUCCESS) {
if ((ret = ts->cache_audio(frame, pts)) != ERROR_SUCCESS) {
return ret;
}
@ -1022,16 +981,16 @@ int SrsHlsCache::write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
// @see https://github.com/ossrs/srs/issues/151
// we use absolutely overflow of segment to make jwplayer/ffplay happy
// @see https://github.com/ossrs/srs/issues/151#issuecomment-71155184
if (cache->audio && muxer->is_segment_absolutely_overflow()) {
if (ts->audio && muxer->is_segment_absolutely_overflow()) {
srs_info("hls: absolute audio reap segment.");
if ((ret = reap_segment("audio", muxer, cache->audio->pts)) != ERROR_SUCCESS) {
if ((ret = reap_segment("audio", ts->audio->pts)) != ERROR_SUCCESS) {
return ret;
}
}
// for pure audio, aggregate some frame to one.
if (muxer->pure_audio() && cache->audio) {
if (pts - cache->audio->start_pts < SRS_CONSTS_HLS_PURE_AUDIO_AGGREGATE) {
if (muxer->pure_audio() && ts->audio) {
if (pts - ts->audio->start_pts < SRS_CONSTS_HLS_PURE_AUDIO_AGGREGATE) {
return ret;
}
}
@ -1040,19 +999,19 @@ int SrsHlsCache::write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
// it's ok for the hls overload, or maybe cause the audio corrupt,
// which introduced by aggregate the audios to a big one.
// @see https://github.com/ossrs/srs/issues/512
if ((ret = muxer->flush_audio(cache)) != ERROR_SUCCESS) {
if ((ret = muxer->flush_audio(ts)) != ERROR_SUCCESS) {
return ret;
}
return ret;
}
int SrsHlsCache::write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t dts, SrsCodecSample* sample)
int SrsHlsController::write_video(SrsVideoFrame* frame, int64_t dts)
{
int ret = ERROR_SUCCESS;
// write video to cache.
if ((ret = cache->cache_video(codec, dts, sample)) != ERROR_SUCCESS) {
if ((ret = ts->cache_video(frame, dts)) != ERROR_SUCCESS) {
return ret;
}
@ -1061,16 +1020,16 @@ int SrsHlsCache::write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
// do reap ts if any of:
// a. wait keyframe and got keyframe.
// b. always reap when not wait keyframe.
if (!muxer->wait_keyframe() || sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
if (!muxer->wait_keyframe() || frame->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
// reap the segment, which will also flush the video.
if ((ret = reap_segment("video", muxer, cache->video->dts)) != ERROR_SUCCESS) {
if ((ret = reap_segment("video", ts->video->dts)) != ERROR_SUCCESS) {
return ret;
}
}
}
// flush video when got one
if ((ret = muxer->flush_video(cache)) != ERROR_SUCCESS) {
if ((ret = muxer->flush_video(ts)) != ERROR_SUCCESS) {
srs_error("m3u8 muxer flush video failed. ret=%d", ret);
return ret;
}
@ -1078,7 +1037,7 @@ int SrsHlsCache::write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
return ret;
}
int SrsHlsCache::reap_segment(string log_desc, SrsHlsMuxer* muxer, int64_t segment_start_dts)
int SrsHlsController::reap_segment(string log_desc, int64_t segment_start_dts)
{
int ret = ERROR_SUCCESS;
@ -1098,7 +1057,7 @@ int SrsHlsCache::reap_segment(string log_desc, SrsHlsMuxer* muxer, int64_t segme
}
// segment open, flush video first.
if ((ret = muxer->flush_video(cache)) != ERROR_SUCCESS) {
if ((ret = muxer->flush_video(ts)) != ERROR_SUCCESS) {
srs_error("m3u8 muxer flush video failed. ret=%d", ret);
return ret;
}
@ -1106,7 +1065,7 @@ int SrsHlsCache::reap_segment(string log_desc, SrsHlsMuxer* muxer, int64_t segme
// segment open, flush the audio.
// @see: ngx_rtmp_hls_open_fragment
/* start fragment with audio to make iPhone happy */
if ((ret = muxer->flush_audio(cache)) != ERROR_SUCCESS) {
if ((ret = muxer->flush_audio(ts)) != ERROR_SUCCESS) {
srs_error("m3u8 muxer flush audio failed. ret=%d", ret);
return ret;
}
@ -1125,9 +1084,7 @@ SrsHls::SrsHls()
last_update_time = 0;
jitter = new SrsRtmpJitter();
muxer = new SrsHlsMuxer();
cache = new SrsHlsCache();
controller = new SrsHlsController();
pprint = SrsPithyPrint::create_hls();
stream_dts = 0;
@ -1136,10 +1093,7 @@ SrsHls::SrsHls()
SrsHls::~SrsHls()
{
srs_freep(jitter);
srs_freep(muxer);
srs_freep(cache);
srs_freep(controller);
srs_freep(pprint);
}
@ -1149,7 +1103,7 @@ void SrsHls::dispose()
on_unpublish();
}
muxer->dispose();
controller->dispose();
}
int SrsHls::cycle()
@ -1194,7 +1148,7 @@ int SrsHls::initialize(SrsOriginHub* h, SrsFormat* f, SrsRequest* r)
req = r;
format = f;
if ((ret = muxer->initialize()) != ERROR_SUCCESS) {
if ((ret = controller->initialize()) != ERROR_SUCCESS) {
return ret;
}
@ -1217,7 +1171,7 @@ int SrsHls::on_publish()
return ret;
}
if ((ret = cache->on_publish(muxer, req, stream_dts)) != ERROR_SUCCESS) {
if ((ret = controller->on_publish(req, stream_dts)) != ERROR_SUCCESS) {
return ret;
}
@ -1239,14 +1193,14 @@ void SrsHls::on_unpublish()
return;
}
if ((ret = cache->on_unpublish(muxer)) != ERROR_SUCCESS) {
if ((ret = controller->on_unpublish()) != ERROR_SUCCESS) {
srs_error("ignore m3u8 muxer flush/close audio failed. ret=%d", ret);
}
enabled = false;
}
int SrsHls::on_audio(SrsSharedPtrMessage* shared_audio)
int SrsHls::on_audio(SrsSharedPtrMessage* shared_audio, SrsFormat* format)
{
int ret = ERROR_SUCCESS;
@ -1260,35 +1214,23 @@ int SrsHls::on_audio(SrsSharedPtrMessage* shared_audio)
SrsSharedPtrMessage* audio = shared_audio->copy();
SrsAutoFree(SrsSharedPtrMessage, audio);
sample->clear();
if ((ret = codec->audio_aac_demux(audio->payload, audio->size, sample)) != ERROR_SUCCESS) {
if (ret != ERROR_HLS_TRY_MP3) {
srs_error("hls aac demux audio failed. ret=%d", ret);
return ret;
}
if ((ret = codec->audio_mp3_demux(audio->payload, audio->size, sample)) != ERROR_SUCCESS) {
srs_error("hls mp3 demux audio failed. ret=%d", ret);
return ret;
}
}
srs_info("audio decoded, type=%d, codec=%d, cts=%d, size=%d, time=%"PRId64,
sample->frame_type, codec->audio_codec_id, sample->cts, audio->size, audio->timestamp);
SrsCodecAudio acodec = (SrsCodecAudio)codec->audio_codec_id;
// ts support audio codec: aac/mp3
srs_assert(format->acodec);
SrsCodecAudio acodec = format->acodec->id;
if (acodec != SrsCodecAudioAAC && acodec != SrsCodecAudioMP3) {
return ret;
}
// when codec changed, write new header.
if ((ret = muxer->update_acodec(acodec)) != ERROR_SUCCESS) {
if ((ret = controller->update_acodec(acodec)) != ERROR_SUCCESS) {
srs_error("http: ts audio write header failed. ret=%d", ret);
return ret;
}
// ignore sequence header
if (acodec == SrsCodecAudioAAC && sample->aac_packet_type == SrsCodecAudioTypeSequenceHeader) {
return cache->on_sequence_header(muxer);
srs_assert(format->audio);
if (acodec == SrsCodecAudioAAC && format->audio->aac_packet_type == SrsCodecAudioTypeSequenceHeader) {
return controller->on_sequence_header();
}
// TODO: FIXME: config the jitter of HLS.
@ -1303,7 +1245,7 @@ int SrsHls::on_audio(SrsSharedPtrMessage* shared_audio)
// for pure audio, we need to update the stream dts also.
stream_dts = dts;
if ((ret = cache->write_audio(codec, muxer, dts, sample)) != ERROR_SUCCESS) {
if ((ret = controller->write_audio(format->audio, dts)) != ERROR_SUCCESS) {
srs_error("hls cache write audio failed. ret=%d", ret);
return ret;
}
@ -1311,7 +1253,7 @@ int SrsHls::on_audio(SrsSharedPtrMessage* shared_audio)
return ret;
}
int SrsHls::on_video(SrsSharedPtrMessage* shared_video, bool is_sps_pps)
int SrsHls::on_video(SrsSharedPtrMessage* shared_video, SrsFormat* format)
{
int ret = ERROR_SUCCESS;
@ -1325,34 +1267,21 @@ int SrsHls::on_video(SrsSharedPtrMessage* shared_video, bool is_sps_pps)
SrsSharedPtrMessage* video = shared_video->copy();
SrsAutoFree(SrsSharedPtrMessage, video);
// user can disable the sps parse to workaround when parse sps failed.
// @see https://github.com/ossrs/srs/issues/474
if (is_sps_pps) {
codec->avc_parse_sps = _srs_config->get_parse_sps(req->vhost);
}
sample->clear();
if ((ret = codec->video_avc_demux(video->payload, video->size, sample)) != ERROR_SUCCESS) {
srs_error("hls codec demux video failed. ret=%d", ret);
return ret;
}
srs_info("video decoded, type=%d, codec=%d, avc=%d, cts=%d, size=%d, time=%"PRId64,
sample->frame_type, codec->video_codec_id, sample->avc_packet_type, sample->cts, video->size, video->timestamp);
// ignore info frame,
// @see https://github.com/ossrs/srs/issues/288#issuecomment-69863909
if (sample->frame_type == SrsCodecVideoAVCFrameVideoInfoFrame) {
srs_assert(format->video);
if (format->video->frame_type == SrsCodecVideoAVCFrameVideoInfoFrame) {
return ret;
}
if (codec->video_codec_id != SrsCodecVideoAVC) {
srs_assert(format->vcodec);
if (format->vcodec->id != SrsCodecVideoAVC) {
return ret;
}
// ignore sequence header
if (sample->frame_type == SrsCodecVideoAVCFrameKeyFrame
&& sample->avc_packet_type == SrsCodecVideoAVCTypeSequenceHeader) {
return cache->on_sequence_header(muxer);
if (format->video->avc_packet_type == SrsCodecVideoAVCTypeSequenceHeader) {
return controller->on_sequence_header();
}
// TODO: FIXME: config the jitter of HLS.
@ -1363,7 +1292,7 @@ int SrsHls::on_video(SrsSharedPtrMessage* shared_video, bool is_sps_pps)
int64_t dts = video->timestamp * 90;
stream_dts = dts;
if ((ret = cache->write_video(codec, muxer, dts, sample)) != ERROR_SUCCESS) {
if ((ret = controller->write_video(format->video, dts)) != ERROR_SUCCESS) {
srs_error("hls cache write video failed. ret=%d", ret);
return ret;
}
@ -1378,15 +1307,16 @@ void SrsHls::hls_show_mux_log()
{
pprint->elapse();
// reportable
if (pprint->can_print()) {
// the run time is not equals to stream time,
// @see: https://github.com/ossrs/srs/issues/81#issuecomment-48100994
// it's ok.
srs_trace("-> "SRS_CONSTS_LOG_HLS" time=%"PRId64", stream dts=%"PRId64"(%"PRId64"ms), sno=%d, ts=%s, dur=%.2f, dva=%dp",
pprint->age(), stream_dts, stream_dts / 90, muxer->sequence_no(), muxer->ts_url().c_str(),
muxer->duration(), muxer->deviation());
if (!pprint->can_print()) {
return;
}
// the run time is not equals to stream time,
// @see: https://github.com/ossrs/srs/issues/81#issuecomment-48100994
// it's ok.
srs_trace("-> "SRS_CONSTS_LOG_HLS" time=%"PRId64", stream dts=%"PRId64"(%"PRId64"ms), sno=%d, ts=%s, dur=%.2f, dva=%dp",
pprint->age(), stream_dts, stream_dts / 90, controller->sequence_no(), controller->ts_url().c_str(),
controller->duration(), controller->deviation());
}

View file

@ -40,7 +40,7 @@ class SrsFormat;
class SrsSharedPtrMessage;
class SrsAmf0Object;
class SrsRtmpJitter;
class SrsTSMuxer;
class SrsTsMuxer;
class SrsRequest;
class SrsPithyPrint;
class SrsSource;
@ -53,41 +53,6 @@ class SrsHlsSegment;
class SrsTsCache;
class SrsTsContext;
/**
* write to file and cache.
*/
class SrsHlsCacheWriter : public SrsFileWriter
{
private:
SrsFileWriter impl;
std::string data;
bool should_write_cache;
bool should_write_file;
public:
SrsHlsCacheWriter(bool write_cache, bool write_file);
virtual ~SrsHlsCacheWriter();
public:
/**
* open file writer, can open then close then open...
*/
virtual int open(std::string file);
virtual void close();
public:
virtual bool is_open();
virtual int64_t tellg();
public:
/**
* write to file.
* @param pnwrite the output nb_write, NULL to ignore.
*/
virtual int write(void* buf, size_t count, ssize_t* pnwrite);
public:
/**
* get the string cache.
*/
virtual std::string cache();
};
/**
* the wrapper of m3u8 segment from specification:
*
@ -106,14 +71,14 @@ public:
// ts full file to write.
std::string full_path;
// the muxer to write ts.
SrsHlsCacheWriter* writer;
SrsTSMuxer* muxer;
SrsFileWriter* writer;
SrsTsMuxer* muxer;
// current segment start dts for m3u8
int64_t segment_start_dts;
// whether current segment is sequence header.
bool is_sequence_header;
public:
SrsHlsSegment(SrsTsContext* c, bool write_cache, bool write_file, SrsCodecAudio ac, SrsCodecVideo vc);
SrsHlsSegment(SrsTsContext* c, SrsCodecAudio ac, SrsCodecVideo vc);
virtual ~SrsHlsSegment();
public:
/**
@ -200,10 +165,6 @@ private:
int max_td;
std::string m3u8;
std::string m3u8_url;
private:
// TODO: FIXME: remove it.
bool should_write_cache;
bool should_write_file;
private:
/**
* m3u8 segments.
@ -303,34 +264,46 @@ private:
* so we must gather audio frames together, and recalc the timestamp @see SrsTsAacJitter,
* we use an aac jitter to correct the audio pts.
*/
class SrsHlsCache
class SrsHlsController
{
private:
SrsTsCache* cache;
// The HLS muxer to reap ts and m3u8.
// The TS is cached to SrsTsCache then flush to ts segment.
SrsHlsMuxer* muxer;
// The TS cache
SrsTsCache* ts;
public:
SrsHlsCache();
virtual ~SrsHlsCache();
SrsHlsController();
virtual ~SrsHlsController();
public:
virtual int initialize();
virtual void dispose();
virtual int update_acodec(SrsCodecAudio ac);
virtual int sequence_no();
virtual std::string ts_url();
virtual double duration();
virtual int deviation();
public:
/**
* when publish or unpublish stream.
*/
virtual int on_publish(SrsHlsMuxer* muxer, SrsRequest* req, int64_t segment_start_dts);
virtual int on_unpublish(SrsHlsMuxer* muxer);
virtual int on_publish(SrsRequest* req, int64_t segment_start_dts);
virtual int on_unpublish();
/**
* when get sequence header,
* must write a #EXT-X-DISCONTINUITY to m3u8.
* @see: hls-m3u8-draft-pantos-http-live-streaming-12.txt
* @see: 3.4.11. EXT-X-DISCONTINUITY
*/
virtual int on_sequence_header(SrsHlsMuxer* muxer);
virtual int on_sequence_header();
/**
* write audio to cache, if need to flush, flush to muxer.
*/
virtual int write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t pts, SrsCodecSample* sample);
virtual int write_audio(SrsAudioFrame* frame, int64_t pts);
/**
* write video to muxer.
*/
virtual int write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t dts, SrsCodecSample* sample);
virtual int write_video(SrsVideoFrame* frame, int64_t dts);
private:
/**
* reopen the muxer for a new hls segment,
@ -338,7 +311,7 @@ private:
* then write the key frame to the new segment.
* so, user must reap_segment then flush_video to hls muxer.
*/
virtual int reap_segment(std::string log_desc, SrsHlsMuxer* muxer, int64_t segment_start_dts);
virtual int reap_segment(std::string log_desc, int64_t segment_start_dts);
};
/**
@ -348,8 +321,7 @@ private:
class SrsHls
{
private:
SrsHlsMuxer* muxer;
SrsHlsCache* cache;
SrsHlsController* controller;
private:
SrsRequest* req;
bool enabled;
@ -400,13 +372,14 @@ public:
* mux the audio packets to ts.
* @param shared_audio, directly ptr, copy it if need to save it.
*/
virtual int on_audio(SrsSharedPtrMessage* shared_audio);
virtual int on_audio(SrsSharedPtrMessage* shared_audio, SrsFormat* format);
/**
* mux the video packets to ts.
* @param shared_video, directly ptr, copy it if need to save it.
* @param is_sps_pps whether the video is h.264 sps/pps.
*/
virtual int on_video(SrsSharedPtrMessage* shared_video, bool is_sps_pps);
// TODO: FIXME: Remove param is_sps_pps.
virtual int on_video(SrsSharedPtrMessage* shared_video, SrsFormat* format);
private:
virtual void hls_show_mux_log();
};

View file

@ -653,75 +653,6 @@ bool SrsLiveEntry::is_mp3()
return _is_mp3;
}
SrsHlsM3u8Stream::SrsHlsM3u8Stream()
{
}
SrsHlsM3u8Stream::~SrsHlsM3u8Stream()
{
}
void SrsHlsM3u8Stream::set_m3u8(std::string v)
{
m3u8 = v;
}
int SrsHlsM3u8Stream::serve_http(ISrsHttpResponseWriter* w, ISrsHttpMessage* r)
{
int ret = ERROR_SUCCESS;
std::string data = m3u8;
w->header()->set_content_length((int)data.length());
w->header()->set_content_type("application/x-mpegURL;charset=utf-8");
if ((ret = w->write((char*)data.data(), (int)data.length())) != ERROR_SUCCESS) {
if (!srs_is_client_gracefully_close(ret)) {
srs_error("send m3u8 failed. ret=%d", ret);
}
return ret;
}
return ret;
}
SrsHlsTsStream::SrsHlsTsStream()
{
}
SrsHlsTsStream::~SrsHlsTsStream()
{
}
void SrsHlsTsStream::set_ts(std::string v)
{
ts = v;
}
int SrsHlsTsStream::serve_http(ISrsHttpResponseWriter* w, ISrsHttpMessage* r)
{
int ret = ERROR_SUCCESS;
std::string data = ts;
w->header()->set_content_length((int)data.length());
w->header()->set_content_type("video/MP2T");
if ((ret = w->write((char*)data.data(), (int)data.length())) != ERROR_SUCCESS) {
if (!srs_is_client_gracefully_close(ret)) {
srs_error("send ts failed. ret=%d", ret);
}
return ret;
}
return ret;
}
SrsHlsEntry::SrsHlsEntry()
{
tmpl = NULL;
}
SrsHttpStreamServer::SrsHttpStreamServer(SrsServer* svr)
{
server = svr;

View file

@ -262,58 +262,6 @@ public:
bool is_aac();
};
/**
* the m3u8 stream handler.
*/
class SrsHlsM3u8Stream : public ISrsHttpHandler
{
private:
std::string m3u8;
public:
SrsHlsM3u8Stream();
virtual ~SrsHlsM3u8Stream();
public:
virtual void set_m3u8(std::string v);
public:
virtual int serve_http(ISrsHttpResponseWriter* w, ISrsHttpMessage* r);
};
/**
* the ts stream handler.
*/
class SrsHlsTsStream : public ISrsHttpHandler
{
private:
std::string ts;
public:
SrsHlsTsStream();
virtual ~SrsHlsTsStream();
public:
virtual void set_ts(std::string v);
public:
virtual int serve_http(ISrsHttpResponseWriter* w, ISrsHttpMessage* r);
};
/**
* the srs hls entry.
*/
// TODO: FIXME: use the hls template and entry.
struct SrsHlsEntry
{
// for template, the mount contains variables.
// for concrete stream, the mount is url to access.
std::string mount;
// the template to create the entry
SrsHlsEntry* tmpl;
// key: the m3u8/ts file path.
// value: the http handler.
std::map<std::string, ISrsHttpHandler*> streams;
SrsHlsEntry();
};
/**
* the http stream server instance,
* serve http stream, for example, flv/ts/mp3/aac live stream.

View file

@ -44,6 +44,7 @@ using namespace std;
#include <srs_app_pithy_print.hpp>
#include <srs_app_rtmp_conn.hpp>
#include <srs_protocol_utility.hpp>
#include <srs_protocol_format.hpp>
#ifdef SRS_AUTO_STREAM_CASTER
@ -135,13 +136,13 @@ int SrsRtpConn::on_udp_packet(sockaddr_in* from, char* buf, int nb_buf)
SrsRtspAudioCache::SrsRtspAudioCache()
{
dts = 0;
audio_samples = NULL;
audio = NULL;
payload = NULL;
}
SrsRtspAudioCache::~SrsRtspAudioCache()
{
srs_freep(audio_samples);
srs_freep(audio);
srs_freep(payload);
}
@ -456,10 +457,10 @@ int SrsRtspConn::on_rtp_audio(SrsRtpPacket* pkt, int64_t dts)
// cache current audio to kickoff.
acache->dts = dts;
acache->audio_samples = pkt->audio_samples;
acache->audio = pkt->audio;
acache->payload = pkt->payload;
pkt->audio_samples = NULL;
pkt->audio = NULL;
pkt->payload = NULL;
return ret;
@ -474,11 +475,11 @@ int SrsRtspConn::kickoff_audio_cache(SrsRtpPacket* pkt, int64_t dts)
return ret;
}
if (dts - acache->dts > 0 && acache->audio_samples->nb_sample_units > 0) {
int64_t delta = (dts - acache->dts) / acache->audio_samples->nb_sample_units;
for (int i = 0; i < acache->audio_samples->nb_sample_units; i++) {
char* frame = acache->audio_samples->sample_units[i].bytes;
int nb_frame = acache->audio_samples->sample_units[i].size;
if (dts - acache->dts > 0 && acache->audio->nb_samples > 0) {
int64_t delta = (dts - acache->dts) / acache->audio->nb_samples;
for (int i = 0; i < acache->audio->nb_samples; i++) {
char* frame = acache->audio->samples[i].bytes;
int nb_frame = acache->audio->samples[i].size;
int64_t timestamp = (acache->dts + delta * i) / 90;
acodec->aac_packet_type = 1;
if ((ret = write_audio_raw_frame(frame, nb_frame, acodec, (uint32_t)timestamp)) != ERROR_SUCCESS) {
@ -488,7 +489,7 @@ int SrsRtspConn::kickoff_audio_cache(SrsRtpPacket* pkt, int64_t dts)
}
acache->dts = 0;
srs_freep(acache->audio_samples);
srs_freep(acache->audio);
srs_freep(acache->payload);
return ret;
@ -510,13 +511,17 @@ int SrsRtspConn::write_sequence_header()
if (true) {
std::string sh = aac_specific_config;
SrsAvcAacCodec dec;
if ((ret = dec.audio_aac_sequence_header_demux((char*)sh.c_str(), (int)sh.length())) != ERROR_SUCCESS) {
SrsFormat* format = new SrsFormat();
SrsAutoFree(SrsFormat, format);
if ((ret = format->on_aac_sequence_header((char*)sh.c_str(), (int)sh.length())) != ERROR_SUCCESS) {
return ret;
}
SrsAudioCodec* dec = format->acodec;
acodec->sound_format = SrsCodecAudioAAC;
acodec->sound_type = (dec.aac_channels == 2)? SrsCodecAudioSoundTypeStereo : SrsCodecAudioSoundTypeMono;
acodec->sound_type = (dec->aac_channels == 2)? SrsCodecAudioSoundTypeStereo : SrsCodecAudioSoundTypeMono;
acodec->sound_size = SrsCodecAudioSampleSize16bit;
acodec->aac_packet_type = 0;
@ -526,7 +531,7 @@ int SrsRtspConn::write_sequence_header()
16000, 12000, 11025, 8000,
7350, 0, 0, 0
};
switch (aac_sample_rates[dec.aac_sample_rate]) {
switch (aac_sample_rates[dec->aac_sample_rate]) {
case 11025:
acodec->sound_rate = SrsCodecAudioSampleRate11025;
break;

View file

@ -53,7 +53,7 @@ class SrsRawH264Stream;
class SrsRawAacStream;
struct SrsRawAacStreamCodec;
class SrsSharedPtrMessage;
class SrsCodecSample;
class SrsAudioFrame;
class SrsSimpleStream;
class SrsPithyPrint;
class SrsSimpleRtmpClient;
@ -87,7 +87,7 @@ public:
struct SrsRtspAudioCache
{
int64_t dts;
SrsCodecSample* audio_samples;
SrsAudioFrame* audio;
SrsSimpleStream* payload;
SrsRtspAudioCache();

View file

@ -854,7 +854,7 @@ SrsOriginHub::SrsOriginHub()
hds = new SrsHds();
#endif
ng_exec = new SrsNgExec();
format = new SrsFormat();
format = new SrsRtmpFormat();
_srs_config->subscribe(this);
}
@ -971,7 +971,29 @@ int SrsOriginHub::on_audio(SrsSharedPtrMessage* shared_audio)
return ret;
}
if ((ret = hls->on_audio(msg)) != ERROR_SUCCESS) {
// cache the sequence header if aac
// donot cache the sequence header to gop_cache, return here.
if (format->is_aac_sequence_header()) {
srs_assert(format->acodec);
SrsAudioCodec* c = format->acodec;
static int flv_sample_sizes[] = {8, 16, 0};
static int flv_sound_types[] = {1, 2, 0};
// when got audio stream info.
SrsStatistic* stat = SrsStatistic::instance();
if ((ret = stat->on_audio_info(req, SrsCodecAudioAAC, c->sound_rate, c->sound_type, c->aac_object)) != ERROR_SUCCESS) {
return ret;
}
srs_trace("%dB audio sh, codec(%d, profile=%s, %dchannels, %dkbps, %dHZ), flv(%dbits, %dchannels, %dHZ)",
msg->size, c->id, srs_codec_aac_object2str(c->aac_object).c_str(), c->aac_channels,
c->audio_data_rate / 1000, aac_sample_rates[c->aac_sample_rate],
flv_sample_sizes[c->sound_size], flv_sound_types[c->sound_type],
flv_sample_rates[c->sound_rate]);
}
if ((ret = hls->on_audio(msg, format)) != ERROR_SUCCESS) {
// apply the error strategy for hls.
// @see https://github.com/ossrs/srs/issues/264
std::string hls_error_strategy = _srs_config->get_hls_on_error(req->vhost);
@ -1044,12 +1066,36 @@ int SrsOriginHub::on_video(SrsSharedPtrMessage* shared_video, bool is_sequence_h
SrsSharedPtrMessage* msg = shared_video;
if ((ret = format->on_video(msg, is_sequence_header)) != ERROR_SUCCESS) {
// user can disable the sps parse to workaround when parse sps failed.
// @see https://github.com/ossrs/srs/issues/474
if (is_sequence_header) {
format->avc_parse_sps = _srs_config->get_parse_sps(req->vhost);
}
if ((ret = format->on_video(msg)) != ERROR_SUCCESS) {
srs_error("Codec parse video failed, ret=%d", ret);
return ret;
}
if ((ret = hls->on_video(msg, is_sequence_header)) != ERROR_SUCCESS) {
// cache the sequence header if h264
// donot cache the sequence header to gop_cache, return here.
if (format->is_avc_sequence_header()) {
SrsVideoCodec* c = format->vcodec;
srs_assert(c);
// when got video stream info.
SrsStatistic* stat = SrsStatistic::instance();
if ((ret = stat->on_video_info(req, SrsCodecVideoAVC, c->avc_profile, c->avc_level, c->width, c->height)) != ERROR_SUCCESS) {
return ret;
}
srs_trace("%dB video sh, codec(%d, profile=%s, level=%s, %dx%d, %dkbps, %.1ffps, %.1fs)",
msg->size, c->id, srs_codec_avc_profile2str(c->avc_profile).c_str(),
srs_codec_avc_level2str(c->avc_level).c_str(), c->width, c->height,
c->video_data_rate / 1000, c->frame_rate, c->duration);
}
if ((ret = hls->on_video(msg, format)) != ERROR_SUCCESS) {
// apply the error strategy for hls.
// @see https://github.com/ossrs/srs/issues/264
std::string hls_error_strategy = _srs_config->get_hls_on_error(req->vhost);
@ -1341,11 +1387,11 @@ int SrsOriginHub::on_reload_vhost_hls(string vhost)
// when reload to start hls, hls will never get the sequence header in stream,
// use the SrsSource.on_hls_start to push the sequence header to HLS.
// TODO: maybe need to decode the metadata?
if (cache_sh_video && (ret = hls->on_video(cache_sh_video, true)) != ERROR_SUCCESS) {
if (cache_sh_video && (ret = hls->on_video(cache_sh_video, format)) != ERROR_SUCCESS) {
srs_error("hls process video sequence header message failed. ret=%d", ret);
return ret;
}
if (cache_sh_audio && (ret = hls->on_audio(cache_sh_audio)) != ERROR_SUCCESS) {
if (cache_sh_audio && (ret = hls->on_audio(cache_sh_audio, format)) != ERROR_SUCCESS) {
srs_error("hls process audio sequence header message failed. ret=%d", ret);
return ret;
}
@ -2137,35 +2183,6 @@ int SrsSource::on_audio_imp(SrsSharedPtrMessage* msg)
}
}
// cache the sequence header if aac
// donot cache the sequence header to gop_cache, return here.
if (is_aac_sequence_header) {
// parse detail audio codec
SrsAvcAacCodec codec;
SrsCodecSample sample;
if ((ret = codec.audio_aac_demux(msg->payload, msg->size, &sample)) != ERROR_SUCCESS) {
srs_error("source codec demux audio failed. ret=%d", ret);
return ret;
}
static int flv_sample_sizes[] = {8, 16, 0};
static int flv_sound_types[] = {1, 2, 0};
// when got audio stream info.
SrsStatistic* stat = SrsStatistic::instance();
if ((ret = stat->on_audio_info(req, SrsCodecAudioAAC, sample.sound_rate, sample.sound_type, codec.aac_object)) != ERROR_SUCCESS) {
return ret;
}
srs_trace("%dB audio sh, codec(%d, profile=%s, %dchannels, %dkbps, %dHZ), "
"flv(%dbits, %dchannels, %dHZ)",
msg->size, codec.audio_codec_id,
srs_codec_aac_object2str(codec.aac_object).c_str(), codec.aac_channels,
codec.audio_data_rate / 1000, aac_sample_rates[codec.aac_sample_rate],
flv_sample_sizes[sample.sound_size], flv_sound_types[sample.sound_type],
flv_sample_rates[sample.sound_rate]);
}
// copy to all consumer
if (!drop_for_reduce) {
for (int i = 0; i < (int)consumers.size(); i++) {
@ -2296,31 +2313,6 @@ int SrsSource::on_video_imp(SrsSharedPtrMessage* msg)
// donot cache the sequence header to gop_cache, return here.
if (is_sequence_header) {
meta->update_vsh(msg);
// parse detail audio codec
SrsAvcAacCodec codec;
// user can disable the sps parse to workaround when parse sps failed.
// @see https://github.com/ossrs/srs/issues/474
codec.avc_parse_sps = _srs_config->get_parse_sps(req->vhost);
SrsCodecSample sample;
if ((ret = codec.video_avc_demux(msg->payload, msg->size, &sample)) != ERROR_SUCCESS) {
srs_error("source codec demux video failed. ret=%d", ret);
return ret;
}
// when got video stream info.
SrsStatistic* stat = SrsStatistic::instance();
if ((ret = stat->on_video_info(req, SrsCodecVideoAVC, codec.avc_profile, codec.avc_level, codec.width, codec.height)) != ERROR_SUCCESS) {
return ret;
}
srs_trace("%dB video sh, codec(%d, profile=%s, level=%s, %dx%d, %dkbps, %dfps, %ds)",
msg->size, codec.video_codec_id,
srs_codec_avc_profile2str(codec.avc_profile).c_str(),
srs_codec_avc_level2str(codec.avc_level).c_str(), codec.width, codec.height,
codec.video_data_rate / 1000, codec.frame_rate, codec.duration);
}
// Copy to hub to all utilities.

View file

@ -38,7 +38,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <srs_app_reload.hpp>
#include <srs_core_performance.hpp>
class SrsFormat;
class SrsRtmpFormat;
class SrsConsumer;
class SrsPlayEdge;
class SrsPublishEdge;
@ -423,7 +423,7 @@ private:
bool is_active;
private:
// The format, codec information.
SrsFormat* format;
SrsRtmpFormat* format;
// hls handler.
SrsHls* hls;
// The DASH encoder.

View file

@ -93,7 +93,6 @@ int SrsAacEncoder::write_audio(int64_t timestamp, char* data, int size)
// @see: E.4.2 Audio Tags, video_file_format_spec_v10_1.pdf, page 76
int8_t sound_format = stream->read_1bytes();
// @see: SrsAvcAacCodec::audio_aac_demux
//int8_t sound_type = sound_format & 0x01;
//int8_t sound_size = (sound_format >> 1) & 0x01;
//int8_t sound_rate = (sound_format >> 2) & 0x03;

File diff suppressed because it is too large

View file

@ -201,7 +201,6 @@ enum SrsCodecFlvTag
/**
* Annex E. The FLV File Format
* @see SrsAvcAacCodec for the media stream codec.
*/
class SrsFlvCodec
{
@ -375,6 +374,7 @@ std::string srs_codec_aac_profile2str(SrsAacProfile aac_profile);
enum SrsAacObjectType
{
SrsAacObjectTypeReserved = 0,
SrsAacObjectTypeForbidden = 0,
// Table 1.1 - Audio Object Type definition
// @see @see ISO_IEC_14496-3-AAC-2001.pdf, page 23
@ -454,9 +454,9 @@ class SrsSample
{
public:
// The size of unit.
int nb_unit;
int size;
// The ptr of unit, user must manage it.
char* unit;
char* bytes;
public:
SrsSample();
virtual ~SrsSample();
@ -472,9 +472,6 @@ class SrsCodec
public:
SrsCodec();
virtual ~SrsCodec();
public:
// Get the codec type.
virtual SrsCodecFlvTag codec() = 0;
};
/**
@ -484,17 +481,41 @@ class SrsAudioCodec : public SrsCodec
{
public:
// audio specified
SrsCodecAudio acodec;
SrsCodecAudio id;
// audio aac specified.
SrsCodecAudioSampleRate sound_rate;
SrsCodecAudioSampleSize sound_size;
SrsCodecAudioSoundType sound_type;
SrsCodecAudioType aac_packet_type;
int audio_data_rate; // in bps
public:
/**
* audio specified
* audioObjectType, in 1.6.2.1 AudioSpecificConfig, page 33,
* 1.5.1.1 Audio object type definition, page 23,
* in ISO_IEC_14496-3-AAC-2001.pdf.
*/
SrsAacObjectType aac_object;
/**
* samplingFrequencyIndex
*/
uint8_t aac_sample_rate;
/**
* channelConfiguration
*/
uint8_t aac_channels;
public:
/**
* the aac extra data, the AAC sequence header,
* without the flv codec header,
* @see: ffmpeg, AVCodecContext::extradata
*/
int aac_extra_size;
char* aac_extra_data;
public:
SrsAudioCodec();
virtual ~SrsAudioCodec();
public:
virtual SrsCodecFlvTag codec();
virtual bool is_aac_codec_ok();
};
/**
@ -503,29 +524,54 @@ public:
class SrsVideoCodec : public SrsCodec
{
public:
// video specified
SrsCodecVideoAVCFrame frame_type;
SrsCodecVideoAVCType avc_packet_type;
// whether sample_units contains IDR frame.
bool has_idr;
// Whether exists AUD NALU.
bool has_aud;
// Whether exists SPS/PPS NALU.
bool has_sps_pps;
// The first nalu type.
SrsAvcNaluType first_nalu_type;
SrsCodecVideo id;
int video_data_rate; // in bps
double frame_rate;
double duration;
int width;
int height;
public:
/**
* the avc extra data, the AVC sequence header,
* without the flv codec header,
* @see: ffmpeg, AVCodecContext::extradata
*/
int avc_extra_size;
char* avc_extra_data;
public:
/**
* video specified
*/
// profile_idc, ISO_IEC_14496-10-AVC-2003.pdf, page 45.
SrsAvcProfile avc_profile;
// level_idc, ISO_IEC_14496-10-AVC-2003.pdf, page 45.
SrsAvcLevel avc_level;
// lengthSizeMinusOne, ISO_IEC_14496-15-AVC-format-2012.pdf, page 16
int8_t NAL_unit_length;
uint16_t sequenceParameterSetLength;
char* sequenceParameterSetNALUnit;
uint16_t pictureParameterSetLength;
char* pictureParameterSetNALUnit;
public:
// the avc payload format.
SrsAvcPayloadFormat payload_format;
public:
SrsVideoCodec();
virtual ~SrsVideoCodec();
public:
virtual SrsCodecFlvTag codec();
virtual bool is_avc_codec_ok();
};
/**
* A codec frame, consists of a codec and a group of samples.
* A frame, consists of a codec and a group of samples.
*/
class SrsFrame
{
public:
// The DTS/PTS in milliseconds, which is TBN=1000.
int64_t dts;
// PTS = DTS + CTS.
int32_t cts;
public:
// The codec info of frame.
SrsCodec* codec;
@ -536,43 +582,32 @@ public:
public:
SrsFrame();
virtual ~SrsFrame();
public:
// Initialize the frame, to parse samples.
virtual int initialize(SrsCodec* c);
// Add a sample to frame.
virtual int add_sample(char* bytes, int size);
};
/**
* the samples in the flv audio/video packet.
* the sample used to analyze a video/audio packet,
* split the h.264 NALUs to buffers, or aac raw data to a buffer,
* and decode the video/audio specified infos.
*
* the sample unit:
* a video packet coded in h.264 contains many NALUs, each is a sample unit.
* an audio packet coded in aac is a sample unit.
* @remark, the video/audio sequence header is not sample unit,
* all sequence header stores as extra data,
* @see SrsAvcAacCodec.avc_extra_data and SrsAvcAacCodec.aac_extra_data
* @remark, user must clear all samples before decode a new video/audio packet.
*/
class SrsCodecSample
* An audio frame, besides a frame, contains the audio frame info, such as frame type.
*/
class SrsAudioFrame : public SrsFrame
{
public:
/**
* each audio/video raw data packet will dumps to one or multiple buffers,
* the buffers will write to hls and clear to reset.
* generally, aac audio packet corresponding to one buffer,
* where avc/h264 video packet may contains multiple buffer.
*/
int nb_sample_units;
SrsCodecSampleUnit sample_units[SRS_MAX_CODEC_SAMPLE];
SrsCodecAudioType aac_packet_type;
public:
/**
* whether the sample is video sample which demux from video packet.
*/
bool is_video;
/**
* CompositionTime, video_file_format_spec_v10_1.pdf, page 78.
* cts = pts - dts, where dts = flvheader->timestamp.
*/
int32_t cts;
SrsAudioFrame();
virtual ~SrsAudioFrame();
public:
virtual SrsAudioCodec* acodec();
};
/**
* A video frame, besides a frame, contains the video frame info, such as frame type.
*/
class SrsVideoFrame : public SrsFrame
{
public:
// video specified
SrsCodecVideoAVCFrame frame_type;
@ -586,171 +621,74 @@ public:
// The first nalu type.
SrsAvcNaluType first_nalu_type;
public:
// audio specified
SrsCodecAudio acodec;
// audio aac specified.
SrsCodecAudioSampleRate sound_rate;
SrsCodecAudioSampleSize sound_size;
SrsCodecAudioSoundType sound_type;
SrsCodecAudioType aac_packet_type;
SrsVideoFrame();
virtual ~SrsVideoFrame();
public:
SrsCodecSample();
virtual ~SrsCodecSample();
// Add the sample without ANNEXB or IBMF header, or RAW AAC or MP3 data.
virtual int add_sample(char* bytes, int size);
public:
/**
* clear all samples.
* the sample units never copy the bytes, it directly use the ptr,
* so when video/audio packet is destroyed, the sample must be clear.
* in a word, user must clear sample before demux it.
* @remark demux sample use SrsAvcAacCodec.audio_aac_demux or video_avc_demux.
*/
void clear();
/**
* add the a sample unit, it's a h.264 NALU or aac raw data.
* the sample unit directly use the ptr of packet bytes,
* so user must never use sample unit when packet is destroyed.
* in a word, user must clear sample before demux it.
*/
int add_sample_unit(char* bytes, int size);
virtual SrsVideoCodec* vcodec();
};
/**
* the h264/avc and aac codec, for media stream.
*
* to demux the FLV/RTMP video/audio packet to sample,
* add each NALUs of h.264 as a sample unit to sample,
* while the entire aac raw data as a sample unit.
*
* for sequence header,
* demux it and save it in the avc_extra_data and aac_extra_data,
*
* for the codec info, such as audio sample rate,
* decode from FLV/RTMP header, then use codec info in sequence
* header to override it.
*/
class SrsAvcAacCodec
* A codec format, including one or many streams, each stream identified by a frame.
* For example, a typical RTMP stream format consists of a video and an audio frame.
* Maybe some RTMP stream only has an audio stream, for instance, a radio application.
*/
class SrsFormat
{
private:
SrsBuffer* stream;
public:
/**
* metadata specified
*/
int duration;
int width;
int height;
int frame_rate;
// @see: SrsCodecVideo
int video_codec_id;
int video_data_rate; // in bps
// @see: SrsCodecAudioType
int audio_codec_id;
int audio_data_rate; // in bps
public:
/**
* video specified
*/
// profile_idc, ISO_IEC_14496-10-AVC-2003.pdf, page 45.
SrsAvcProfile avc_profile;
// level_idc, ISO_IEC_14496-10-AVC-2003.pdf, page 45.
SrsAvcLevel avc_level;
// lengthSizeMinusOne, ISO_IEC_14496-15-AVC-format-2012.pdf, page 16
int8_t NAL_unit_length;
uint16_t sequenceParameterSetLength;
char* sequenceParameterSetNALUnit;
uint16_t pictureParameterSetLength;
char* pictureParameterSetNALUnit;
private:
// the avc payload format.
SrsAvcPayloadFormat payload_format;
public:
/**
* audio specified
* audioObjectType, in 1.6.2.1 AudioSpecificConfig, page 33,
* 1.5.1.1 Audio object type definition, page 23,
* in ISO_IEC_14496-3-AAC-2001.pdf.
*/
SrsAacObjectType aac_object;
/**
* samplingFrequencyIndex
*/
uint8_t aac_sample_rate;
/**
* channelConfiguration
*/
uint8_t aac_channels;
public:
/**
* the avc extra data, the AVC sequence header,
* without the flv codec header,
* @see: ffmpeg, AVCodecContext::extradata
*/
int avc_extra_size;
char* avc_extra_data;
/**
* the aac extra data, the AAC sequence header,
* without the flv codec header,
* @see: ffmpeg, AVCodecContext::extradata
*/
int aac_extra_size;
char* aac_extra_data;
SrsAudioFrame* audio;
SrsAudioCodec* acodec;
SrsVideoFrame* video;
SrsVideoCodec* vcodec;
SrsBuffer* buffer;
public:
// for sequence header, whether parse the h.264 sps.
// TODO: FIXME: Refine it.
bool avc_parse_sps;
public:
SrsAvcAacCodec();
virtual ~SrsAvcAacCodec();
SrsFormat();
virtual ~SrsFormat();
public:
// whether avc or aac codec sequence header or extra data is decoded ok.
virtual bool is_avc_codec_ok();
virtual bool is_aac_codec_ok();
// the following function used for hls to build the sample and codec.
// Initialize the format.
virtual int initialize();
// When got a parsed audio packet.
virtual int on_audio(int64_t timestamp, char* data, int size);
// When got a parsed video packet.
virtual int on_video(int64_t timestamp, char* data, int size);
// When got a audio aac sequence header.
virtual int on_aac_sequence_header(char* data, int size);
public:
/**
* demux the audio packet in aac codec.
* the packet mux in FLV/RTMP format defined in flv specification.
* demux the audio specified data(sound_format, sound_size, ...) to sample.
* demux the aac specified data(aac_profile, ...) to codec from sequence header.
* demux the aac raw to sample units.
*/
virtual int audio_aac_demux(char* data, int size, SrsCodecSample* sample);
virtual int audio_mp3_demux(char* data, int size, SrsCodecSample* sample);
/**
* demux the video packet in h.264 codec.
* the packet mux in FLV/RTMP format defined in flv specification.
* demux the video specified data(frame_type, codec_id, ...) to sample.
* demux the h.264 specified data(avc_profile, ...) to codec from sequence header.
* demux the h.264 NALUs to sample units.
*/
virtual int video_avc_demux(char* data, int size, SrsCodecSample* sample);
virtual bool is_aac_sequence_header();
virtual bool is_avc_sequence_header();
private:
virtual int video_nalu_demux(SrsBuffer* stream, SrsCodecSample* sample);
public:
/**
* directly demux the sequence header, without RTMP packet header.
*/
virtual int audio_aac_sequence_header_demux(char* data, int size);
// Demux the video packet in H.264 codec.
// The packet is muxed in FLV format, defined in flv specification.
// Demux the sps/pps from sequence header.
// Demux the samples from NALUs.
virtual int video_avc_demux(SrsBuffer* stream, int64_t timestamp);
private:
/**
* when avc packet type is SrsCodecVideoAVCTypeSequenceHeader,
* decode the sps and pps.
*/
// Parse the H.264 SPS/PPS.
virtual int avc_demux_sps_pps(SrsBuffer* stream);
/**
* decode the sps rbsp stream.
*/
virtual int avc_demux_sps();
virtual int avc_demux_sps_rbsp(char* rbsp, int nb_rbsp);
/**
* demux the avc NALU in "AnnexB"
* from ISO_IEC_14496-10-AVC-2003.pdf, page 211.
*/
virtual int avc_demux_annexb_format(SrsBuffer* stream, SrsCodecSample* sample);
/**
* demux the avc NALU in "ISO Base Media File Format"
* from ISO_IEC_14496-15-AVC-format-2012.pdf, page 20
*/
virtual int avc_demux_ibmf_format(SrsBuffer* stream, SrsCodecSample* sample);
private:
// Parse the H.264 NALUs.
virtual int video_nalu_demux(SrsBuffer* stream);
// Demux the avc NALU in "AnnexB" from ISO_IEC_14496-10-AVC-2003.pdf, page 211.
virtual int avc_demux_annexb_format(SrsBuffer* stream);
// Demux the avc NALU in "ISO Base Media File Format" from ISO_IEC_14496-15-AVC-format-2012.pdf, page 20
virtual int avc_demux_ibmf_format(SrsBuffer* stream);
private:
// Demux the audio packet in AAC codec.
// Demux the asc from sequence header.
// Demux the samples from RAW data.
virtual int audio_aac_demux(SrsBuffer* stream, int64_t timestamp);
virtual int audio_mp3_demux(SrsBuffer* stream, int64_t timestamp);
public:
// Directly demux the sequence header, without RTMP packet header.
virtual int audio_aac_sequence_header_demux(char* data, int size);
};
#endif
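
As a rough illustration of the frame and sample layout declared above (member and type names are taken from this commit's diffs; the helper dump_video_samples() is hypothetical, not SRS code), a consumer could walk the NALUs of a parsed video packet like this:

// Illustrative sketch only. format->video is filled by SrsFormat::on_video();
// each SrsSample typically points into the original packet payload, so the
// packet must outlive the frame.
void dump_video_samples(SrsFormat* format)
{
    SrsVideoFrame* frame = format->video;
    if (!frame || !format->vcodec) {
        return;
    }

    for (int i = 0; i < frame->nb_samples; i++) {
        SrsSample* sample = &frame->samples[i];
        // The low 5 bits of the first byte carry the H.264 NALU type,
        // the same way the TS cache code in this commit reads it.
        SrsAvcNaluType nalu_type = (SrsAvcNaluType)(sample->bytes[0] & 0x1f);
        srs_trace("sample #%d: nalu=%d, size=%dB", i, (int)nalu_type, sample->size);
    }
}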

View file

@ -106,7 +106,6 @@ int SrsMp3Encoder::write_audio(int64_t timestamp, char* data, int size)
// @see: E.4.2 Audio Tags, video_file_format_spec_v10_1.pdf, page 76
int8_t sound_format = stream->read_1bytes();
// @see: SrsAvcAacCodec::audio_aac_demux
//int8_t sound_type = sound_format & 0x01;
//int8_t sound_size = (sound_format >> 1) & 0x01;
//int8_t sound_rate = (sound_format >> 2) & 0x03;

View file

@ -2708,7 +2708,7 @@ int SrsTsPayloadPMT::psi_encode(SrsBuffer* stream)
return ret;
}
SrsTSMuxer::SrsTSMuxer(SrsFileWriter* w, SrsTsContext* c, SrsCodecAudio ac, SrsCodecVideo vc)
SrsTsMuxer::SrsTsMuxer(SrsFileWriter* w, SrsTsContext* c, SrsCodecAudio ac, SrsCodecVideo vc)
{
writer = w;
context = c;
@ -2717,12 +2717,12 @@ SrsTSMuxer::SrsTSMuxer(SrsFileWriter* w, SrsTsContext* c, SrsCodecAudio ac, SrsC
vcodec = vc;
}
SrsTSMuxer::~SrsTSMuxer()
SrsTsMuxer::~SrsTsMuxer()
{
close();
}
int SrsTSMuxer::open(string p)
int SrsTsMuxer::open(string p)
{
int ret = ERROR_SUCCESS;
@ -2740,13 +2740,13 @@ int SrsTSMuxer::open(string p)
return ret;
}
int SrsTSMuxer::update_acodec(SrsCodecAudio ac)
int SrsTsMuxer::update_acodec(SrsCodecAudio ac)
{
acodec = ac;
return ERROR_SUCCESS;
}
int SrsTSMuxer::write_audio(SrsTsMessage* audio)
int SrsTsMuxer::write_audio(SrsTsMessage* audio)
{
int ret = ERROR_SUCCESS;
@ -2762,7 +2762,7 @@ int SrsTSMuxer::write_audio(SrsTsMessage* audio)
return ret;
}
int SrsTSMuxer::write_video(SrsTsMessage* video)
int SrsTsMuxer::write_video(SrsTsMessage* video)
{
int ret = ERROR_SUCCESS;
@ -2778,12 +2778,12 @@ int SrsTSMuxer::write_video(SrsTsMessage* video)
return ret;
}
void SrsTSMuxer::close()
void SrsTsMuxer::close()
{
writer->close();
}
SrsCodecVideo SrsTSMuxer::video_codec()
SrsCodecVideo SrsTsMuxer::video_codec()
{
return vcodec;
}
@ -2800,7 +2800,7 @@ SrsTsCache::~SrsTsCache()
srs_freep(video);
}
int SrsTsCache::cache_audio(SrsAvcAacCodec* codec, int64_t dts, SrsCodecSample* sample)
int SrsTsCache::cache_audio(SrsAudioFrame* frame, int64_t dts)
{
int ret = ERROR_SUCCESS;
@ -2817,16 +2817,16 @@ int SrsTsCache::cache_audio(SrsAvcAacCodec* codec, int64_t dts, SrsCodecSample*
audio->sid = SrsTsPESStreamIdAudioCommon;
// must be aac or mp3
SrsCodecAudio acodec = (SrsCodecAudio)codec->audio_codec_id;
srs_assert(acodec == SrsCodecAudioAAC || acodec == SrsCodecAudioMP3);
SrsAudioCodec* acodec = frame->acodec();
srs_assert(acodec->id == SrsCodecAudioAAC || acodec->id == SrsCodecAudioMP3);
// write video to cache.
if (codec->audio_codec_id == SrsCodecAudioAAC) {
if ((ret = do_cache_aac(codec, sample)) != ERROR_SUCCESS) {
if (acodec->id == SrsCodecAudioAAC) {
if ((ret = do_cache_aac(frame)) != ERROR_SUCCESS) {
return ret;
}
} else {
if ((ret = do_cache_mp3(codec, sample)) != ERROR_SUCCESS) {
if ((ret = do_cache_mp3(frame)) != ERROR_SUCCESS) {
return ret;
}
}
@ -2834,52 +2834,55 @@ int SrsTsCache::cache_audio(SrsAvcAacCodec* codec, int64_t dts, SrsCodecSample*
return ret;
}
int SrsTsCache::cache_video(SrsAvcAacCodec* codec, int64_t dts, SrsCodecSample* sample)
int SrsTsCache::cache_video(SrsVideoFrame* frame, int64_t dts)
{
int ret = ERROR_SUCCESS;
// create the ts video message.
if (!video) {
video = new SrsTsMessage();
video->write_pcr = sample->frame_type == SrsCodecVideoAVCFrameKeyFrame;
video->write_pcr = (frame->frame_type == SrsCodecVideoAVCFrameKeyFrame);
video->start_pts = dts;
}
video->dts = dts;
video->pts = video->dts + sample->cts * 90;
video->pts = video->dts + frame->cts * 90;
video->sid = SrsTsPESStreamIdVideoCommon;
// write video to cache.
if ((ret = do_cache_avc(codec, sample)) != ERROR_SUCCESS) {
if ((ret = do_cache_avc(frame)) != ERROR_SUCCESS) {
return ret;
}
return ret;
}
int SrsTsCache::do_cache_mp3(SrsAvcAacCodec* codec, SrsCodecSample* sample)
int SrsTsCache::do_cache_mp3(SrsAudioFrame* frame)
{
int ret = ERROR_SUCCESS;
// for mp3, directly write to cache.
// TODO: FIXME: implements the ts jitter.
for (int i = 0; i < sample->nb_sample_units; i++) {
SrsCodecSampleUnit* sample_unit = &sample->sample_units[i];
audio->payload->append(sample_unit->bytes, sample_unit->size);
for (int i = 0; i < frame->nb_samples; i++) {
SrsSample* sample = &frame->samples[i];
audio->payload->append(sample->bytes, sample->size);
}
return ret;
}
int SrsTsCache::do_cache_aac(SrsAvcAacCodec* codec, SrsCodecSample* sample)
int SrsTsCache::do_cache_aac(SrsAudioFrame* frame)
{
int ret = ERROR_SUCCESS;
for (int i = 0; i < sample->nb_sample_units; i++) {
SrsCodecSampleUnit* sample_unit = &sample->sample_units[i];
int32_t size = sample_unit->size;
SrsAudioCodec* codec = frame->acodec();
srs_assert(codec);
for (int i = 0; i < frame->nb_samples; i++) {
SrsSample* sample = &frame->samples[i];
int32_t size = sample->size;
if (!sample_unit->bytes || size <= 0 || size > 0x1fff) {
if (!sample->bytes || size <= 0 || size > 0x1fff) {
ret = ERROR_HLS_AAC_FRAME_LENGTH;
srs_error("invalid aac frame length=%d, ret=%d", size, ret);
return ret;
@ -2933,7 +2936,7 @@ int SrsTsCache::do_cache_aac(SrsAvcAacCodec* codec, SrsCodecSample* sample)
// copy to audio buffer
audio->payload->append((const char*)adts_header, sizeof(adts_header));
audio->payload->append(sample_unit->bytes, sample_unit->size);
audio->payload->append(sample->bytes, sample->size);
}
return ret;
@ -2995,7 +2998,7 @@ void srs_avc_insert_aud(SrsSimpleStream* payload, bool& aud_inserted)
}
}
int SrsTsCache::do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample)
int SrsTsCache::do_cache_avc(SrsVideoFrame* frame)
{
int ret = ERROR_SUCCESS;
@ -3003,7 +3006,7 @@ int SrsTsCache::do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample)
bool aud_inserted = false;
// Insert a default AUD NALU when no AUD in samples.
if (!sample->has_aud) {
if (!frame->has_aud) {
// the aud(access unit delimiter) before each frame.
// 7.3.2.4 Access unit delimiter RBSP syntax
// ISO_IEC_14496-10-AVC-2012.pdf, page 66.
@ -3039,12 +3042,15 @@ int SrsTsCache::do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample)
video->payload->append((const char*)default_aud_nalu, 2);
}
SrsVideoCodec* codec = frame->vcodec();
srs_assert(codec);
// all sample use cont nalu header, except the sps-pps before IDR frame.
for (int i = 0; i < sample->nb_sample_units; i++) {
SrsCodecSampleUnit* sample_unit = &sample->sample_units[i];
int32_t size = sample_unit->size;
for (int i = 0; i < frame->nb_samples; i++) {
SrsSample* sample = &frame->samples[i];
int32_t size = sample->size;
if (!sample_unit->bytes || size <= 0) {
if (!sample->bytes || size <= 0) {
ret = ERROR_HLS_AVC_SAMPLE_SIZE;
srs_error("invalid avc sample length=%d, ret=%d", size, ret);
return ret;
@ -3052,11 +3058,11 @@ int SrsTsCache::do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample)
// 5bits, 7.3.1 NAL unit syntax,
// ISO_IEC_14496-10-AVC-2012.pdf, page 83.
SrsAvcNaluType nal_unit_type = (SrsAvcNaluType)(sample_unit->bytes[0] & 0x1f);
SrsAvcNaluType nal_unit_type = (SrsAvcNaluType)(sample->bytes[0] & 0x1f);
// Insert sps/pps before IDR when there is no sps/pps in samples.
// The sps/pps is parsed from sequence header(generally the first flv packet).
if (nal_unit_type == SrsAvcNaluTypeIDR && !sample->has_sps_pps) {
if (nal_unit_type == SrsAvcNaluTypeIDR && !frame->has_sps_pps) {
if (codec->sequenceParameterSetLength > 0) {
srs_avc_insert_aud(video->payload, aud_inserted);
video->payload->append(codec->sequenceParameterSetNALUnit, codec->sequenceParameterSetLength);
@ -3069,7 +3075,7 @@ int SrsTsCache::do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample)
// Insert the NALU to video in annexb.
srs_avc_insert_aud(video->payload, aud_inserted);
video->payload->append(sample_unit->bytes, sample_unit->size);
video->payload->append(sample->bytes, sample->size);
}
return ret;
@ -3078,8 +3084,7 @@ int SrsTsCache::do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample)
SrsTsEncoder::SrsTsEncoder()
{
writer = NULL;
codec = new SrsAvcAacCodec();
sample = new SrsCodecSample();
format = new SrsFormat();
cache = new SrsTsCache();
context = new SrsTsContext();
muxer = NULL;
@ -3087,8 +3092,7 @@ SrsTsEncoder::SrsTsEncoder()
SrsTsEncoder::~SrsTsEncoder()
{
srs_freep(codec);
srs_freep(sample);
srs_freep(format);
srs_freep(cache);
srs_freep(muxer);
srs_freep(context);
@ -3098,6 +3102,10 @@ int SrsTsEncoder::initialize(SrsFileWriter* fw)
{
int ret = ERROR_SUCCESS;
if ((ret = format->initialize()) != ERROR_SUCCESS) {
return ret;
}
srs_assert(fw);
if (!fw->is_open()) {
@ -3109,7 +3117,7 @@ int SrsTsEncoder::initialize(SrsFileWriter* fw)
writer = fw;
srs_freep(muxer);
muxer = new SrsTSMuxer(fw, context, SrsCodecAudioAAC, SrsCodecVideoAVC);
muxer = new SrsTsMuxer(fw, context, SrsCodecAudioAAC, SrsCodecVideoAVC);
if ((ret = muxer->open("")) != ERROR_SUCCESS) {
return ret;
@ -3122,32 +3130,24 @@ int SrsTsEncoder::write_audio(int64_t timestamp, char* data, int size)
{
int ret = ERROR_SUCCESS;
sample->clear();
if ((ret = codec->audio_aac_demux(data, size, sample)) != ERROR_SUCCESS) {
if (ret != ERROR_HLS_TRY_MP3) {
srs_error("http: ts aac demux audio failed. ret=%d", ret);
return ret;
}
if ((ret = codec->audio_mp3_demux(data, size, sample)) != ERROR_SUCCESS) {
srs_error("http: ts mp3 demux audio failed. ret=%d", ret);
return ret;
}
if ((ret = format->on_audio(timestamp, data, size)) != ERROR_SUCCESS) {
return ret;
}
SrsCodecAudio acodec = (SrsCodecAudio)codec->audio_codec_id;
// ts support audio codec: aac/mp3
if (acodec != SrsCodecAudioAAC && acodec != SrsCodecAudioMP3) {
srs_assert(format->acodec && format->audio);
if (format->acodec->id != SrsCodecAudioAAC && format->acodec->id != SrsCodecAudioMP3) {
return ret;
}
// when codec changed, write new header.
if ((ret = muxer->update_acodec(acodec)) != ERROR_SUCCESS) {
if ((ret = muxer->update_acodec(format->acodec->id)) != ERROR_SUCCESS) {
srs_error("http: ts audio write header failed. ret=%d", ret);
return ret;
}
// for aac: ignore sequence header
if (acodec == SrsCodecAudioAAC && sample->aac_packet_type == SrsCodecAudioTypeSequenceHeader) {
if (format->acodec->id == SrsCodecAudioAAC && format->audio->aac_packet_type == SrsCodecAudioTypeSequenceHeader) {
return ret;
}
@ -3157,7 +3157,7 @@ int SrsTsEncoder::write_audio(int64_t timestamp, char* data, int size)
int64_t dts = timestamp * 90;
// write audio to cache.
if ((ret = cache->cache_audio(codec, dts, sample)) != ERROR_SUCCESS) {
if ((ret = cache->cache_audio(format->audio, dts)) != ERROR_SUCCESS) {
return ret;
}
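// Assumed timebase behind the dts above: RTMP timestamps are in milliseconds, while
// TS PTS/DTS run on a 90kHz clock, so one millisecond equals 90 ticks.
int64_t example_ms  = 40;               // one frame at 25fps
int64_t example_dts = example_ms * 90;  // 3600 ticks in the 90kHz TS timebase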
@ -3172,32 +3172,31 @@ int SrsTsEncoder::write_video(int64_t timestamp, char* data, int size)
{
int ret = ERROR_SUCCESS;
sample->clear();
if ((ret = codec->video_avc_demux(data, size, sample)) != ERROR_SUCCESS) {
srs_error("http: ts codec demux video failed. ret=%d", ret);
if ((ret = format->on_video(timestamp, data, size)) != ERROR_SUCCESS) {
return ret;
}
// ignore info frame,
// @see https://github.com/ossrs/srs/issues/288#issuecomment-69863909
if (sample->frame_type == SrsCodecVideoAVCFrameVideoInfoFrame) {
srs_assert(format->video && format->vcodec);
if (format->video->frame_type == SrsCodecVideoAVCFrameVideoInfoFrame) {
return ret;
}
if (codec->video_codec_id != SrsCodecVideoAVC) {
if (format->vcodec->id != SrsCodecVideoAVC) {
return ret;
}
// ignore sequence header
if (sample->frame_type == SrsCodecVideoAVCFrameKeyFrame
&& sample->avc_packet_type == SrsCodecVideoAVCTypeSequenceHeader) {
if (format->video->frame_type == SrsCodecVideoAVCFrameKeyFrame
&& format->video->avc_packet_type == SrsCodecVideoAVCTypeSequenceHeader) {
return ret;
}
int64_t dts = timestamp * 90;
// write video to cache.
if ((ret = cache->cache_video(codec, dts, sample)) != ERROR_SUCCESS) {
if ((ret = cache->cache_video(format->video, dts)) != ERROR_SUCCESS) {
return ret;
}
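// A hypothetical usage sketch of the refactored encoder (file name and tag variables
// assumed): open a writer, initialize the encoder, then feed it raw FLV audio/video
// tag payloads with millisecond timestamps; the format/cache/muxer chain does the rest.
int example_write_ts(int64_t tag_ms, char* tag_data, int tag_size, bool is_audio)
{
    int ret = ERROR_SUCCESS;
    
    SrsFileWriter fw;
    if ((ret = fw.open("live.ts")) != ERROR_SUCCESS) {
        return ret;
    }
    
    SrsTsEncoder enc;
    if ((ret = enc.initialize(&fw)) != ERROR_SUCCESS) {
        return ret;
    }
    
    // in a real stream this would run once per demuxed FLV tag.
    if (is_audio) {
        ret = enc.write_audio(tag_ms, tag_data, tag_size);
    } else {
        ret = enc.write_video(tag_ms, tag_data, tag_size);
    }
    
    return ret;
}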


@ -42,8 +42,7 @@ class SrsTsCache;
class SrsTSMuxer;
class SrsFileWriter;
class SrsFileReader;
class SrsAvcAacCodec;
class SrsCodecSample;
class SrsFormat;
class SrsSimpleStream;
class SrsTsAdaptationField;
class SrsTsPayload;
@ -1560,7 +1559,7 @@ protected:
* write data from the frame (header info) and buffer (data) to a ts file.
* it's a simple object wrapper for the utility from nginx-rtmp: SrsMpegtsWriter
*/
class SrsTSMuxer
class SrsTsMuxer
{
private:
SrsCodecVideo vcodec;
@ -1570,8 +1569,8 @@ private:
SrsFileWriter* writer;
std::string path;
public:
SrsTSMuxer(SrsFileWriter* w, SrsTsContext* c, SrsCodecAudio ac, SrsCodecVideo vc);
virtual ~SrsTSMuxer();
SrsTsMuxer(SrsFileWriter* w, SrsTsContext* c, SrsCodecAudio ac, SrsCodecVideo vc);
virtual ~SrsTsMuxer();
public:
/**
* open the writer, do not write the PSI of ts.
@ -1585,6 +1584,7 @@ public:
* @remark for the aac audio codec, for example, SRS1, it's ok to write the PSI when opening the ts.
* @see https://github.com/ossrs/srs/issues/301
*/
// TODO: FIXME: Remove it.
virtual int update_acodec(SrsCodecAudio ac);
/**
* write an audio frame to ts,
@ -1628,29 +1628,29 @@ public:
/**
* write audio to cache
*/
virtual int cache_audio(SrsAvcAacCodec* codec, int64_t dts, SrsCodecSample* sample);
virtual int cache_audio(SrsAudioFrame* frame, int64_t dts);
/**
* write video to cache.
*/
virtual int cache_video(SrsAvcAacCodec* codec, int64_t dts, SrsCodecSample* sample);
virtual int cache_video(SrsVideoFrame* frame, int64_t dts);
private:
virtual int do_cache_mp3(SrsAvcAacCodec* codec, SrsCodecSample* sample);
virtual int do_cache_aac(SrsAvcAacCodec* codec, SrsCodecSample* sample);
virtual int do_cache_avc(SrsAvcAacCodec* codec, SrsCodecSample* sample);
virtual int do_cache_mp3(SrsAudioFrame* frame);
virtual int do_cache_aac(SrsAudioFrame* frame);
virtual int do_cache_avc(SrsVideoFrame* frame);
};
/**
* encode data to ts file.
*/
// TODO: FIXME: Rename it.
class SrsTsEncoder
{
private:
SrsFileWriter* writer;
private:
SrsAvcAacCodec* codec;
SrsCodecSample* sample;
SrsFormat* format;
SrsTsCache* cache;
SrsTSMuxer* muxer;
SrsTsMuxer* muxer;
SrsTsContext* context;
public:
SrsTsEncoder();


@ -26,24 +26,19 @@
#include <srs_kernel_error.hpp>
#include <srs_kernel_codec.hpp>
#include <srs_rtmp_stack.hpp>
#include <srs_kernel_buffer.hpp>
#include <srs_core_autofree.hpp>
#include <srs_kernel_utility.hpp>
SrsFormat::SrsFormat()
SrsRtmpFormat::SrsRtmpFormat()
{
audio = video = NULL;
}
SrsFormat::~SrsFormat()
SrsRtmpFormat::~SrsRtmpFormat()
{
srs_freep(audio);
srs_freep(video);
}
int SrsFormat::initialize()
{
return ERROR_SUCCESS;
}
int SrsFormat::on_metadata(SrsOnMetaDataPacket* meta)
int SrsRtmpFormat::on_metadata(SrsOnMetaDataPacket* meta)
{
int ret = ERROR_SUCCESS;
@ -52,15 +47,31 @@ int SrsFormat::on_metadata(SrsOnMetaDataPacket* meta)
return ret;
}
int SrsFormat::on_audio(SrsSharedPtrMessage* shared_audio)
int SrsRtmpFormat::on_audio(SrsSharedPtrMessage* shared_audio)
{
int ret = ERROR_SUCCESS;
return ret;
SrsSharedPtrMessage* msg = shared_audio;
char* data = msg->payload;
int size = msg->size;
return SrsFormat::on_audio(msg->timestamp, data, size);
}
int SrsFormat::on_video(SrsSharedPtrMessage* shared_video, bool is_sequence_header)
int SrsRtmpFormat::on_audio(int64_t timestamp, char* data, int size)
{
int ret = ERROR_SUCCESS;
return ret;
return SrsFormat::on_audio(timestamp, data, size);
}
int SrsRtmpFormat::on_video(SrsSharedPtrMessage* shared_video)
{
SrsSharedPtrMessage* msg = shared_video;
char* data = msg->payload;
int size = msg->size;
return SrsFormat::on_video(msg->timestamp, data, size);
}
int SrsRtmpFormat::on_video(int64_t timestamp, char* data, int size)
{
return SrsFormat::on_video(timestamp, data, size);
}
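// A hypothetical sketch (message accessors assumed) of how the RTMP-side wrapper is
// meant to be driven: shared messages from the RTMP stack are handed to the format,
// which strips the message envelope and reuses the kernel SrsFormat parser.
int example_on_message(SrsRtmpFormat* format, SrsSharedPtrMessage* msg)
{
    if (msg->is_audio()) {
        return format->on_audio(msg);
    } else if (msg->is_video()) {
        return format->on_video(msg);
    }
    return ERROR_SUCCESS;
}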


@ -30,32 +30,33 @@
#include <srs_core.hpp>
class SrsFrame;
#include <srs_kernel_codec.hpp>
class SrsBuffer;
class SrsAudioFrame;
class SrsVideoFrame;
class SrsAudioCodec;
class SrsVideoCodec;
class SrsOnMetaDataPacket;
class SrsSharedPtrMessage;
/**
* A codec format, including one or many streams, each stream identified by a frame.
* For example, a typical RTMP stream format consists of a video and an audio frame.
* Some RTMP streams may only have an audio stream, for instance, a radio application.
* Create special structures from the RTMP stream, for example, the metadata.
*/
class SrsFormat
class SrsRtmpFormat : public SrsFormat
{
public:
SrsFrame* audio;
SrsFrame* video;
SrsRtmpFormat();
virtual ~SrsRtmpFormat();
public:
SrsFormat();
virtual ~SrsFormat();
public:
// Initialize the format.
virtual int initialize();
// Initialize the format from metadata, optional.
virtual int on_metadata(SrsOnMetaDataPacket* meta);
// When got a parsed audio packet.
virtual int on_audio(SrsSharedPtrMessage* shared_audio);
virtual int on_audio(int64_t timestamp, char* data, int size);
// When got a parsed video packet.
virtual int on_video(SrsSharedPtrMessage* shared_video, bool is_sequence_header);
virtual int on_video(SrsSharedPtrMessage* shared_video);
virtual int on_video(int64_t timestamp, char* data, int size);
};
#endif
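// A rough sketch (assuming the kernel SrsFormat exposes vcodec/video and the frame's
// samples as used elsewhere in this commit) of the format-frame-sample chain: the
// format owns the parsed codec and the current frame, and each frame holds the
// demuxed samples (AVC NALUs or AAC raw blocks).
int example_parse_video(SrsFormat* format, int64_t dts, char* flv_tag, int size)
{
    int ret = ERROR_SUCCESS;
    
    if ((ret = format->on_video(dts, flv_tag, size)) != ERROR_SUCCESS) {
        return ret;
    }
    
    SrsVideoFrame* frame = format->video;      // the last parsed video frame
    for (int i = 0; i < frame->nb_samples; i++) {
        SrsSample* nalu = &frame->samples[i];  // one NALU per sample
        (void)nalu;
    }
    
    return ret;
}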


@ -136,7 +136,7 @@ SrsRtpPacket::SrsRtpPacket()
ssrc = 0;
payload = new SrsSimpleStream();
audio_samples = new SrsCodecSample();
audio = new SrsAudioFrame();
chunked = false;
completed = false;
}
@ -144,7 +144,7 @@ SrsRtpPacket::SrsRtpPacket()
SrsRtpPacket::~SrsRtpPacket()
{
srs_freep(payload);
srs_freep(audio_samples);
srs_freep(audio);
}
void SrsRtpPacket::copy(SrsRtpPacket* src)
@ -161,7 +161,9 @@ void SrsRtpPacket::copy(SrsRtpPacket* src)
chunked = src->chunked;
completed = src->completed;
audio_samples = new SrsCodecSample();
srs_freep(audio);
audio = new SrsAudioFrame();
}
void SrsRtpPacket::reap(SrsRtpPacket* src)
@ -172,9 +174,9 @@ void SrsRtpPacket::reap(SrsRtpPacket* src)
payload = src->payload;
src->payload = NULL;
srs_freep(audio_samples);
audio_samples = src->audio_samples;
src->audio_samples = NULL;
srs_freep(audio);
audio = src->audio;
src->audio = NULL;
}
int SrsRtpPacket::decode(SrsBuffer* stream)
@ -263,7 +265,7 @@ int SrsRtpPacket::decode_97(SrsBuffer* stream)
return ret;
}
if ((ret = audio_samples->add_sample_unit(sample, sample_size)) != ERROR_SUCCESS) {
if ((ret = audio->add_sample(sample, sample_size)) != ERROR_SUCCESS) {
srs_error("rtsp: rtp type97 add sample failed. ret=%d", ret);
return ret;
}
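// A minimal sketch (assuming the samples reference the packet payload, as the decode
// above suggests) of consuming the audio collected by decode_97: one RTP packet may
// carry several AAC frames, each appended via add_sample.
void example_consume_rtp_audio(SrsRtpPacket* pkt)
{
    for (int i = 0; i < pkt->audio->nb_samples; i++) {
        SrsSample* s = &pkt->audio->samples[i];
        // s->bytes/s->size are only valid while pkt and its payload are alive.
        (void)s;
    }
}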


@ -41,7 +41,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
class SrsBuffer;
class SrsSimpleStream;
class SrsCodecSample;
class SrsAudioFrame;
class ISrsProtocolReaderWriter;
// rtsp specification
@ -299,7 +299,7 @@ public:
/**
* the audio samples; one rtp packet may contain multiple audio samples.
*/
SrsCodecSample* audio_samples;
SrsAudioFrame* audio;
public:
SrsRtpPacket();
virtual ~SrsRtpPacket();