mirror of https://github.com/ossrs/srs.git
synced 2025-02-13 11:51:57 +00:00

Merge branch '2.0release' into develop

This commit is contained in:
commit 114f4447bb

4 changed files with 84 additions and 26 deletions
@@ -542,6 +542,11 @@ bool SrsHlsMuxer::is_segment_overflow()
 {
     srs_assert(current);
 
+    // to prevent very small segment.
+    if (current->duration * 1000 < 2 * SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS) {
+        return false;
+    }
+    
     // use N% deviation, to smoother.
     double deviation = hls_ts_floor? SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;
     srs_info("hls: dur=%.2f, tar=%.2f, dev=%.2fms/%dp, frag=%.2f",

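The reap decision above now has two parts: a guard that refuses to cut a segment shorter than twice the configured minimum, and a target duration that may drift by a deviation when hls_ts_floor is enabled (presumably so a muxer that has drifted from the floor-aligned numbering can catch up). A minimal standalone sketch of that decision; the free-function shape and parameter names are illustrative, not the real SrsHlsMuxer members:

// Illustrative sketch only: the real logic lives in SrsHlsMuxer and reads
// these values from members/config rather than parameters.
bool should_reap_segment(double duration_s,    // current segment duration, seconds
                         double fragment_s,    // target fragment duration (hls_fragment)
                         bool ts_floor,        // hls_ts_floor enabled?
                         int deviation_ts,     // accumulated deviation, in fragments
                         double floor_percent, // SRS_HLS_FLOOR_REAP_PERCENT stand-in
                         int min_segment_ms)   // SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS stand-in
{
    // never reap a segment shorter than twice the minimum duration.
    if (duration_s * 1000 < 2 * min_segment_ms) {
        return false;
    }
    // in floor mode, let the target drift by a percentage of the fragment,
    // scaled by the accumulated deviation.
    double deviation = ts_floor ? floor_percent * deviation_ts * fragment_s : 0.0;
    return duration_s >= fragment_s + deviation;
}
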
@@ -559,7 +564,18 @@ bool SrsHlsMuxer::is_segment_absolutely_overflow()
 {
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-83553950
     srs_assert(current);
-    return current->duration >= hls_aof_ratio * hls_fragment;
+    
+    // to prevent very small segment.
+    if (current->duration * 1000 < 2 * SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS) {
+        return false;
+    }
+    
+    // use N% deviation, to smoother.
+    double deviation = hls_ts_floor? SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;
+    srs_info("hls: dur=%.2f, tar=%.2f, dev=%.2fms/%dp, frag=%.2f",
+        current->duration, hls_fragment + deviation, deviation, deviation_ts, hls_fragment);
+    
+    return current->duration >= hls_aof_ratio * hls_fragment + deviation;
 }
 
 int SrsHlsMuxer::update_acodec(SrsCodecAudio ac)

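is_segment_absolutely_overflow() applies the same minimum-duration guard and deviation, but scales the target by hls_aof_ratio, so with a ratio above 1.0 the absolute check fires later than the normal one. A quick numeric check, using made-up values rather than SRS defaults:

#include <cassert>

int main() {
    // made-up values: 10s fragment, audio-overflow ratio 2.0, no deviation.
    double hls_fragment = 10.0, hls_aof_ratio = 2.0, deviation = 0.0;
    double duration = 12.0; // current segment length, seconds

    bool overflow = duration >= hls_fragment + deviation;                            // true at 12s
    bool absolutely_overflow = duration >= hls_aof_ratio * hls_fragment + deviation; // not until 20s

    assert(overflow && !absolutely_overflow);
    return 0;
}
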
@@ -968,7 +984,7 @@ int SrsHlsCache::write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
     // we use absolutely overflow of segment to make jwplayer/ffplay happy
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-71155184
     if (cache->audio && muxer->is_segment_absolutely_overflow()) {
-        srs_warn("hls: absolute audio reap segment.");
+        srs_info("hls: absolute audio reap segment.");
         if ((ret = reap_segment("audio", muxer, cache->audio->pts)) != ERROR_SUCCESS) {
             return ret;
         }

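The audio path reaps only on the absolute check; per the linked issue this is presumably the pure-audio case, where no video keyframe arrives to trigger the normal reap, so the segment has to be cut from the audio side once it is well past the target. A compressed sketch of that guard, with stand-in types instead of the real SrsHlsMuxer/SrsHlsCache API:

// Stand-in types for illustration; only the shape of the guard mirrors the hunk above.
struct MuxerSketch {
    double duration_s, fragment_s, aof_ratio;
    bool is_segment_absolutely_overflow() const {
        return duration_s >= aof_ratio * fragment_s; // deviation omitted for brevity
    }
};

// audio-driven reap: cut the segment only when it is far past the target,
// because no video keyframe will do it in a pure-audio stream.
bool maybe_reap_on_audio(const MuxerSketch& muxer, bool has_cached_audio) {
    return has_cached_audio && muxer.is_segment_absolutely_overflow();
}
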
@@ -78,6 +78,7 @@ SrsTsChannel::SrsTsChannel()
     stream = SrsTsStreamReserved;
     msg = NULL;
     continuity_counter = 0;
+    context = NULL;
 }
 
 SrsTsChannel::~SrsTsChannel()

@@ -196,6 +197,7 @@ ISrsTsHandler::~ISrsTsHandler()
 
 SrsTsContext::SrsTsContext()
 {
+    pure_audio = false;
     vcodec = SrsCodecVideoReserved;
     acodec = SrsCodecAudioReserved1;
 }

@@ -210,6 +212,24 @@ SrsTsContext::~SrsTsContext()
     pids.clear();
 }
 
+bool SrsTsContext::is_pure_audio()
+{
+    return pure_audio;
+}
+
+void SrsTsContext::on_pmt_parsed()
+{
+    pure_audio = true;
+    
+    std::map<int, SrsTsChannel*>::iterator it;
+    for (it = pids.begin(); it != pids.end(); ++it) {
+        SrsTsChannel* channel = it->second;
+        if (channel->apply == SrsTsPidApplyVideo) {
+            pure_audio = false;
+        }
+    }
+}
+
 void SrsTsContext::reset()
 {
     vcodec = SrsCodecVideoReserved;

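on_pmt_parsed() recomputes the flag from scratch each time: it assumes pure audio, then clears the flag if any registered PID carries video. It is invoked from the PSI decode path further down in this diff, right after context->set() records the PID's role, so the table is already up to date when the scan runs. A simplified standalone model, with a plain enum and map in place of the SRS types:

#include <map>

// stand-ins for the SRS enums/structs, for illustration only.
enum PidApply { PidApplyReserved, PidApplyAudio, PidApplyVideo };

struct TsContextModel {
    std::map<int, PidApply> pids;
    bool pure_audio;

    TsContextModel() : pure_audio(false) {}

    // same shape as SrsTsContext::on_pmt_parsed(): assume pure audio,
    // then clear the flag if any known PID carries video.
    void on_pmt_parsed() {
        pure_audio = true;
        for (std::map<int, PidApply>::iterator it = pids.begin(); it != pids.end(); ++it) {
            if (it->second == PidApplyVideo) {
                pure_audio = false;
            }
        }
    }
};
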
@@ -230,6 +250,7 @@ void SrsTsContext::set(int pid, SrsTsPidApply apply_pid, SrsTsStream stream)
 
     if (pids.find(pid) == pids.end()) {
         channel = new SrsTsChannel();
+        channel->context = this;
         pids[pid] = channel;
     } else {
         channel = pids[pid];

@@ -2302,6 +2323,7 @@ int SrsTsPayloadPAT::psi_decode(SrsStream* stream)
 
     // update the apply pid table.
     packet->context->set(packet->pid, SrsTsPidApplyPAT);
+    packet->context->on_pmt_parsed();
 
     return ret;
 }

@@ -172,6 +172,7 @@ struct SrsTsChannel
     SrsTsPidApply apply;
     SrsTsStream stream;
     SrsTsMessage* msg;
+    SrsTsContext* context;
     // for encoder.
     u_int8_t continuity_counter;
 

@@ -343,6 +344,7 @@ class SrsTsContext
 // codec
 private:
     std::map<int, SrsTsChannel*> pids;
+    bool pure_audio;
 // encoder
 private:
     // when any codec changed, write the PAT/PMT.

@@ -352,6 +354,14 @@ public:
     SrsTsContext();
     virtual ~SrsTsContext();
 public:
+    /**
+    * whether the hls stream is pure audio stream.
+    */
+    virtual bool is_pure_audio();
+    /**
+    * when PMT table parsed, we know some info about stream.
+    */
+    virtual void on_pmt_parsed();
     /**
     * reset the context for a new ts segment start.
     */

@@ -561,7 +561,7 @@ void SrsIngestSrsInput::fetch_all_ts(bool fresh_m3u8)
         }
         
         // only wait for a duration of last piece.
-        if (i == pieces.size() - 1) {
+        if (i == (int)pieces.size() - 1) {
            next_connect_time = srs_update_system_time_ms() + (int)tp->duration * 1000;
        }
    }

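The only change above is the cast: pieces.size() returns an unsigned type (typically size_t), so comparing it directly with a signed loop index draws a -Wsign-compare warning, and the cast keeps both sides signed. A minimal illustration of the pattern, using a hypothetical vector in place of the real pieces container:

#include <vector>

int main() {
    std::vector<int> pieces(3, 0); // hypothetical stand-in for the real pieces list
    for (int i = 0; i < (int)pieces.size(); i++) {
        // without the cast, `i == pieces.size() - 1` compares int against an
        // unsigned size_t and warns under -Wsign-compare.
        bool is_last = (i == (int)pieces.size() - 1);
        (void)is_last;
    }
    return 0;
}
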
@@ -657,7 +657,7 @@ public:
     SrsIngestSrsOutput(SrsHttpUri* rtmp) {
         out_rtmp = rtmp;
         disconnected = false;
-        raw_aac_dts = 0;
+        raw_aac_dts = srs_update_system_time_ms();
         
         req = NULL;
         io = NULL;

@@ -807,12 +807,14 @@ int SrsIngestSrsOutput::do_on_aac_frame(SrsStream* avs, double duration)
 {
     int ret = ERROR_SUCCESS;
 
+    u_int32_t duration_ms = (u_int32_t)(duration * 1000);
+    
     // ts tbn to flv tbn.
     u_int32_t dts = (u_int32_t)raw_aac_dts;
-    raw_aac_dts += (int64_t)(duration * 1000);
+    raw_aac_dts += duration_ms;
 
     // got the next msg to calc the delta duration for each audio.
-    u_int32_t max_dts = dts + (u_int32_t)(duration * 1000);
+    u_int32_t max_dts = dts + duration_ms;
 
     // send each frame.
     while (!avs->empty()) {

@@ -852,7 +854,7 @@ int SrsIngestSrsOutput::do_on_aac_frame(SrsStream* avs, double duration)
         }
         
         // calc the delta of dts, when previous frame output.
-        u_int32_t delta = (duration * 1000) / (avs->size() / frame_size);
+        u_int32_t delta = duration_ms / (avs->size() / frame_size);
         dts = (u_int32_t)(srs_min(max_dts, dts + delta));
     }
 

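Both do_on_aac_frame() hunks fold the repeated (duration * 1000) into a single duration_ms, then spread it over the AAC frames in the buffer: each frame advances dts by a share of duration_ms, clamped to max_dts so the batch never overruns its time window. A standalone sketch of that spreading; the frame count and duration are made up, and the loop is a simplification of the byte-driven loop in the real code:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main() {
    uint32_t duration_ms = 120; // whole buffer covers 120ms (made up)
    int frames_left = 6;        // AAC frames still to send (made up)

    uint32_t dts = 0;                     // base dts for this buffer
    uint32_t max_dts = dts + duration_ms; // never advance past the buffer end

    while (frames_left > 0) {
        printf("send frame at dts=%u\n", dts);
        // split the buffer duration over the remaining frames, and clamp.
        uint32_t delta = duration_ms / (uint32_t)frames_left;
        dts = min_u32(max_dts, dts + delta);
        frames_left--;
    }
    return 0;
}
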
@@ -863,8 +865,16 @@ int SrsIngestSrsOutput::parse_message_queue()
 {
     int ret = ERROR_SUCCESS;
 
     if (queue.empty()) {
         return ret;
     }
 
+    SrsTsMessage* first_ts_msg = queue.begin()->second;
+    SrsTsContext* context = first_ts_msg->channel->context;
+    bool cpa = context->is_pure_audio();
+    
     int nb_videos = 0;
-    int nb_audios = 0;
+    if (!cpa) {
         std::multimap<int64_t, SrsTsMessage*>::iterator it;
         for (it = queue.begin(); it != queue.end(); ++it) {
             SrsTsMessage* msg = it->second;

@@ -872,8 +882,6 @@ int SrsIngestSrsOutput::parse_message_queue()
             // publish audio or video.
             if (msg->channel->stream == SrsTsStreamVideoH264) {
                 nb_videos++;
-            } else {
-                nb_audios++;
             }
         }
 

@@ -882,9 +890,11 @@ int SrsIngestSrsOutput::parse_message_queue()
         if (nb_videos <= 1) {
             return ret;
         }
+    }
 
     // parse messages util the last video.
-    while (nb_videos > 1 && queue.size() > 0) {
+    while ((cpa && queue.size() > 1) || nb_videos > 1) {
+        srs_assert(!queue.empty());
         std::multimap<int64_t, SrsTsMessage*>::iterator it = queue.begin();
 
         SrsTsMessage* msg = it->second;

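The rewritten loop condition covers both inputs: for audio+video it keeps draining while more than one video message is queued (parsing up to, but not including, the last video, as the comment says), and for pure audio (cpa) it drains until a single message remains. A small standalone model of that draining rule; the nb_videos bookkeeping inside the loop exists only so the model terminates, since the real function maintains its own counters:

#include <stdint.h>
#include <cassert>
#include <cstdio>
#include <map>

struct MsgModel { bool is_video; };

// Simplified version of the draining rule in the hunk above.
void drain(std::multimap<int64_t, MsgModel>& queue, bool cpa, int nb_videos)
{
    while ((cpa && queue.size() > 1) || nb_videos > 1) {
        assert(!queue.empty()); // mirrors the srs_assert in the real loop
        std::multimap<int64_t, MsgModel>::iterator it = queue.begin();
        MsgModel msg = it->second;
        queue.erase(it);

        if (msg.is_video) {
            nb_videos--; // model-only bookkeeping so the loop makes progress
        }
        printf("flush %s message\n", msg.is_video ? "video" : "audio");
    }
}
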