winlin

Absolute overflow also adds the deviation, for pure audio HLS.
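
The first hunk is the gist of the change: the absolute-overflow test for audio now also adds the per-segment deviation used by the floor (hls_ts_floor) algorithm, so audio-only segments reap on smoother boundaries. A minimal sketch of that condition with illustrative numbers (the standalone helper, and the assumption that SRS_HLS_FLOOR_REAP_PERCENT is 0.3, are mine, not SRS code):

    // Sketch only: the reap condition from is_segment_absolutely_overflow(),
    // pulled out as a free function so the numbers are easy to try.
    #include <cstdio>

    static bool absolutely_overflow(double duration, double hls_fragment, double hls_aof_ratio,
                                    bool hls_ts_floor, int deviation_ts, double reap_percent)
    {
        // the deviation only applies when the floor algorithm is enabled.
        double deviation = hls_ts_floor ? reap_percent * deviation_ts * hls_fragment : 0.0;
        return duration >= hls_aof_ratio * hls_fragment + deviation;
    }

    int main()
    {
        // fragment=10s, aof_ratio=2.0: the plain threshold is 20s; with
        // deviation_ts=1 and a 30% reap percent it shifts to 23s.
        printf("%d\n", absolutely_overflow(21.0, 10.0, 2.0, true, 1, 0.3)); // 0: keep filling
        printf("%d\n", absolutely_overflow(23.5, 10.0, 2.0, true, 1, 0.3)); // 1: reap segment
        return 0;
    }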

@@ -559,7 +559,13 @@ bool SrsHlsMuxer::is_segment_absolutely_overflow()
 {
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-83553950
     srs_assert(current);
-    return current->duration >= hls_aof_ratio * hls_fragment;
+
+    // use N% deviation, to smoother.
+    double deviation = hls_ts_floor? SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;
+    srs_info("hls: dur=%.2f, tar=%.2f, dev=%.2fms/%dp, frag=%.2f",
+        current->duration, hls_fragment + deviation, deviation, deviation_ts, hls_fragment);
+
+    return current->duration >= hls_aof_ratio * hls_fragment + deviation;
 }
 
 int SrsHlsMuxer::update_acodec(SrsCodecAudio ac)
@@ -968,7 +974,7 @@ int SrsHlsCache::write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
     // we use absolutely overflow of segment to make jwplayer/ffplay happy
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-71155184
     if (cache->audio && muxer->is_segment_absolutely_overflow()) {
-        srs_warn("hls: absolute audio reap segment.");
+        srs_info("hls: absolute audio reap segment.");
         if ((ret = reap_segment("audio", muxer, cache->audio->pts)) != ERROR_SUCCESS) {
             return ret;
         }
@@ -991,7 +997,7 @@ int SrsHlsCache::write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
     // do reap ts if any of:
     //      a. wait keyframe and got keyframe.
     //      b. always reap when not wait keyframe.
-    if (!muxer->wait_keyframe() || sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
+    if (!muxer->wait_keyframe()|| sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
        // when wait keyframe, there must exists idr frame in sample.
        if (!sample->has_idr && muxer->wait_keyframe()) {
            srs_warn("hls: ts starts without IDR, first nalu=%d, idr=%d", sample->first_nalu_type, sample->has_idr);
@@ -78,6 +78,7 @@ SrsTsChannel::SrsTsChannel()
     stream = SrsTsStreamReserved;
     msg = NULL;
     continuity_counter = 0;
+    context = NULL;
 }
 
 SrsTsChannel::~SrsTsChannel()
@@ -196,6 +197,7 @@ ISrsTsHandler::~ISrsTsHandler()
 
 SrsTsContext::SrsTsContext()
 {
+    pure_audio = false;
     vcodec = SrsCodecVideoReserved;
     acodec = SrsCodecAudioReserved1;
 }
@@ -210,6 +212,24 @@ SrsTsContext::~SrsTsContext()
     pids.clear();
 }
 
+bool SrsTsContext::is_pure_audio()
+{
+    return pure_audio;
+}
+
+void SrsTsContext::on_pmt_parsed()
+{
+    pure_audio = true;
+
+    std::map<int, SrsTsChannel*>::iterator it;
+    for (it = pids.begin(); it != pids.end(); ++it) {
+        SrsTsChannel* channel = it->second;
+        if (channel->apply == SrsTsPidApplyVideo) {
+            pure_audio = false;
+        }
+    }
+}
+
 void SrsTsContext::reset()
 {
     vcodec = SrsCodecVideoReserved;
@@ -230,6 +250,7 @@ void SrsTsContext::set(int pid, SrsTsPidApply apply_pid, SrsTsStream stream)
 
     if (pids.find(pid) == pids.end()) {
         channel = new SrsTsChannel();
+        channel->context = this;
         pids[pid] = channel;
     } else {
         channel = pids[pid];
@@ -2302,6 +2323,7 @@ int SrsTsPayloadPAT::psi_decode(SrsStream* stream)
 
     // update the apply pid table.
     packet->context->set(packet->pid, SrsTsPidApplyPAT);
+    packet->context->on_pmt_parsed();
 
     return ret;
 }
@@ -172,6 +172,7 @@ struct SrsTsChannel
     SrsTsPidApply apply;
     SrsTsStream stream;
    SrsTsMessage* msg;
+    SrsTsContext* context;
    // for encoder.
    u_int8_t continuity_counter;
 
@@ -343,6 +344,7 @@ class SrsTsContext
     // codec
 private:
     std::map<int, SrsTsChannel*> pids;
+    bool pure_audio;
     // encoder
 private:
     // when any codec changed, write the PAT/PMT.
@@ -353,6 +355,14 @@ public:
     virtual ~SrsTsContext();
 public:
     /**
+     * whether the hls stream is pure audio stream.
+     */
+    virtual bool is_pure_audio();
+    /**
+     * when PMT table parsed, we know some info about stream.
+     */
+    virtual void on_pmt_parsed();
+    /**
      * reset the context for a new ts segment start.
      */
     virtual void reset();
@@ -561,7 +561,7 @@ void SrsIngestSrsInput::fetch_all_ts(bool fresh_m3u8)
         }
 
         // only wait for a duration of last piece.
-        if (i == pieces.size() - 1) {
+        if (i == (int)pieces.size() - 1) {
             next_connect_time = srs_update_system_time_ms() + (int)tp->duration * 1000;
         }
     }
@@ -865,28 +865,34 @@ int SrsIngestSrsOutput::parse_message_queue()
 {
     int ret = ERROR_SUCCESS;
 
+    if (queue.empty()) {
+        return ret;
+    }
+
+    SrsTsMessage* first_ts_msg = queue.begin()->second;
+    SrsTsContext* context = first_ts_msg->channel->context;
+
     int nb_videos = 0;
-    int nb_audios = 0;
-    std::multimap<int64_t, SrsTsMessage*>::iterator it;
-    for (it = queue.begin(); it != queue.end(); ++it) {
-        SrsTsMessage* msg = it->second;
+    if (!context->is_pure_audio()) {
+        std::multimap<int64_t, SrsTsMessage*>::iterator it;
+        for (it = queue.begin(); it != queue.end(); ++it) {
+            SrsTsMessage* msg = it->second;
+
+            // publish audio or video.
+            if (msg->channel->stream == SrsTsStreamVideoH264) {
+                nb_videos++;
+            }
+        }
 
-        // publish audio or video.
-        if (msg->channel->stream == SrsTsStreamVideoH264) {
-            nb_videos++;
-        } else {
-            nb_audios++;
+        // always wait 2+ videos, to left one video in the queue.
+        // TODO: FIXME: support pure audio hls.
+        if (nb_videos <= 1) {
+            return ret;
         }
     }
 
-    // always wait 2+ videos, to left one video in the queue.
-    // TODO: FIXME: support pure audio hls.
-    if (nb_videos <= 1) {
-        return ret;
-    }
-
     // parse messages util the last video.
-    while (nb_videos > 1 && queue.size() > 0) {
+    while ((nb_videos > 1 || context->is_pure_audio()) && queue.size() > 0) {
         std::multimap<int64_t, SrsTsMessage*>::iterator it = queue.begin();
 
         SrsTsMessage* msg = it->second;
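
Note on the last hunk: the new SrsTsChannel::context back-pointer plus SrsTsContext::on_pmt_parsed() are what let parse_message_queue() drain a pure-audio source instead of waiting for a second video message. A rough sketch of that flow, assuming the SrsTsMessage/SrsTsContext types from the header hunks above (the drain_queue() driver itself is illustrative, not the actual SRS ingest code):

    // Sketch: once every SrsTsChannel carries a pointer back to its SrsTsContext,
    // the first queued message is enough to ask whether the stream is pure audio.
    #include <map>
    #include <stdint.h>

    void drain_queue(std::multimap<int64_t, SrsTsMessage*>& queue)
    {
        if (queue.empty()) {
            return;
        }

        SrsTsContext* context = queue.begin()->second->channel->context;

        if (context->is_pure_audio()) {
            // pure audio: there is no video keyframe to align on, so flush everything.
            while (!queue.empty()) {
                // ... mux the message to RTMP here ...
                queue.erase(queue.begin());
            }
            return;
        }

        // audio+video: as in parse_message_queue(), count the H.264 messages and
        // only drain when two or more are queued, leaving the last video behind
        // so the next batch still starts on a video boundary.
    }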