winlin

Merge branch '2.0release' into develop

@@ -542,6 +542,11 @@ bool SrsHlsMuxer::is_segment_overflow()
 {
     srs_assert(current);
 
+    // to prevent very small segment.
+    if (current->duration * 1000 < 2 * SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS) {
+        return false;
+    }
+
     // use N% deviation, to smoother.
     double deviation = hls_ts_floor? SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;
     srs_info("hls: dur=%.2f, tar=%.2f, dev=%.2fms/%dp, frag=%.2f",
@@ -559,7 +564,18 @@ bool SrsHlsMuxer::is_segment_absolutely_overflow()
 {
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-83553950
     srs_assert(current);
-    return current->duration >= hls_aof_ratio * hls_fragment;
+
+    // to prevent very small segment.
+    if (current->duration * 1000 < 2 * SRS_AUTO_HLS_SEGMENT_MIN_DURATION_MS) {
+        return false;
+    }
+
+    // use N% deviation, to smoother.
+    double deviation = hls_ts_floor? SRS_HLS_FLOOR_REAP_PERCENT * deviation_ts * hls_fragment : 0.0;
+    srs_info("hls: dur=%.2f, tar=%.2f, dev=%.2fms/%dp, frag=%.2f",
+        current->duration, hls_fragment + deviation, deviation, deviation_ts, hls_fragment);
+
+    return current->duration >= hls_aof_ratio * hls_fragment + deviation;
 }
 
 int SrsHlsMuxer::update_acodec(SrsCodecAudio ac)
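Note: both reap checks above now share the same two-part decision: never reap a segment shorter than twice the minimum duration, and when the ts-floor algorithm is enabled allow an N% deviation before reaping. A minimal standalone sketch of that logic (not SRS code; the constant values and the should_reap name are assumptions for illustration, the real constants and config live in SRS itself):

    #include <cstdio>

    // assumed values for illustration only; SRS defines its own constants
    // and reads hls_fragment / hls_aof_ratio from the vhost config.
    static const int kSegmentMinDurationMs = 100;
    static const double kFloorReapPercent = 0.3;

    // duration and fragment are in seconds, matching the fields used above.
    bool should_reap(double duration, double fragment, double aof_ratio,
                     bool ts_floor, int deviation_ts)
    {
        // never reap a very small segment.
        if (duration * 1000 < 2 * kSegmentMinDurationMs) {
            return false;
        }
        // use N% deviation when the floor algorithm is on, to smooth reaping.
        double deviation = ts_floor ? kFloorReapPercent * deviation_ts * fragment : 0.0;
        return duration >= aof_ratio * fragment + deviation;
    }

    int main()
    {
        printf("%d\n", should_reap(0.15, 10.0, 2.0, true, 1)); // 0: too small to reap
        printf("%d\n", should_reap(24.0, 10.0, 2.0, true, 1)); // 1: past aof_ratio * fragment + deviation
        return 0;
    }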
@@ -968,7 +984,7 @@ int SrsHlsCache::write_audio(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
     // we use absolutely overflow of segment to make jwplayer/ffplay happy
     // @see https://github.com/winlinvip/simple-rtmp-server/issues/151#issuecomment-71155184
     if (cache->audio && muxer->is_segment_absolutely_overflow()) {
-        srs_warn("hls: absolute audio reap segment.");
+        srs_info("hls: absolute audio reap segment.");
         if ((ret = reap_segment("audio", muxer, cache->audio->pts)) != ERROR_SUCCESS) {
             return ret;
         }
@@ -991,7 +1007,7 @@ int SrsHlsCache::write_video(SrsAvcAacCodec* codec, SrsHlsMuxer* muxer, int64_t
     // do reap ts if any of:
     // a. wait keyframe and got keyframe.
    // b. always reap when not wait keyframe.
-    if (!muxer->wait_keyframe() || sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
+    if (!muxer->wait_keyframe()|| sample->frame_type == SrsCodecVideoAVCFrameKeyFrame) {
         // when wait keyframe, there must exists idr frame in sample.
         if (!sample->has_idr && muxer->wait_keyframe()) {
             srs_warn("hls: ts starts without IDR, first nalu=%d, idr=%d", sample->first_nalu_type, sample->has_idr);
@@ -78,6 +78,7 @@ SrsTsChannel::SrsTsChannel()
     stream = SrsTsStreamReserved;
     msg = NULL;
     continuity_counter = 0;
+    context = NULL;
 }
 
 SrsTsChannel::~SrsTsChannel()
@@ -196,6 +197,7 @@ ISrsTsHandler::~ISrsTsHandler()
 
 SrsTsContext::SrsTsContext()
 {
+    pure_audio = false;
     vcodec = SrsCodecVideoReserved;
     acodec = SrsCodecAudioReserved1;
 }
@@ -210,6 +212,24 @@ SrsTsContext::~SrsTsContext()
     pids.clear();
 }
 
+bool SrsTsContext::is_pure_audio()
+{
+    return pure_audio;
+}
+
+void SrsTsContext::on_pmt_parsed()
+{
+    pure_audio = true;
+
+    std::map<int, SrsTsChannel*>::iterator it;
+    for (it = pids.begin(); it != pids.end(); ++it) {
+        SrsTsChannel* channel = it->second;
+        if (channel->apply == SrsTsPidApplyVideo) {
+            pure_audio = false;
+        }
+    }
+}
+
 void SrsTsContext::reset()
 {
     vcodec = SrsCodecVideoReserved;
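Note: after the PID table is parsed, the context scans every known channel and flags the stream as pure audio only when no PID is applied to video. A small standalone sketch of that scan, using assumed stand-in types (PidApply, detect_pure_audio) rather than the SRS classes:

    #include <map>

    // stand-ins for SrsTsPidApply and the pid table, for illustration only.
    enum PidApply { PidApplyReserved, PidApplyVideo, PidApplyAudio };

    // mirrors SrsTsContext::on_pmt_parsed(): pure audio iff no PID carries video.
    bool detect_pure_audio(const std::map<int, PidApply>& pids)
    {
        bool pure_audio = true;
        std::map<int, PidApply>::const_iterator it;
        for (it = pids.begin(); it != pids.end(); ++it) {
            if (it->second == PidApplyVideo) {
                pure_audio = false;
            }
        }
        return pure_audio;
    }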
@@ -230,6 +250,7 @@ void SrsTsContext::set(int pid, SrsTsPidApply apply_pid, SrsTsStream stream)
 
     if (pids.find(pid) == pids.end()) {
         channel = new SrsTsChannel();
+        channel->context = this;
         pids[pid] = channel;
     } else {
         channel = pids[pid];
@@ -2302,6 +2323,7 @@ int SrsTsPayloadPAT::psi_decode(SrsStream* stream)
 
     // update the apply pid table.
     packet->context->set(packet->pid, SrsTsPidApplyPAT);
+    packet->context->on_pmt_parsed();
 
     return ret;
 }
@@ -172,6 +172,7 @@ struct SrsTsChannel
     SrsTsPidApply apply;
     SrsTsStream stream;
     SrsTsMessage* msg;
+    SrsTsContext* context;
     // for encoder.
     u_int8_t continuity_counter;
 
@@ -343,6 +344,7 @@ class SrsTsContext
 // codec
 private:
     std::map<int, SrsTsChannel*> pids;
+    bool pure_audio;
 // encoder
 private:
     // when any codec changed, write the PAT/PMT.
@@ -353,6 +355,14 @@ public:
     virtual ~SrsTsContext();
 public:
     /**
+     * whether the hls stream is pure audio stream.
+     */
+    virtual bool is_pure_audio();
+    /**
+     * when PMT table parsed, we know some info about stream.
+     */
+    virtual void on_pmt_parsed();
+    /**
      * reset the context for a new ts segment start.
      */
     virtual void reset();
@@ -561,7 +561,7 @@ void SrsIngestSrsInput::fetch_all_ts(bool fresh_m3u8)
         }
 
         // only wait for a duration of last piece.
-        if (i == pieces.size() - 1) {
+        if (i == (int)pieces.size() - 1) {
            next_connect_time = srs_update_system_time_ms() + (int)tp->duration * 1000;
        }
    }
@@ -657,7 +657,7 @@ public:
    SrsIngestSrsOutput(SrsHttpUri* rtmp) {
        out_rtmp = rtmp;
        disconnected = false;
-        raw_aac_dts = 0;
+        raw_aac_dts = srs_update_system_time_ms();
 
        req = NULL;
        io = NULL;
@@ -807,12 +807,14 @@ int SrsIngestSrsOutput::do_on_aac_frame(SrsStream* avs, double duration)
 {
     int ret = ERROR_SUCCESS;
 
+    u_int32_t duration_ms = (u_int32_t)(duration * 1000);
+
     // ts tbn to flv tbn.
     u_int32_t dts = (u_int32_t)raw_aac_dts;
-    raw_aac_dts += (int64_t)(duration * 1000);
+    raw_aac_dts += duration_ms;
 
     // got the next msg to calc the delta duration for each audio.
-    u_int32_t max_dts = dts + (u_int32_t)(duration * 1000);
+    u_int32_t max_dts = dts + duration_ms;
 
     // send each frame.
     while (!avs->empty()) {
@@ -852,7 +854,7 @@ int SrsIngestSrsOutput::do_on_aac_frame(SrsStream* avs, double duration)
        }
 
        // calc the delta of dts, when previous frame output.
-        u_int32_t delta = (duration * 1000) / (avs->size() / frame_size);
+        u_int32_t delta = duration_ms / (avs->size() / frame_size);
        dts = (u_int32_t)(srs_min(max_dts, dts + delta));
    }
 
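Note: the factored-out duration_ms is spread across the AAC frames of the piece, and the running dts is clamped to max_dts so one piece never spills into the next. A simplified standalone sketch of that spreading, using a fixed frame count instead of the remaining-bytes calculation in the loop above; all numbers are made up for the example:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        double duration = 0.512;                          // seconds, from the piece
        unsigned int duration_ms = (unsigned int)(duration * 1000);
        int nb_frames = 8;                                // ADTS frames in the payload

        unsigned int dts = 0;                             // running timestamp in ms
        unsigned int max_dts = dts + duration_ms;         // next piece starts here
        unsigned int delta = duration_ms / nb_frames;     // per-frame step

        for (int i = 0; i < nb_frames; i++) {
            printf("frame %d dts=%u\n", i, dts);          // each frame is sent at dts
            dts = std::min(max_dts, dts + delta);         // advance, never past max_dts
        }
        return 0;
    }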
@@ -863,28 +865,36 @@ int SrsIngestSrsOutput::parse_message_queue()
 {
     int ret = ERROR_SUCCESS;
 
+    if (queue.empty()) {
+        return ret;
+    }
+
+    SrsTsMessage* first_ts_msg = queue.begin()->second;
+    SrsTsContext* context = first_ts_msg->channel->context;
+    bool cpa = context->is_pure_audio();
+
     int nb_videos = 0;
-    int nb_audios = 0;
-    std::multimap<int64_t, SrsTsMessage*>::iterator it;
-    for (it = queue.begin(); it != queue.end(); ++it) {
-        SrsTsMessage* msg = it->second;
+    if (!cpa) {
+        std::multimap<int64_t, SrsTsMessage*>::iterator it;
+        for (it = queue.begin(); it != queue.end(); ++it) {
+            SrsTsMessage* msg = it->second;
+
+            // publish audio or video.
+            if (msg->channel->stream == SrsTsStreamVideoH264) {
+                nb_videos++;
+            }
+        }
 
-        // publish audio or video.
-        if (msg->channel->stream == SrsTsStreamVideoH264) {
-            nb_videos++;
-        } else {
-            nb_audios++;
+        // always wait 2+ videos, to left one video in the queue.
+        // TODO: FIXME: support pure audio hls.
+        if (nb_videos <= 1) {
+            return ret;
         }
     }
 
-    // always wait 2+ videos, to left one video in the queue.
-    // TODO: FIXME: support pure audio hls.
-    if (nb_videos <= 1) {
-        return ret;
-    }
-
     // parse messages util the last video.
-    while (nb_videos > 1 && queue.size() > 0) {
+    while ((cpa && queue.size() > 1) || nb_videos > 1) {
+        srs_assert(!queue.empty());
         std::multimap<int64_t, SrsTsMessage*>::iterator it = queue.begin();
 
         SrsTsMessage* msg = it->second;
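Note: the drain loop above keeps a different amount of data queued depending on the stream type: for pure-audio TS it leaves the last message in the queue, otherwise it keeps draining while more than one video message remains (per the code comment, always leave one video in the queue). A standalone sketch of just that condition, with an assumed should_drain name; in the pure-audio case nb_videos stays 0, so the sketch is equivalent to the while condition above:

    #include <cstddef>

    // mirrors: while ((cpa && queue.size() > 1) || nb_videos > 1)
    bool should_drain(bool pure_audio, size_t queue_size, int nb_videos)
    {
        if (pure_audio) {
            // leave the last audio message queued.
            return queue_size > 1;
        }
        // always leave one video in the queue.
        return nb_videos > 1;
    }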