胡斌

Add code to output the merged video file; set the UID of the AV decoder.

... ... @@ -21,6 +21,7 @@ CAVDecoder::~CAVDecoder()
int CAVDecoder::add(media_info &info)
{
_media_role = info.m_role;
_uid = info.uid;
if (info.m_type == mt_audio) {
_a_start_time_ms = info.start_time_ms;
_a_end_time_ms = info.end_time_ms;
... ... @@ -42,13 +43,15 @@ int CAVDecoder::add(media_info &info)
if (_end_time_ms < info.end_time_ms) {
_end_time_ms = info.end_time_ms;
}
av_log(NULL, AV_LOG_INFO, "CAVDecoder add info:%lf, %"PRIu64", %"PRIu64"\n", _cur_a_ts_ms, _cur_v_ts_ms, _end_time_ms);
return 0;
}
unsigned int CAVDecoder::getuid()
{
return 0;
return _uid;
}
bool CAVDecoder::get_one_v_frame()
... ... @@ -58,12 +61,13 @@ bool CAVDecoder::get_one_v_frame()
if (_video_info.size()) {
ret = _video_decoder.get_one_frame(&_cur_v_frame, ts);
if (ret == 0) {
_cur_v_ts_ms = _v_start_time_ms + ts;
//_cur_v_ts_ms = _v_start_time_ms + ts;
_cur_v_ts_ms += VFRAME_DURATION_MS;
}
else {
_video_info.pop_front();
if (_cur_v_ts_ms < _end_time_ms) {
_cur_v_ts_ms += 50;//return last v frame
_cur_v_ts_ms += VFRAME_DURATION_MS;//return last v frame
ret = 0;
}
}
... ... @@ -71,7 +75,7 @@ bool CAVDecoder::get_one_v_frame()
if (ret) {//no video decoded
if (_cur_v_ts_ms < _end_time_ms) {//should have as video frame
_cur_v_ts_ms += 50;//return last v frame
_cur_v_ts_ms += VFRAME_DURATION_MS;//return last v frame
ret = 0;
if (!_cur_v_frame) {
_cur_v_frame = get_blank_frame();
... ... @@ -114,9 +118,9 @@ bool CAVDecoder::get_one_a_frame()
int64_t ts;
int ret = -1;
if (_audio_info.size()) {
ret = _audio_decoder.get_one_frame(&_cur_v_frame, ts);
ret = _audio_decoder.get_one_frame(&_cur_a_frame, ts);
if (ret == 0) {
_cur_a_ts_ms = _a_start_time_ms + ts;
_cur_a_ts_ms += AFRAME_DURATION_MS;
}
else {
_audio_info.pop_front();
... ...
... ... @@ -13,7 +13,7 @@ public:
unsigned int getuid();
bool get_one_a_frame();
bool get_one_v_frame();
int64_t _cur_a_ts_ms;
double _cur_a_ts_ms;
int64_t _cur_v_ts_ms;
media_role _media_role;
... ... @@ -29,6 +29,7 @@ protected:
int64_t _v_start_time_ms;
int64_t _v_end_time_ms;
int64_t _end_time_ms;
unsigned int _uid;
private:
AVFrame * get_blank_frame();
... ...
... ... @@ -4,8 +4,10 @@
CAVTranscoder::CAVTranscoder():
_start_time(INT64_MAX),
_all_processed(true),
_one2one(false),
_nOutputWidth(320)
_one2one(true),
_nOutputWidth(320),
_cur_out_v_ts(0),
_cur_out_a_ts(0)
{
if (_one2one) {
_nOutputHeight = 480;
... ... @@ -61,7 +63,7 @@ int64_t CAVTranscoder::transcode()
_all_processed = decoders_got_frame.size() == 0;
mix_and_output_vframe(decoders_got_frame);
_cur_v_time += 50;
_cur_v_time += VFRAME_DURATION_MS;
//sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
while (_cur_a_time < _cur_v_time)
... ... @@ -92,6 +94,25 @@ bool CAVTranscoder::all_processed()
int CAVTranscoder::close()
{
av_write_trailer(_ofmt_ctx);
#if USE_H264BSF
av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
av_bitstream_filter_close(aacbsfc);
#endif
int i;
for (i = 0; i<2; i++)
{
if (_ofmt_ctx && _ofmt_ctx->nb_streams > i && _ofmt_ctx->streams[i] && _ofmt_ctx->streams[i]->codec)
avcodec_close(_ofmt_ctx->streams[i]->codec);
}
if (_ofmt_ctx && !(_ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_close(_ofmt_ctx->pb);
avformat_free_context(_ofmt_ctx);
return 0;
}
... ... @@ -99,20 +120,20 @@ int CAVTranscoder::close()
int CAVTranscoder::open_output_file(const char *filename)
{
AVStream *out_stream;
AVCodecContext *dec_ctx, *enc_ctx;
AVCodecContext *enc_ctx;
AVCodec *encoder;
int ret;
unsigned int i;
ofmt_ctx = NULL;
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
if (!ofmt_ctx) {
_ofmt_ctx = NULL;
avformat_alloc_output_context2(&_ofmt_ctx, NULL, NULL, filename);
if (!_ofmt_ctx) {
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
return AVERROR_UNKNOWN;
}
for (i = 0; i < 2; i++) {
out_stream = avformat_new_stream(ofmt_ctx, NULL);
out_stream = avformat_new_stream(_ofmt_ctx, NULL);
if (!out_stream) {
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
return AVERROR_UNKNOWN;
... ... @@ -120,7 +141,7 @@ int CAVTranscoder::open_output_file(const char *filename)
enc_ctx = out_stream->codec;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
if (_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (0 == i) {
... ... @@ -177,10 +198,10 @@ int CAVTranscoder::open_output_file(const char *filename)
}
}
av_dump_format(ofmt_ctx, 0, filename, 1);
av_dump_format(_ofmt_ctx, 0, filename, 1);
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
if (!(_ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&_ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
return ret;
... ... @@ -188,7 +209,7 @@ int CAVTranscoder::open_output_file(const char *filename)
}
/* init muxer, write output file header */
ret = avformat_write_header(ofmt_ctx, NULL);
ret = avformat_write_header(_ofmt_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
return ret;
... ... @@ -268,6 +289,9 @@ int CAVTranscoder::open_output_file(const char *filename)
int CAVTranscoder::fillDestFrame(AVFrame * pDstFrame, AVFrame * pSrcFrame, int x, int y)
{
if (!pSrcFrame){
return 0;
}
for (int i = 0; i < pSrcFrame->height; i++) {
memcpy(pDstFrame->data[0] + (y + i)*pDstFrame->linesize[0] + x, pSrcFrame->data[0] + i * pSrcFrame->linesize[0], pSrcFrame->linesize[0]);
}
... ... @@ -288,40 +312,53 @@ int CAVTranscoder::open_output_file(const char *filename)
avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
if (decoders_got_frame.size() == 2){
fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, decoders_got_frame[0]->_media_role == mr_teacher ? 0 : 240);
decoders_got_frame[0]->free_cur_v_frame();
fillDestFrame(pDstFrame, decoders_got_frame[1]->_cur_v_frame, 0, decoders_got_frame[1]->_media_role == mr_teacher ? 0 : 240);
decoders_got_frame[1]->free_cur_v_frame();
}
else {
else if (decoders_got_frame.size() == 1)
{
fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, 0);
decoders_got_frame[0]->free_cur_v_frame();
//todo: fill the bottom half image with pure color
}
else {
//fill with last image?
}
//fill the timestamp of dest frame
_cur_out_v_ts++;
pDstFrame->pts = _cur_out_v_ts;
pDstFrame->pkt_dts = _cur_out_v_ts;
pDstFrame->pkt_pts = _cur_out_v_ts;
pDstFrame->format = AV_PIX_FMT_YUV420P;
pDstFrame->width = _nOutputWidth;
pDstFrame->height = _nOutputHeight;
//send to encoder
int got_frame = 0;
encode_write_frame(pDstFrame, 0, &got_frame);
delete dstbuf;
return 0;
}
int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int CAVTranscoder::encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int ret;
int got_frame_local;
AVPacket enc_pkt;
#if 0
int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
(ifmt_ctx->streams[stream_index]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
stream_index == 0 ? avcodec_encode_video2 : avcodec_encode_audio2;
if (!got_frame)
got_frame = &got_frame_local;
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
/* encode filtered frame */
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
ret = enc_func(_ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
filt_frame, got_frame);
av_frame_free(&filt_frame);
if (ret < 0)
... ... @@ -332,23 +369,23 @@ int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_
/* prepare packet for muxing */
enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base);
_ofmt_ctx->streams[stream_index]->codec->time_base,
_ofmt_ctx->streams[stream_index]->time_base);
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
#endif
ret = av_interleaved_write_frame(_ofmt_ctx, &enc_pkt);
return ret;
}
#if 0
static int flush_encoder(unsigned int stream_index)
int CAVTranscoder::flush_encoder(unsigned int stream_index)
{
int ret;
int got_frame;
if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
if (!(_ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
CODEC_CAP_DELAY))
return 0;
... ... @@ -362,4 +399,3 @@ static int flush_encoder(unsigned int stream_index)
}
return ret;
}
#endif
\ No newline at end of file
... ...
... ... @@ -17,12 +17,14 @@ public:
protected:
vector < CAVDecoder *> _decoders;
AVFormatContext *ofmt_ctx;
AVFormatContext * _ofmt_ctx;
int64_t _start_time;
int64_t _cur_a_time;
double _cur_a_time;
int64_t _cur_v_time;
int _nOutputWidth;
int _nOutputHeight;
int64_t _cur_out_v_ts;
int64_t _cur_out_a_ts;
private:
int mix_and_output_vframe(vector<CAVDecoder *> & decoders_got_frame);
... ... @@ -33,5 +35,7 @@ private:
int mix_and_output_one2many_vframe(vector<CAVDecoder *> & decoders_got_frame);
int fillDestFrame(AVFrame * pDstFrame, AVFrame * pSrcFrame, int x, int y);
int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame);
int flush_encoder(unsigned int stream_index);
};
... ...
... ... @@ -77,5 +77,5 @@ typedef struct FilteringContext {
} FilteringContext;
#define AFRAME_DURATION_MS 20
#define AFRAME_DURATION_MS 0.02133333
#define VFRAME_DURATION_MS 50
\ No newline at end of file
... ...
... ... @@ -597,9 +597,10 @@ void add_media_infos()
m.start_time_ms = f.start_time * 1000;
m.end_time_ms = f.end_time * 1000;
add_media_info(m);
/*
m.t_type = tt_end;
m.type_time = m.end_time;
add_media_info(m);
add_media_info(m);*/
}
list<media_info>::iterator it = sorted_media.begin();
... ... @@ -834,7 +835,7 @@ string get_outmedia_file_name(const char * input)
if (p) {
*p = 0;
}
strcat(out_media_file, "_out.mp4");
strcat(out_media_file, "_out.ts");
return out_media_file;
}
... ...
... ... @@ -21,9 +21,6 @@
<ClCompile Include="tools.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="merge_pip.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="VideoDecoder.cpp">
<Filter>源文件</Filter>
</ClCompile>
... ... @@ -42,6 +39,9 @@
<ClCompile Include="AudioEncoder.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="merge_pip.cpp">
<Filter>资源文件</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="VideoDecoder.h">
... ...