胡斌

remove unused files

one2one transcode works; still need to add a still frame when one partner has no video but has audio
add some code about swscale, unfinished
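A note on the still-frame item above: when a participant delivers audio but no video, the composited output still needs a picture for every output frame. One way to do that, sketched below and not part of this commit, is to reuse the last decoded video frame and fall back to a black YUV420P fill; the method name get_still_frame and the member _last_v_frame are hypothetical.

    // Hedged sketch, not in this commit: return a still frame for a decoder
    // that currently has audio but no video. get_still_frame and _last_v_frame
    // (a clone of the most recent decoded picture) are hypothetical names.
    AVFrame * CAVDecoder::get_still_frame(int w, int h)
    {
        if (_last_v_frame)
            return av_frame_clone(_last_v_frame);   // repeat the last picture

        // otherwise synthesize a black YUV420P frame of the requested size
        AVFrame *f = av_frame_alloc();
        int size = avpicture_get_size(AV_PIX_FMT_YUV420P, w, h);
        uint8_t *buf = (uint8_t*)av_malloc(size);
        avpicture_fill((AVPicture*)f, buf, AV_PIX_FMT_YUV420P, w, h);
        memset(f->data[0], 0x00, w * h);        // Y plane: black
        memset(f->data[1], 0x80, w * h / 4);    // U plane: neutral chroma
        memset(f->data[2], 0x80, w * h / 4);    // V plane: neutral chroma
        f->width  = w;
        f->height = h;
        f->format = AV_PIX_FMT_YUV420P;
        return f;
    }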
AVDecoder.cpp:

@@ -96,8 +96,6 @@ AVFrame * CAVDecoder::get_silence_frame()
     return NULL;
 }
 
-
-
 void CAVDecoder::free_cur_a_frame()
 {
     if (_cur_a_frame) {
@@ -105,7 +103,6 @@ void CAVDecoder::free_cur_a_frame()
     }
 }
 
-
 void CAVDecoder::free_cur_v_frame()
 {
     if (_cur_v_frame) {
AVTranscoder.cpp:

@@ -1,5 +1,15 @@
 #include "AVTranscoder.h"
+extern "C" {
+#include <libswscale/swscale.h>
+}
+#ifdef WIN32
+#pragma comment(lib,"swscale.lib")
+#endif
 
+#define SCALED_W 80
+#define SCALED_H 60
+#define SRC_W 320
+#define SRC_H 240
 
 CAVTranscoder::CAVTranscoder():
 _start_time(INT64_MAX),
@@ -8,13 +18,17 @@ _one2one(true),
 _nOutputWidth(320),
 _cur_out_v_ts(0),
 _cur_out_a_ts(0),
-_max_audio(1)
+_max_audio(1),
+_swsCtx(NULL)
 {
     if (_one2one) {
         _nOutputHeight = 480;
     }
     else {
         _nOutputHeight = 240;
+        _swsCtx = sws_getContext(SRC_W, SRC_H, PIX_FMT_YUV420P,
+            SCALED_W, SCALED_H, PIX_FMT_YUV420P, SWS_BILINEAR,
+            NULL, NULL, NULL);
     }
 }
 
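The constructor above only creates _swsCtx for the non-one2one (picture-in-picture) layout; the scaling itself is still marked unfinished in the hunks below. A minimal sketch of how that context could be used to shrink a 320x240 participant frame to the 80x60 inset is shown here; pFrame, pScaled, and scaledbuf are hypothetical locals, and the real commit may end up doing this differently.

    // Hedged sketch, not in this commit: downscale one participant's
    // 320x240 YUV420P frame (pFrame) to an 80x60 inset using _swsCtx.
    AVFrame *pScaled = av_frame_alloc();
    int nScaledSize = avpicture_get_size(AV_PIX_FMT_YUV420P, SCALED_W, SCALED_H);
    uint8_t *scaledbuf = (uint8_t *)av_malloc(nScaledSize);
    avpicture_fill((AVPicture *)pScaled, scaledbuf, AV_PIX_FMT_YUV420P, SCALED_W, SCALED_H);

    sws_scale(_swsCtx,
              pFrame->data, pFrame->linesize,     // SRC_W x SRC_H source planes
              0, SRC_H,                           // convert the whole picture
              pScaled->data, pScaled->linesize);  // SCALED_W x SCALED_H destination

    // ... blit pScaled into a corner of the composite frame, then:
    av_free(scaledbuf);
    av_frame_free(&pScaled);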
@@ -95,6 +109,12 @@ bool CAVTranscoder::all_processed()
 
 int CAVTranscoder::close()
 {
+    if (_swsCtx) {
+        sws_freeContext(_swsCtx);
+        _swsCtx = NULL;
+    }
+    flush_encoder(0);
+    flush_encoder(1);
     av_write_trailer(_ofmt_ctx);
 
 #if USE_H264BSF
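flush_encoder is called here for both streams, but its body is not part of this diff. A plausible sketch, assuming the usual draining pattern for this FFmpeg API generation (feed NULL frames until the delayed encoder stops producing packets, then mux each one), is below; treat the signature and details as an assumption rather than the project's actual implementation.

    // Hedged sketch of what flush_encoder(stream_index) might look like;
    // not taken from this repository.
    int CAVTranscoder::flush_encoder(unsigned int stream_index)
    {
        AVCodecContext *cctx = _ofmt_ctx->streams[stream_index]->codec;
        if (!(cctx->codec->capabilities & CODEC_CAP_DELAY))
            return 0;                       // nothing buffered inside the encoder

        while (true) {
            AVPacket enc_pkt;
            av_init_packet(&enc_pkt);
            enc_pkt.data = NULL;
            enc_pkt.size = 0;

            int got_packet = 0;
            int ret = (cctx->codec_type == AVMEDIA_TYPE_VIDEO)
                ? avcodec_encode_video2(cctx, &enc_pkt, NULL, &got_packet)
                : avcodec_encode_audio2(cctx, &enc_pkt, NULL, &got_packet);
            if (ret < 0 || !got_packet)
                break;                      // encoder is drained (or failed)

            enc_pkt.stream_index = stream_index;
            av_packet_rescale_ts(&enc_pkt, cctx->time_base,
                                 _ofmt_ctx->streams[stream_index]->time_base);
            av_interleaved_write_frame(_ofmt_ctx, &enc_pkt);
        }
        return 0;
    }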
@@ -258,6 +278,7 @@ int CAVTranscoder::open_output_file(const char *filename)
         for (int i = 0; i < 1024; i++,pdst++,psrc++) {
             *pdst += (*psrc/_max_audio);
         }
+        (*it)->free_cur_a_frame();
     }
 }
 
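The mixing loop above adds each participant's samples after pre-dividing them by _max_audio, which prevents overflow at the cost of lowering every source's level. An alternative, sketched below under the assumption that the buffers hold signed 16-bit samples (not something this diff states), is to add the raw samples and saturate.

    // Hedged sketch, not in this commit: mix one source sample into the
    // destination with saturation instead of pre-dividing by _max_audio.
    static inline int16_t mix_s16(int16_t dst, int16_t src)
    {
        int sum = (int)dst + (int)src;       // widen so the addition cannot wrap
        if (sum >  32767) sum =  32767;      // clamp to the int16_t range
        if (sum < -32768) sum = -32768;
        return (int16_t)sum;
    }

    // usage inside the loop above: *pdst = mix_s16(*pdst, *psrc);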
@@ -284,41 +305,61 @@ int CAVTranscoder::open_output_file(const char *filename)
         }
     }
 
+    AVFrame *pDstFrame = av_frame_alloc();
+    int nDstSize = avpicture_get_size(AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
+    uint8_t *dstbuf = new uint8_t[nDstSize];
+    avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
+
     if (idxTeacher != -1) {
-        // the dest frame is the teacher frame
+        //copy teacher frame to dest frame
+        CAVDecoder * pDecoder = decoders_got_frame[idxTeacher];
+        AVFrame * pFrame = pDecoder->_cur_v_frame;
+        if (pFrame) {
+            fillDestFrame(pDstFrame, pFrame, 0, 0);
+        }
+        else {//fill with pure color
+            memset(pDstFrame->data[0], 0, _nOutputWidth * _nOutputHeight);
+            memset(pDstFrame->data[1], 0x80, _nOutputWidth *_nOutputHeight / 4);
+            memset(pDstFrame->data[2], 0x80, _nOutputWidth * _nOutputHeight / 4);
+        }
 
         for (int i = 0; i < decoders_got_frame.size(); i++){
             if (i != idxTeacher) {
                 //scale eacher frame
+
                 //copy each frame to the dest frame
+
             }
         }
-
-        // set the timestamp of teacher frame
-
-        //send to encoder
     }
     else {
-        AVFrame *pDstFrame = av_frame_alloc();
-        int nDstSize = avpicture_get_size(AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
-        uint8_t *dstbuf = new uint8_t[nDstSize];
-        avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
         memset(pDstFrame->data[0], 0, _nOutputWidth * _nOutputHeight);
         memset(pDstFrame->data[1], 0x80, _nOutputWidth *_nOutputHeight / 4);
         memset(pDstFrame->data[2], 0x80, _nOutputWidth * _nOutputHeight / 4);
 
         for (int i = 0; i < decoders_got_frame.size(); i++){
             if (i != idxTeacher) {
-                idxTeacher = i;
-                break;
+                //scale eacher frame
+
+                //copy each frame to the dest frame
+
             }
         }
+    }
 
-    //fill the timestamp of dest frame
-
+    //fill the timestamp of dest frame
+    pDstFrame->pts = _cur_out_v_ts;
+    pDstFrame->pkt_dts = _cur_out_v_ts;
+    pDstFrame->pkt_pts = _cur_out_v_ts;
+    pDstFrame->format = AV_PIX_FMT_YUV420P;
+    pDstFrame->width = _nOutputWidth;
+    pDstFrame->height = _nOutputHeight;
+    _cur_out_v_ts++;
 
-    //send to encoder
-    }
+    //send to encoder
+    int got_frame = 0;
+    encode_write_frame(pDstFrame, 0, &got_frame);
+    delete dstbuf;
 
     return 0;
 }
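fillDestFrame(pDstFrame, pFrame, 0, 0) is called in the hunk above, but its body is not part of this diff. A plausible sketch, assuming it copies a YUV420P source frame into the destination plane by plane at an (x, y) offset, is shown here; the real function may be a member of a different class or behave differently.

    // Hedged sketch of what fillDestFrame(dst, src, x, y) might do; the real
    // implementation is not shown in this diff. Assumes YUV420P frames and an
    // even x/y offset that keeps the source inside the destination.
    static void fillDestFrame(AVFrame *dst, const AVFrame *src, int x, int y)
    {
        // luma plane: one row of src->width bytes per source line
        for (int row = 0; row < src->height; row++) {
            memcpy(dst->data[0] + (y + row) * dst->linesize[0] + x,
                   src->data[0] + row * src->linesize[0],
                   src->width);
        }
        // chroma planes: half resolution horizontally and vertically
        for (int p = 1; p <= 2; p++) {
            for (int row = 0; row < src->height / 2; row++) {
                memcpy(dst->data[p] + (y / 2 + row) * dst->linesize[p] + x / 2,
                       src->data[p] + row * src->linesize[p],
                       src->width / 2);
            }
        }
    }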
@@ -400,7 +441,6 @@ int CAVTranscoder::encode_write_frame(AVFrame *filt_frame, unsigned int stream_i
     av_init_packet(&enc_pkt);
     ret = enc_func(_ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
         filt_frame, got_frame);
-    av_frame_unref(filt_frame);
     av_frame_free(&filt_frame);
     if (ret < 0)
         return ret;
@@ -413,7 +453,6 @@ int CAVTranscoder::encode_write_frame(AVFrame *filt_frame, unsigned int stream_i
         _ofmt_ctx->streams[stream_index]->codec->time_base,
         _ofmt_ctx->streams[stream_index]->time_base);
 
-    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
     /* mux encoded frame */
     ret = av_interleaved_write_frame(_ofmt_ctx, &enc_pkt);
 
AVTranscoder.h:

@@ -40,6 +40,7 @@ private:
 
     void * _a_frame_pool;
    int _max_audio;
+    struct SwsContext * _swsCtx;
 public:
    void set_max_audio(int max_audio);
 };
AudioEncoder.cpp (deleted):

@@ -1,11 +0,0 @@
-#include "AudioEncoder.h"
-
-
-CAudioEncoder::CAudioEncoder()
-{
-}
-
-
-CAudioEncoder::~CAudioEncoder()
-{
-}

AudioEncoder.h (deleted):

@@ -1,8 +0,0 @@
-#pragma once
-class CAudioEncoder
-{
-public:
-    CAudioEncoder();
-    virtual ~CAudioEncoder();
-};
-

VideoEncoder.cpp (deleted):

@@ -1,11 +0,0 @@
-#include "VideoEncoder.h"
-
-
-CVideoEncoder::CVideoEncoder()
-{
-}
-
-
-CVideoEncoder::~CVideoEncoder()
-{
-}

VideoEncoder.h (deleted):

@@ -1,8 +0,0 @@
-#pragma once
-class CVideoEncoder
-{
-public:
-    CVideoEncoder();
-    virtual ~CVideoEncoder();
-};
-
merge_pip.cpp:

@@ -637,8 +637,10 @@ void add_media_infos()
         }
     }
 
+    printf("\nsorted file info:");
     it = sorted_media.begin();
     for (; it != sorted_media.end();){
+        printf("\n%.3f %s %s", it->type_time, it->name.c_str(), it->t_type == tt_start ? "start" : "end");
         if (it->t_type == tt_end) {
             it = sorted_media.erase(it);
         }
@@ -647,6 +649,7 @@ void add_media_infos()
             it++;
         }
     }
+    printf("\n-------------------------\n");
 
     it = audio_type_files.begin();
     media_info last = *it;
@@ -959,10 +962,15 @@ int process_av_files()
     if (has_file) {
         media_info info = sorted_media.front();
         std::string m = get_outmedia_file_name(info.name.c_str());
+        printf("open output file:%s\n", m.c_str());
         int ret = videoTranscoder.open_output_file(m.c_str());
         if (ret) {
+            printf("open output file:%s fail!\n", m.c_str());
             return ret;
         }
+        else {
+            printf("open output file:%s success!\n", m.c_str());
+        }
     }
 
 
@@ -997,8 +1005,7 @@ int main(int argc, char * argv[])
     if (argc < 2) {
         printf(" merge_pip 2.0.0\n");
         printf(" merge video files to one pip video according to record info file,\nusage:");
-        printf("\n %s record_info_filename [-p] [-k]", argv[0]);
-        printf("\n -d :individual files for different time segment");
+        printf("\n %s record_info_filename", argv[0]);
         printf("\n\n");
         return -1;
     }
The Visual Studio project file (.vcxproj):

@@ -83,22 +83,18 @@
   </ItemGroup>
   <ItemGroup>
     <ClCompile Include="AudioDecoder.cpp" />
-    <ClCompile Include="AudioEncoder.cpp" />
     <ClCompile Include="AVDecoder.cpp" />
     <ClCompile Include="merge_pip.cpp" />
     <ClCompile Include="tools.cpp" />
     <ClCompile Include="VideoDecoder.cpp" />
     <ClCompile Include="AVTranscoder.cpp" />
-    <ClCompile Include="VideoEncoder.cpp" />
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="AudioDecoder.h" />
-    <ClInclude Include="AudioEncoder.h" />
     <ClInclude Include="AVDecoder.h" />
     <ClInclude Include="media_info.h" />
     <ClInclude Include="VideoDecoder.h" />
     <ClInclude Include="AVTranscoder.h" />
-    <ClInclude Include="VideoEncoder.h" />
   </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
The project filters file (.vcxproj.filters):

@@ -33,18 +33,9 @@
     <ClCompile Include="AVTranscoder.cpp">
       <Filter>源文件</Filter>
     </ClCompile>
-    <ClCompile Include="VideoEncoder.cpp">
-      <Filter>源文件</Filter>
-    </ClCompile>
-    <ClCompile Include="AudioEncoder.cpp">
-      <Filter>源文件</Filter>
-    </ClCompile>
     <ClCompile Include="merge_pip.cpp">
       <Filter>源文件</Filter>
     </ClCompile>
-    <ClCompile Include="framepool.cpp">
-      <Filter>源文件</Filter>
-    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="VideoDecoder.h">
@@ -62,14 +53,5 @@
     <ClInclude Include="AVTranscoder.h">
       <Filter>头文件</Filter>
     </ClInclude>
-    <ClInclude Include="VideoEncoder.h">
-      <Filter>头文件</Filter>
-    </ClInclude>
-    <ClInclude Include="AudioEncoder.h">
-      <Filter>头文件</Filter>
-    </ClInclude>
-    <ClInclude Include="framepool.h">
-      <Filter>头文件</Filter>
-    </ClInclude>
   </ItemGroup>
 </Project>