胡斌

Add code to output the merged video file; set the uid of CAVDecoder
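
For orientation, the classes this commit touches are driven roughly as sketched below: open the output container, pump `transcode()` until every decoder is drained, then `close()` to flush and finalize the file. This is a hypothetical driver for illustration only — the header name and the wrapper function are assumptions, and the real call sites live elsewhere in the project.

```cpp
#include "AVTranscoder.h"   // assumed project header declaring CAVTranscoder

// Hypothetical driver (illustration only; decoder setup and media_info feeding happen elsewhere).
int merge_to_file(const char *input_path)
{
    CAVTranscoder transcoder;
    string out = get_outmedia_file_name(input_path);   // with this commit, yields "<input>_out.ts"
    if (transcoder.open_output_file(out.c_str()) < 0)
        return -1;                                      // could not set up the output muxer
    while (!transcoder.all_processed())
        transcoder.transcode();                         // one mixed video tick plus the audio before it
    return transcoder.close();                          // trailer + teardown added by this commit
}
```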

@@ -21,6 +21,7 @@ CAVDecoder::~CAVDecoder()
 int CAVDecoder::add(media_info &info)
 {
     _media_role = info.m_role;
+    _uid = info.uid;
     if (info.m_type == mt_audio) {
         _a_start_time_ms = info.start_time_ms;
         _a_end_time_ms = info.end_time_ms;
@@ -42,13 +43,15 @@ int CAVDecoder::add(media_info &info)
     if (_end_time_ms < info.end_time_ms) {
         _end_time_ms = info.end_time_ms;
     }
+
+    av_log(NULL, AV_LOG_INFO, "CAVDecoder add info:%lf, %"PRIu64", %"PRIu64"\n", _cur_a_ts_ms, _cur_v_ts_ms, _end_time_ms);
 
     return 0;
 }
 
 unsigned int CAVDecoder::getuid()
 {
-    return 0;
+    return _uid;
 }
 
 bool CAVDecoder::get_one_v_frame()
@@ -58,12 +61,13 @@ bool CAVDecoder::get_one_v_frame()
     if (_video_info.size()) {
         ret = _video_decoder.get_one_frame(&_cur_v_frame, ts);
         if (ret == 0) {
-            _cur_v_ts_ms = _v_start_time_ms + ts;
+            //_cur_v_ts_ms = _v_start_time_ms + ts;
+            _cur_v_ts_ms += VFRAME_DURATION_MS;
         }
         else {
             _video_info.pop_front();
             if (_cur_v_ts_ms < _end_time_ms) {
-                _cur_v_ts_ms += 50;//return last v frame
+                _cur_v_ts_ms += VFRAME_DURATION_MS;//return last v frame
                 ret = 0;
             }
         }
@@ -71,7 +75,7 @@ bool CAVDecoder::get_one_v_frame()
 
     if (ret) {//no video decoded
         if (_cur_v_ts_ms < _end_time_ms) {//should have as video frame
-            _cur_v_ts_ms += 50;//return last v frame
+            _cur_v_ts_ms += VFRAME_DURATION_MS;//return last v frame
             ret = 0;
             if (!_cur_v_frame) {
                 _cur_v_frame = get_blank_frame();
@@ -114,9 +118,9 @@ bool CAVDecoder::get_one_a_frame()
     int64_t ts;
     int ret = -1;
     if (_audio_info.size()) {
-        ret = _audio_decoder.get_one_frame(&_cur_v_frame, ts);
+        ret = _audio_decoder.get_one_frame(&_cur_a_frame, ts);
         if (ret == 0) {
-            _cur_a_ts_ms = _a_start_time_ms + ts;
+            _cur_a_ts_ms += AFRAME_DURATION_MS;
         }
         else {
             _audio_info.pop_front();
@@ -13,7 +13,7 @@ public:
     unsigned int getuid();
     bool get_one_a_frame();
     bool get_one_v_frame();
-    int64_t _cur_a_ts_ms;
+    double _cur_a_ts_ms;
     int64_t _cur_v_ts_ms;
     media_role _media_role;
 
@@ -29,6 +29,7 @@ protected:
     int64_t _v_start_time_ms;
    int64_t _v_end_time_ms;
     int64_t _end_time_ms;
+    unsigned int _uid;
 
 private:
     AVFrame * get_blank_frame();
@@ -4,8 +4,10 @@
 CAVTranscoder::CAVTranscoder():
 _start_time(INT64_MAX),
 _all_processed(true),
-_one2one(false),
-_nOutputWidth(320)
+_one2one(true),
+_nOutputWidth(320),
+_cur_out_v_ts(0),
+_cur_out_a_ts(0)
 {
     if (_one2one) {
         _nOutputHeight = 480;
@@ -61,7 +63,7 @@ int64_t CAVTranscoder::transcode()
     _all_processed = decoders_got_frame.size() == 0;
     mix_and_output_vframe(decoders_got_frame);
 
-    _cur_v_time += 50;
+    _cur_v_time += VFRAME_DURATION_MS;
     //sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
 
     while (_cur_a_time < _cur_v_time)
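
The pacing in `transcode()` is: emit one mixed video frame, advance the video clock by `VFRAME_DURATION_MS`, then emit audio until the audio clock catches up. A minimal, self-contained sketch of that model (the 1024-samples-at-48 kHz audio frame size is an assumption, since the encoder settings are outside this diff):

```cpp
#include <cstdio>

// Pacing model implied by the loop above (illustration; not project code).
int main()
{
    const double kVideoTickMs  = 50.0;                        // VFRAME_DURATION_MS
    const double kAudioFrameMs = 1024.0 / 48000.0 * 1000.0;   // ~21.333 ms per AAC frame (assumed)
    double v_time = 0.0, a_time = 0.0;

    for (int tick = 0; tick < 3; ++tick) {
        // mix_and_output_vframe(...) would run here
        v_time += kVideoTickMs;
        while (a_time < v_time) {
            // the audio mixing/encoding step would run here
            a_time += kAudioFrameMs;
        }
        std::printf("tick %d: video at %.0f ms, audio at %.3f ms\n", tick, v_time, a_time);
    }
    return 0;
}
```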
@@ -92,6 +94,25 @@ bool CAVTranscoder::all_processed()
 
 int CAVTranscoder::close()
 {
+    av_write_trailer(_ofmt_ctx);
+
+#if USE_H264BSF
+    av_bitstream_filter_close(h264bsfc);
+#endif
+#if USE_AACBSF
+    av_bitstream_filter_close(aacbsfc);
+#endif
+    int i;
+    for (i = 0; i<2; i++)
+    {
+        if (_ofmt_ctx && _ofmt_ctx->nb_streams > i && _ofmt_ctx->streams[i] && _ofmt_ctx->streams[i]->codec)
+            avcodec_close(_ofmt_ctx->streams[i]->codec);
+    }
+
+    if (_ofmt_ctx && !(_ofmt_ctx->oformat->flags & AVFMT_NOFILE))
+        avio_close(_ofmt_ctx->pb);
+    avformat_free_context(_ofmt_ctx);
+
     return 0;
 }
 
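
One caveat with the teardown above: encoders with `CODEC_CAP_DELAY` (libx264, AAC) keep the last few frames buffered and only emit them when fed a NULL frame, so they are normally drained before `av_write_trailer()`. A possible variant that uses the `flush_encoder()` this commit also adds — a sketch, not the committed code (bitstream-filter teardown omitted for brevity):

```cpp
// Sketch: drain buffered frames, then finalize and free the output context.
int CAVTranscoder::close()
{
    for (unsigned int i = 0; i < 2; i++)
        flush_encoder(i);                          // stream 0 = video, 1 = audio, as elsewhere in this diff

    av_write_trailer(_ofmt_ctx);                   // finalize the container

    for (unsigned int i = 0; _ofmt_ctx && i < 2 && i < _ofmt_ctx->nb_streams; i++)
        if (_ofmt_ctx->streams[i] && _ofmt_ctx->streams[i]->codec)
            avcodec_close(_ofmt_ctx->streams[i]->codec);

    if (_ofmt_ctx && !(_ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_close(_ofmt_ctx->pb);
    avformat_free_context(_ofmt_ctx);
    _ofmt_ctx = NULL;
    return 0;
}
```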
@@ -99,20 +120,20 @@ int CAVTranscoder::close()
 int CAVTranscoder::open_output_file(const char *filename)
 {
     AVStream *out_stream;
-    AVCodecContext *dec_ctx, *enc_ctx;
+    AVCodecContext *enc_ctx;
     AVCodec *encoder;
     int ret;
     unsigned int i;
 
-    ofmt_ctx = NULL;
-    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
-    if (!ofmt_ctx) {
+    _ofmt_ctx = NULL;
+    avformat_alloc_output_context2(&_ofmt_ctx, NULL, NULL, filename);
+    if (!_ofmt_ctx) {
         av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
         return AVERROR_UNKNOWN;
     }
 
     for (i = 0; i < 2; i++) {
-        out_stream = avformat_new_stream(ofmt_ctx, NULL);
+        out_stream = avformat_new_stream(_ofmt_ctx, NULL);
         if (!out_stream) {
             av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
             return AVERROR_UNKNOWN;
@@ -120,7 +141,7 @@ int CAVTranscoder::open_output_file(const char *filename)
 
         enc_ctx = out_stream->codec;
 
-        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
+        if (_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
             enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
         if (0 == i) {
@@ -177,10 +198,10 @@ int CAVTranscoder::open_output_file(const char *filename)
         }
     }
 
-    av_dump_format(ofmt_ctx, 0, filename, 1);
+    av_dump_format(_ofmt_ctx, 0, filename, 1);
 
-    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
-        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
+    if (!(_ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
+        ret = avio_open(&_ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
         if (ret < 0) {
             av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
             return ret;
@@ -188,7 +209,7 @@ int CAVTranscoder::open_output_file(const char *filename)
     }
 
     /* init muxer, write output file header */
-    ret = avformat_write_header(ofmt_ctx, NULL);
+    ret = avformat_write_header(_ofmt_ctx, NULL);
     if (ret < 0) {
         av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
         return ret;
@@ -268,6 +289,9 @@ int CAVTranscoder::open_output_file(const char *filename)
 
 int CAVTranscoder::fillDestFrame(AVFrame * pDstFrame, AVFrame * pSrcFrame, int x, int y)
 {
+    if (!pSrcFrame){
+        return 0;
+    }
     for (int i = 0; i < pSrcFrame->height; i++) {
         memcpy(pDstFrame->data[0] + (y + i)*pDstFrame->linesize[0] + x, pSrcFrame->data[0] + i * pSrcFrame->linesize[0], pSrcFrame->linesize[0]);
     }
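
The new NULL check protects `fillDestFrame()` when a decoder slot has no frame. The hunk only shows the luma copy; for YUV420P the U and V planes also have to be blitted, at half resolution and with halved offsets (the real function presumably continues past this hunk). A self-contained sketch of the full per-plane copy, assuming even x/y and a source that fits inside the destination:

```cpp
#include <cstring>
extern "C" {
#include <libavutil/frame.h>
}

// Copy a YUV420P source frame into a larger YUV420P canvas at pixel offset (x, y).
static void blit_yuv420p(AVFrame *dst, const AVFrame *src, int x, int y)
{
    if (!src)
        return;                                            // nothing decoded for this slot
    for (int i = 0; i < src->height; i++)                  // Y plane: full resolution
        memcpy(dst->data[0] + (y + i) * dst->linesize[0] + x,
               src->data[0] + i * src->linesize[0], src->width);
    for (int p = 1; p <= 2; p++)                           // U and V planes: half resolution each way
        for (int i = 0; i < src->height / 2; i++)
            memcpy(dst->data[p] + (y / 2 + i) * dst->linesize[p] + x / 2,
                   src->data[p] + i * src->linesize[p], src->width / 2);
}
```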
@@ -288,40 +312,53 @@ int CAVTranscoder::open_output_file(const char *filename)
     avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
     if (decoders_got_frame.size() == 2){
         fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, decoders_got_frame[0]->_media_role == mr_teacher ? 0 : 240);
+        decoders_got_frame[0]->free_cur_v_frame();
         fillDestFrame(pDstFrame, decoders_got_frame[1]->_cur_v_frame, 0, decoders_got_frame[1]->_media_role == mr_teacher ? 0 : 240);
+        decoders_got_frame[1]->free_cur_v_frame();
     }
-    else {
+    else if (decoders_got_frame.size() == 1)
+    {
         fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, 0);
+        decoders_got_frame[0]->free_cur_v_frame();
         //todo: fill the bottom half image with pure color
     }
+    else {
+        //fill with last image?
+    }
 
     //fill the timestamp of dest frame
-
+    _cur_out_v_ts++;
+    pDstFrame->pts = _cur_out_v_ts;
+    pDstFrame->pkt_dts = _cur_out_v_ts;
+    pDstFrame->pkt_pts = _cur_out_v_ts;
+    pDstFrame->format = AV_PIX_FMT_YUV420P;
+    pDstFrame->width = _nOutputWidth;
+    pDstFrame->height = _nOutputHeight;
 
     //send to encoder
-
+    int got_frame = 0;
+    encode_write_frame(pDstFrame, 0, &got_frame);
+    delete dstbuf;
 
     return 0;
 }
 
-int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
+int CAVTranscoder::encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
     int ret;
     int got_frame_local;
     AVPacket enc_pkt;
-#if 0
+
     int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
-        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
-        AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
+        stream_index == 0 ? avcodec_encode_video2 : avcodec_encode_audio2;
 
     if (!got_frame)
         got_frame = &got_frame_local;
 
-    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
     /* encode filtered frame */
     enc_pkt.data = NULL;
     enc_pkt.size = 0;
     av_init_packet(&enc_pkt);
-    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
+    ret = enc_func(_ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
         filt_frame, got_frame);
     av_frame_free(&filt_frame);
     if (ret < 0)
@@ -332,23 +369,23 @@ int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_
     /* prepare packet for muxing */
     enc_pkt.stream_index = stream_index;
     av_packet_rescale_ts(&enc_pkt,
-        ofmt_ctx->streams[stream_index]->codec->time_base,
-        ofmt_ctx->streams[stream_index]->time_base);
+        _ofmt_ctx->streams[stream_index]->codec->time_base,
+        _ofmt_ctx->streams[stream_index]->time_base);
 
     av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
     /* mux encoded frame */
-    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
-#endif
+    ret = av_interleaved_write_frame(_ofmt_ctx, &enc_pkt);
+
     return ret;
 }
 
-#if 0
-static int flush_encoder(unsigned int stream_index)
+
+int CAVTranscoder::flush_encoder(unsigned int stream_index)
 {
     int ret;
     int got_frame;
 
-    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
+    if (!(_ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
         CODEC_CAP_DELAY))
         return 0;
 
@@ -362,4 +399,3 @@ static int flush_encoder(unsigned int stream_index)
     }
     return ret;
 }
-#endif
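
The body of `flush_encoder()` falls mostly outside this diff; the standard pattern (as in FFmpeg's transcoding example) is to keep calling the encode path with a NULL frame until `got_frame` comes back 0. A sketch of what the elided middle presumably looks like — illustrative only, not the committed code:

```cpp
// Illustrative drain loop for one output stream.
int CAVTranscoder::flush_encoder(unsigned int stream_index)
{
    if (!(_ofmt_ctx->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
        return 0;                                   // this encoder does not buffer frames

    int ret = 0;
    while (1) {
        int got_frame = 0;
        ret = encode_write_frame(NULL, stream_index, &got_frame);  // NULL frame requests a flush
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;                               // encoder fully drained
    }
    return ret;
}
```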
@@ -17,12 +17,14 @@ public:
 protected:
     vector < CAVDecoder *> _decoders;
 
-    AVFormatContext *ofmt_ctx;
+    AVFormatContext * _ofmt_ctx;
     int64_t _start_time;
-    int64_t _cur_a_time;
+    double _cur_a_time;
     int64_t _cur_v_time;
     int _nOutputWidth;
     int _nOutputHeight;
+    int64_t _cur_out_v_ts;
+    int64_t _cur_out_a_ts;
 
 private:
     int mix_and_output_vframe(vector<CAVDecoder *> & decoders_got_frame);
@@ -33,5 +35,7 @@ private:
     int mix_and_output_one2many_vframe(vector<CAVDecoder *> & decoders_got_frame);
 
     int fillDestFrame(AVFrame * pDstFrame, AVFrame * pSrcFrame, int x, int y);
+    int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame);
+    int flush_encoder(unsigned int stream_index);
 };
 
@@ -77,5 +77,5 @@ typedef struct FilteringContext {
 } FilteringContext;
 
 
-#define AFRAME_DURATION_MS 20
+#define AFRAME_DURATION_MS 0.02133333
 #define VFRAME_DURATION_MS 50
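
The new audio-frame duration works out as below, assuming AAC frames of 1024 samples at a 48 kHz sample rate (the encoder configuration itself is outside this diff). Note that the committed value is the duration in seconds even though the macro keeps its `_MS` suffix, while `VFRAME_DURATION_MS` (50) is in milliseconds.

```cpp
// Worked arithmetic (assumption: 1024-sample AAC frames at 48 kHz):
//   1024 / 48000 = 0.0213333... seconds = 21.3333... milliseconds
static const double kAudioFrameSec = 1024.0 / 48000.0;        // ≈ 0.0213333, the committed constant
static const double kAudioFrameMs  = kAudioFrameSec * 1000.0; // ≈ 21.3333
```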
@@ -597,9 +597,10 @@ void add_media_infos()
         m.start_time_ms = f.start_time * 1000;
         m.end_time_ms = f.end_time * 1000;
         add_media_info(m);
+        /*
         m.t_type = tt_end;
         m.type_time = m.end_time;
-        add_media_info(m);
+        add_media_info(m);*/
     }
 
     list<media_info>::iterator it = sorted_media.begin();
@@ -834,7 +835,7 @@ string get_outmedia_file_name(const char * input)
     if (p) {
         *p = 0;
     }
-    strcat(out_media_file, "_out.mp4");
+    strcat(out_media_file, "_out.ts");
     return out_media_file;
 }
 
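
Switching the suffix to `_out.ts` makes `avformat_alloc_output_context2()` guess an MPEG-TS muxer from the filename. The `USE_H264BSF`/`USE_AACBSF` blocks torn down in `close()` conventionally guard the `h264_mp4toannexb` and `aac_adtstoasc` bitstream filters in FFmpeg remuxing code; the matching open calls are not part of this commit, so the pairing below is an assumption:

```cpp
// Conventional open calls for the filters that close() releases (assumed; not shown in this commit).
#if USE_H264BSF
AVBitStreamFilterContext *h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); // H.264 to Annex B
#endif
#if USE_AACBSF
AVBitStreamFilterContext *aacbsfc = av_bitstream_filter_init("aac_adtstoasc");     // ADTS AAC to ASC
#endif
```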
@@ -21,9 +21,6 @@
     <ClCompile Include="tools.cpp">
       <Filter>源文件</Filter>
     </ClCompile>
-    <ClCompile Include="merge_pip.cpp">
-      <Filter>源文件</Filter>
-    </ClCompile>
     <ClCompile Include="VideoDecoder.cpp">
       <Filter>源文件</Filter>
     </ClCompile>
@@ -42,6 +39,9 @@
     <ClCompile Include="AudioEncoder.cpp">
       <Filter>源文件</Filter>
     </ClCompile>
+    <ClCompile Include="merge_pip.cpp">
+      <Filter>资源文件</Filter>
+    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="VideoDecoder.h">