胡斌

add code to open the output file and merge frames

@@ -111,7 +111,6 @@ int CAVTranscoder::open_output_file(const char *filename)
         return AVERROR_UNKNOWN;
     }
 
-
     for (i = 0; i < 2; i++) {
         out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
@@ -124,11 +123,8 @@ int CAVTranscoder::open_output_file(const char *filename)
         if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
             enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
-
-        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
-            || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
-            /* in this example, we choose transcoding to same codec */
-            encoder = avcodec_find_encoder(dec_ctx->codec_id);
+        if (0 == i) {
+            encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
             if (!encoder) {
                 av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                 return AVERROR_INVALIDDATA;
@@ -137,55 +133,50 @@ int CAVTranscoder::open_output_file(const char *filename)
             /* In this example, we transcode to same properties (picture size,
              * sample rate etc.). These properties can be changed for output
              * streams easily using filters */
-            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
-                enc_ctx->height = dec_ctx->height;
-                enc_ctx->width = dec_ctx->width;
-                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
+            enc_ctx->height = _nOutputHeight;
+            enc_ctx->width = _nOutputWidth;
+            enc_ctx->sample_aspect_ratio.den = 1;
+            enc_ctx->sample_aspect_ratio.num = 1;
             /* take first format from list of supported formats */
-            enc_ctx->pix_fmt = encoder->pix_fmts[0];
+            enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
             /* video time_base can be set to whatever is handy and supported by encoder */
-            enc_ctx->time_base = dec_ctx->time_base;
+            enc_ctx->time_base.num = 1;
+            enc_ctx->time_base.den = 20;
 
             enc_ctx->me_range = 16;
             enc_ctx->max_qdiff = 4;
             enc_ctx->qmin = 10;
             enc_ctx->qmax = 30;
             enc_ctx->qcompress = 0.6;
+            /* Third parameter can be used to pass settings to encoder */
+            ret = avcodec_open2(enc_ctx, encoder, NULL);
+            if (ret < 0) {
+                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
+                return ret;
+            }
         }
         else {
-            enc_ctx->sample_rate = dec_ctx->sample_rate;
-            enc_ctx->channel_layout = dec_ctx->channel_layout;
-            enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
+            encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
+            if (!encoder) {
+                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
+                return AVERROR_INVALIDDATA;
+            }
+            enc_ctx->sample_rate = 48000;
+            enc_ctx->channel_layout = AV_CH_LAYOUT_MONO;
+            enc_ctx->channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_MONO);
             /* take first format from list of supported formats */
-            enc_ctx->sample_fmt = encoder->sample_fmts[0];
+            enc_ctx->sample_fmt = AV_SAMPLE_FMT_S16; //AV_SAMPLE_FMT_FLTP;
             enc_ctx->time_base.num = 1;
             enc_ctx->time_base.den = enc_ctx->sample_rate;
+            /* Third parameter can be used to pass settings to encoder */
+            ret = avcodec_open2(enc_ctx, encoder, NULL);
+            if (ret < 0) {
+                av_log(NULL, AV_LOG_ERROR, "Cannot open audio encoder for stream #%u\n", i);
+                return ret;
+            }
         }
+    }
 
-            /* Third parameter can be used to pass settings to encoder */
-            ret = avcodec_open2(enc_ctx, encoder, NULL);
-            if (ret < 0) {
-                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
-                return ret;
-            }
-        }
-        else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
-            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
-            return AVERROR_INVALIDDATA;
-        }
-#if 0
-        else {
-            /* if this stream must be remuxed */
-            ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
-                ifmt_ctx->streams[i]->codec);
-            if (ret < 0) {
-                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
-                return ret;
-            }
-#endif
-        }
-
-#if 0
     av_dump_format(ofmt_ctx, 0, filename, 1);
 
     if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
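
Note on the audio branch above: the hard-coded AV_SAMPLE_FMT_S16 will make avcodec_open2() fail with AAC encoders that only accept planar float (the stock FFmpeg AAC encoder, for instance). A small defensive sketch, not part of the commit, that keeps S16 only when the encoder actually advertises it; the helper name pick_sample_fmt is made up for illustration:

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    /* Sketch only: choose a sample format the encoder really supports, preferring
     * the requested one and otherwise taking the encoder's first advertised format,
     * which is what the removed example code did. */
    static enum AVSampleFormat pick_sample_fmt(const AVCodec *encoder, enum AVSampleFormat wanted)
    {
        if (!encoder->sample_fmts)
            return wanted;                      /* encoder does not say; keep the request */
        for (const enum AVSampleFormat *p = encoder->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++)
            if (*p == wanted)
                return wanted;                  /* S16 (or whatever was asked for) is supported */
        return encoder->sample_fmts[0];         /* fall back to the first supported format */
    }

    /* usage in the audio branch, instead of the hard-coded assignment:
     *     enc_ctx->sample_fmt = pick_sample_fmt(encoder, AV_SAMPLE_FMT_S16); */
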
@@ -202,7 +193,7 @@ int CAVTranscoder::open_output_file(const char *filename)
         av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
         return ret;
     }
-#endif
+
     return 0;
 }
 
@@ -228,12 +219,64 @@ int CAVTranscoder::open_output_file(const char *filename)
 
 int CAVTranscoder::mix_and_output_one2many_vframe(vector<CAVDecoder *> & decoders_got_frame)
 {
+    int idxTeacher = -1;
+    for (int i = 0; i < decoders_got_frame.size(); i++){
+        if (decoders_got_frame[i]->_media_role == mr_teacher) {
+            idxTeacher = i;
+            break;
+        }
+    }
+
+    if (idxTeacher != -1) {
+        // the dest frame is the teacher frame
+
+        for (int i = 0; i < decoders_got_frame.size(); i++){
+            if (i != idxTeacher) {
+                //scale each frame
+                //copy each frame to the dest frame
+            }
+        }
+
+        // set the timestamp of the teacher frame
+
+        //send to encoder
+    }
+    else {
+        AVFrame *pDstFrame = av_frame_alloc();
+        int nDstSize = avpicture_get_size(AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
+        uint8_t *dstbuf = new uint8_t[nDstSize];
+        avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
+        memset(pDstFrame->data[0], 0, _nOutputWidth * _nOutputHeight);
+        memset(pDstFrame->data[1], 0x80, _nOutputWidth * _nOutputHeight / 4);
+        memset(pDstFrame->data[2], 0x80, _nOutputWidth * _nOutputHeight / 4);
+
+        for (int i = 0; i < decoders_got_frame.size(); i++){
+            if (i != idxTeacher) {
+                idxTeacher = i;
+                break;
+            }
+        }
+
+        //fill the timestamp of dest frame
+
+
+        //send to encoder
+    }
+
     return 0;
 }
 
 int CAVTranscoder::fillDestFrame(AVFrame * pDstFrame, AVFrame * pSrcFrame, int x, int y)
 {
-
+    for (int i = 0; i < pSrcFrame->height; i++) {
+        memcpy(pDstFrame->data[0] + (y + i) * pDstFrame->linesize[0] + x, pSrcFrame->data[0] + i * pSrcFrame->linesize[0], pSrcFrame->width);
+    }
+
+    for (int i = 0; i < pSrcFrame->height / 2; i++){
+        memcpy(pDstFrame->data[1] + (y / 2 + i) * pDstFrame->linesize[1] + x / 2, pSrcFrame->data[1] + i * pSrcFrame->linesize[1], pSrcFrame->width / 2);
+        memcpy(pDstFrame->data[2] + (y / 2 + i) * pDstFrame->linesize[2] + x / 2, pSrcFrame->data[2] + i * pSrcFrame->linesize[2], pSrcFrame->width / 2);
+    }
+    return 0;
 }
 
 int CAVTranscoder::mix_and_output_one2one_vframe(vector<CAVDecoder *> & decoders_got_frame)
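
The two placeholder comments above ("scale each frame" / "copy each frame to the dest frame") are still unimplemented. A minimal sketch of one way to fill them in with libswscale, writing the scaled frame straight into a tile of the composite YUV420P frame; the function name blit_scaled_tile, the tile geometry, and the requirement that x, y, tileW, tileH be even are assumptions for illustration, not taken from the commit:

    extern "C" {
    #include <libavutil/frame.h>
    #include <libswscale/swscale.h>
    }

    /* Sketch only: scale one decoded frame and write it into the (x, y) tile of an
     * already-allocated YUV420P composite frame. x, y, tileW and tileH are assumed
     * to be even so the chroma planes stay aligned. */
    static int blit_scaled_tile(AVFrame *dst, AVFrame *src, int x, int y, int tileW, int tileH)
    {
        SwsContext *sws = sws_getContext(src->width, src->height, (AVPixelFormat)src->format,
                                         tileW, tileH, AV_PIX_FMT_YUV420P,
                                         SWS_BILINEAR, NULL, NULL, NULL);
        if (!sws)
            return -1;

        /* Point the destination planes at the tile's top-left corner inside dst. */
        uint8_t *dstData[4] = {
            dst->data[0] + y * dst->linesize[0] + x,
            dst->data[1] + (y / 2) * dst->linesize[1] + x / 2,
            dst->data[2] + (y / 2) * dst->linesize[2] + x / 2,
            NULL
        };
        sws_scale(sws, src->data, src->linesize, 0, src->height, dstData, dst->linesize);
        sws_freeContext(sws);
        return 0;
    }

The one2many loop could then call blit_scaled_tile(pDstFrame, decoders_got_frame[i]->_cur_v_frame, x, y, w, h) for each non-teacher decoder; alternatively each frame could be scaled into a temporary AVFrame first and placed with fillDestFrame().
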
@@ -249,13 +292,19 @@ int CAVTranscoder::open_output_file(const char *filename)
     }
     else {
         fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, 0);
+        //todo: fill the bottom half image with pure color
     }
+
+    //fill the timestamp of dest frame
+
+
+    //send to encoder
+
+
     return 0;
 }
 
 int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
-
-
     int ret;
     int got_frame_local;
     AVPacket enc_pkt;
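
For the "fill the bottom half image with pure color" todo above, the RGB2YUV() helper added later in this commit (tools.cpp/tools.h) can supply the plane values. A rough sketch, assuming the destination is YUV420P, that the width, y0 and h are even, and that the helper returns 8-bit values with chroma biased around 128; the function name fill_rect_yuv420p is made up for illustration:

    extern "C" {
    #include <libavutil/frame.h>
    }
    #include <string.h>
    #include "tools.h"   /* RGB2YUV() */

    /* Sketch only: paint rows [y0, y0 + h) of a YUV420P frame of width w with one color. */
    static void fill_rect_yuv420p(AVFrame *dst, int w, int y0, int h,
                                  unsigned char r, unsigned char g, unsigned char b)
    {
        unsigned char Y, U, V;
        RGB2YUV(r, g, b, &Y, &U, &V);

        for (int row = y0; row < y0 + h; row++)
            memset(dst->data[0] + row * dst->linesize[0], Y, w);
        for (int row = y0 / 2; row < (y0 + h) / 2; row++) {
            memset(dst->data[1] + row * dst->linesize[1], U, w / 2);
            memset(dst->data[2] + row * dst->linesize[2], V, w / 2);
        }
    }

    /* e.g. a dark gray bottom half:
     *     fill_rect_yuv420p(pDstFrame, _nOutputWidth, _nOutputHeight / 2, _nOutputHeight / 2, 64, 64, 64); */
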
@@ -12,8 +12,6 @@ public:
     int64_t transcode();
     bool all_processed();
     int close();
-
-protected:
     int open_output_file(const char *filename);
 
 protected:
@@ -3,7 +3,8 @@ extern AVRational timebase_ms;
 
 CVideoDecoder::CVideoDecoder() :
 _start_time(-10.0),
-_is_finished(false)
+_is_finished(false),
+_uid(0)
 {
 
 }
@@ -16,6 +17,7 @@ int CVideoDecoder::add(media_info &info)
 {
     if (_start_time < -1.0) {//the the start time of this decoder
         _start_time = info.start_time;
+        _uid = info.uid;
     }
     _info.push_back(info);
 
@@ -307,7 +309,7 @@ int CVideoDecoder::filter_encode_write_frame(AVFrame *frame, unsigned int stream
 
 unsigned int CVideoDecoder::getuid()
 {
-    return 0;
+    return _uid;
 }
 
 int CVideoDecoder::get_one_frame(AVFrame ** pFrame, int64_t & ts)
@@ -12,9 +12,9 @@ public:
     unsigned int getuid();
 
     int get_one_frame(AVFrame ** pFrame, int64_t & ts);
-
-protected:
     int open_input_file(const char *filename);
+protected:
+
     int init_filters(void);
     int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx, const char *filter_spec);
     int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index);
@@ -30,5 +30,6 @@ protected:
 
     bool _is_finished;
     AVRational _codec_timebase;
+    unsigned int _uid;
 };
 
@@ -826,6 +826,18 @@ void get_outinfo_file_name(char * input)
     strcat(out_info_file, "_out.txt");
 }
 
+string get_outmedia_file_name(const char * input)
+{
+    char out_media_file[1024];
+    strcpy(out_media_file, input);
+    char * p = strrchr(out_media_file, '.');
+    if (p) {
+        *p = 0;
+    }
+    strcat(out_media_file, "_out.mp4");
+    return out_media_file;
+}
+
 int load_record_info(char * record_info)
 {
     ifstream fin(record_info);
@@ -884,6 +896,16 @@ int process_av_files()
 
     int64_t cur_time = 0;
     bool has_file = sorted_media.size();
+    if (has_file) {
+        media_info info = sorted_media.front();
+        std::string m = get_outmedia_file_name(info.name.c_str());
+        int ret = videoTranscoder.open_output_file(m.c_str());
+        if (ret) {
+            return ret;
+        }
+    }
+
+
     while (has_file){
         while (has_file){
             media_info info = sorted_media.front();
@@ -35,4 +35,10 @@ time_t calc_sec1970(int Y, int M, int D, int h, int m, int s)
     sec += h * 60 * 60 + m * 60 + s;
 
     return sec;
-}
+}
+
+void RGB2YUV(unsigned char r, unsigned char g, unsigned char b, unsigned char *y, unsigned char * u, unsigned char * v){
+    *y = (unsigned char)(0.30*r + 0.59*g + 0.11*b);
+    *u = (unsigned char)(0.564*(b - (*y)) + 128);
+    *v = (unsigned char)(0.713*(r - (*y)) + 128);
+}
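
A quick numeric check of the conversion (an illustration, assuming the 8-bit form with the +128 chroma bias): mid-gray should map to neutral chroma, which is what the 0x80 memsets used to initialise the chroma planes earlier in this commit expect. Without the bias, gray would come out as (128, 0, 0) and tint the frame green.

    #include <cstdio>
    #include "tools.h"

    int main()
    {
        unsigned char y, u, v;
        RGB2YUV(128, 128, 128, &y, &u, &v);
        printf("gray -> Y=%u U=%u V=%u\n", y, u, v);   /* expect roughly 128, 128, 128 */
        return 0;
    }
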
@@ -2,4 +2,5 @@
 #define TOOLS_H
 #include <time.h>
 time_t calc_sec1970(int Y, int M, int D, int h, int m, int s);
+void RGB2YUV(unsigned char r, unsigned char g, unsigned char b, unsigned char *y, unsigned char * u, unsigned char * v);
 #endif