胡斌

Add code for opening the output file and merging decoded video frames.

... ... @@ -111,7 +111,6 @@ int CAVTranscoder::open_output_file(const char *filename)
return AVERROR_UNKNOWN;
}
for (i = 0; i < 2; i++) {
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
... ... @@ -124,11 +123,8 @@ int CAVTranscoder::open_output_file(const char *filename)
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
if (0 == i) {
encoder = avcodec_find_encoder(AV_CODEC_ID_H264);;
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
... ... @@ -137,31 +133,21 @@ int CAVTranscoder::open_output_file(const char *filename)
/* In this example, we transcode to same properties (picture size,
* sample rate etc.). These properties can be changed for output
* streams easily using filters */
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
enc_ctx->height = _nOutputHeight;
enc_ctx->width = _nOutputWidth;
enc_ctx->sample_aspect_ratio.den = 1;
enc_ctx->sample_aspect_ratio.num = 1;
/* take first format from list of supported formats */
enc_ctx->pix_fmt = encoder->pix_fmts[0];
enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
/* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = dec_ctx->time_base;
enc_ctx->time_base.num = 1;
enc_ctx->time_base.den = 20;
enc_ctx->me_range = 16;
enc_ctx->max_qdiff = 4;
enc_ctx->qmin = 10;
enc_ctx->qmax = 30;
enc_ctx->qcompress = 0.6;
}
else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->time_base.num = 1;
enc_ctx->time_base.den = enc_ctx->sample_rate;
}
/* Third parameter can be used to pass settings to encoder */
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
... ... @@ -169,23 +155,28 @@ int CAVTranscoder::open_output_file(const char *filename)
return ret;
}
}
else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
else {
encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);;
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
}
#if 0
else {
/* if this stream must be remuxed */
ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
ifmt_ctx->streams[i]->codec);
enc_ctx->sample_rate = 48000;
enc_ctx->channel_layout = AV_CH_LAYOUT_MONO;
enc_ctx->channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_MONO);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = AV_SAMPLE_FMT_S16; //AV_SAMPLE_FMT_FLTP;
enc_ctx->time_base.num = 1;
enc_ctx->time_base.den = enc_ctx->sample_rate;
/* Third parameter can be used to pass settings to encoder */
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
return ret;
}
#endif
}
}
#if 0
av_dump_format(ofmt_ctx, 0, filename, 1);
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
... ... @@ -202,7 +193,7 @@ int CAVTranscoder::open_output_file(const char *filename)
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
return ret;
}
#endif
return 0;
}
... ... @@ -228,12 +219,64 @@ int CAVTranscoder::open_output_file(const char *filename)
// Compose one output video frame from the decoders that currently have a frame.
// If a teacher stream is present, its frame is the canvas and every other frame
// is (to be) scaled and blitted onto it; otherwise a blank YUV420P canvas is
// synthesized. NOTE(review): scaling, timestamping, and encoding are still TODO
// stubs in this revision — the function only prepares the canvas.
// Returns 0 (no failure paths are reported yet).
int CAVTranscoder::mix_and_output_one2many_vframe(vector<CAVDecoder *> & decoders_got_frame)
{
	int idxTeacher = -1;
	// Locate the teacher's decoder; its frame supplies the background.
	for (size_t i = 0; i < decoders_got_frame.size(); i++){
		if (decoders_got_frame[i]->_media_role == mr_teacher) {
			idxTeacher = (int)i;
			break;
		}
	}
	if (idxTeacher != -1) {
		// The dest frame is the teacher frame.
		for (size_t i = 0; i < decoders_got_frame.size(); i++){
			if ((int)i != idxTeacher) {
				// TODO: scale each frame
				// TODO: copy each frame onto the dest frame
			}
		}
		// TODO: set the timestamp of the teacher frame
		// TODO: send to encoder
	}
	else {
		// No teacher: build a blank YUV420P canvas (black luma, neutral chroma).
		AVFrame *pDstFrame = av_frame_alloc();
		int nDstSize = avpicture_get_size(AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
		uint8_t *dstbuf = new uint8_t[nDstSize];
		avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
		// 0x00 = black Y; 0x80 = neutral U/V in 8-bit YUV. Assumes linesize ==
		// width for the canvas filled by avpicture_fill above.
		memset(pDstFrame->data[0], 0, _nOutputWidth * _nOutputHeight);
		memset(pDstFrame->data[1], 0x80, _nOutputWidth * _nOutputHeight / 4);
		memset(pDstFrame->data[2], 0x80, _nOutputWidth * _nOutputHeight / 4);
		// Placeholder: pick the first available decoder as the foreground source.
		for (size_t i = 0; i < decoders_got_frame.size(); i++){
			if ((int)i != idxTeacher) {
				idxTeacher = (int)i;
				break;
			}
		}
		// TODO: fill the timestamp of the dest frame
		// TODO: send to encoder
		// BUG FIX: release the canvas — it was leaked on every call. Once the
		// encode step above is implemented, ownership handling may move there.
		av_frame_free(&pDstFrame);
		delete[] dstbuf;
	}
	return 0;
}
// Blit a YUV420P source frame into the destination frame at pixel offset (x, y).
// Assumes both frames are YUV420P, (x, y) are even (so the 2x2-subsampled
// chroma planes stay aligned), and the source fits inside the destination —
// TODO(review): confirm callers guarantee this; no bounds checks are done here.
// Returns 0.
int CAVTranscoder::fillDestFrame(AVFrame * pDstFrame, AVFrame * pSrcFrame, int x, int y)
{
	// Luma (Y) plane, one row at a time. Copy `width` bytes, not `linesize`:
	// linesize may include alignment padding that would overrun the dest row.
	for (int i = 0; i < pSrcFrame->height; i++) {
		memcpy(pDstFrame->data[0] + (y + i) * pDstFrame->linesize[0] + x,
		       pSrcFrame->data[0] + i * pSrcFrame->linesize[0],
		       pSrcFrame->width);
	}
	// Chroma (U/V) planes are half resolution in both dimensions.
	// BUG FIX: the destination offset must advance with the row index i; the
	// original wrote every chroma row onto the same destination line.
	for (int i = 0; i < pSrcFrame->height / 2; i++){
		memcpy(pDstFrame->data[1] + (y / 2 + i) * pDstFrame->linesize[1] + x / 2,
		       pSrcFrame->data[1] + i * pSrcFrame->linesize[1],
		       pSrcFrame->width / 2);
		memcpy(pDstFrame->data[2] + (y / 2 + i) * pDstFrame->linesize[2] + x / 2,
		       pSrcFrame->data[2] + i * pSrcFrame->linesize[2],
		       pSrcFrame->width / 2);
	}
	return 0;
}
int CAVTranscoder::mix_and_output_one2one_vframe(vector<CAVDecoder *> & decoders_got_frame)
... ... @@ -249,13 +292,19 @@ int CAVTranscoder::open_output_file(const char *filename)
}
else {
fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, 0);
//todo: fill the bottom half image with pure color
}
//fill the timestamp of dest frame
//send to encoder
return 0;
}
int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int ret;
int got_frame_local;
AVPacket enc_pkt;
... ...
... ... @@ -12,8 +12,6 @@ public:
int64_t transcode();
bool all_processed();
int close();
protected:
int open_output_file(const char *filename);
protected:
... ...
... ... @@ -3,7 +3,8 @@ extern AVRational timebase_ms;
// Default-construct a decoder: start time unset (the -10.0 sentinel is tested
// as `< -1.0` in add()), not finished, and uid 0 until the first media_info
// arrives.
// BUG FIX: the patch left both the old `_is_finished(false)` line and the new
// `_is_finished(false), _uid(0)` lines in place, producing a duplicated
// initializer that does not compile; this is the intended post-patch list.
CVideoDecoder::CVideoDecoder() :
	_start_time(-10.0),
	_is_finished(false),
	_uid(0)
{
}
... ... @@ -16,6 +17,7 @@ int CVideoDecoder::add(media_info &info)
{
if (_start_time < -1.0) {//the the start time of this decoder
_start_time = info.start_time;
_uid = info.uid;
}
_info.push_back(info);
... ... @@ -307,7 +309,7 @@ int CVideoDecoder::filter_encode_write_frame(AVFrame *frame, unsigned int stream
// Return the user id of the media this decoder is handling (0 until add() has
// recorded the first media_info's uid).
// BUG FIX: a stale `return 0;` from the pre-patch version preceded the real
// return, making `return _uid;` unreachable and the function always return 0.
unsigned int CVideoDecoder::getuid()
{
	return _uid;
}
int CVideoDecoder::get_one_frame(AVFrame ** pFrame, int64_t & ts)
... ...
... ... @@ -12,9 +12,9 @@ public:
unsigned int getuid();
int get_one_frame(AVFrame ** pFrame, int64_t & ts);
protected:
int open_input_file(const char *filename);
protected:
int init_filters(void);
int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx, const char *filter_spec);
int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index);
... ... @@ -30,5 +30,6 @@ protected:
bool _is_finished;
AVRational _codec_timebase;
unsigned int _uid;
};
... ...
... ... @@ -826,6 +826,18 @@ void get_outinfo_file_name(char * input)
strcat(out_info_file, "_out.txt");
}
// Derive the output media file name from an input path by replacing its
// extension with "_out.mp4" (e.g. "lesson.flv" -> "lesson_out.mp4").
// Fixes vs. the original:
//  - uses the LAST '.' after the last path separator, not strstr's FIRST dot,
//    so dotted names ("2019.01.02.flv") and dotted directories keep their stem;
//  - drops the fixed 1024-byte stack buffer whose strcpy/strcat overflowed on
//    long paths; std::string sizes itself.
// A null input is treated as an empty name.
string get_outmedia_file_name(const char * input)
{
	string out_media_file(input ? input : "");
	const string::size_type dot = out_media_file.find_last_of('.');
	const string::size_type sep = out_media_file.find_last_of("/\\");
	// Strip the extension only if the dot belongs to the final path component.
	if (dot != string::npos && (sep == string::npos || dot > sep)) {
		out_media_file.erase(dot);
	}
	out_media_file += "_out.mp4";
	return out_media_file;
}
int load_record_info(char * record_info)
{
ifstream fin(record_info);
... ... @@ -884,6 +896,16 @@ int process_av_files()
int64_t cur_time = 0;
bool has_file = sorted_media.size();
if (has_file) {
media_info info = sorted_media.front();
std::string m = get_outmedia_file_name(info.name.c_str());
int ret = videoTranscoder.open_output_file(m.c_str());
if (ret) {
return ret;
}
}
while (has_file){
while (has_file){
media_info info = sorted_media.front();
... ...
... ... @@ -36,3 +36,9 @@ time_t calc_sec1970(int Y, int M, int D, int h, int m, int s)
return sec;
}
// Convert one 8-bit RGB pixel to 8-bit YUV (BT.601-style luma weights).
// BUG FIX: U and V are signed color differences; without the +128 bias they
// wrapped around when stored in an unsigned char (e.g. black produced u = 0
// instead of the 0x80 neutral chroma the frame-merge canvas fill uses).
// Results are clamped to [0, 255]; the legacy 0.877 V coefficient can slightly
// exceed the 8-bit range at saturated red, so the clamp is load-bearing.
void RGB2YUV(unsigned char r, unsigned char g, unsigned char b, unsigned char *y, unsigned char * u, unsigned char * v){
	double fy = 0.30 * r + 0.59 * g + 0.11 * b;
	double fu = 0.493 * (b - fy) + 128.0;
	double fv = 0.877 * (r - fy) + 128.0;
	*y = (unsigned char)(fy < 0.0 ? 0.0 : (fy > 255.0 ? 255.0 : fy));
	*u = (unsigned char)(fu < 0.0 ? 0.0 : (fu > 255.0 ? 255.0 : fu));
	*v = (unsigned char)(fv < 0.0 ? 0.0 : (fv > 255.0 ? 255.0 : fv));
}
... ...
... ... @@ -2,4 +2,5 @@
#define TOOLS_H
#include <time.h>
time_t calc_sec1970(int Y, int M, int D, int h, int m, int s);
void RGB2YUV(unsigned char r, unsigned char g, unsigned char b, unsigned char *y, unsigned char * u, unsigned char * v);
#endif
\ No newline at end of file
... ...