胡斌

1. Keep the last decoded frame as a still image in one2one mode.

2. Flip the image when it is rotated 180°.
3. Save the record info as m_*.
4. Use the stream time base when decoding; do not use the decoder's codec time base, because it is inconsistent in some MP4 files (a minimal sketch follows this list).
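
On item 4: the old code rescaled each packet into the decoder's codec time base (av_packet_rescale_ts) before decoding, but in some MP4 files that codec time base does not match the container, which throws off the timestamp conversion. The patch keeps packets in the stream time base and copies stream->time_base into codec_ctx->time_base after the decoder is opened. Below is a minimal sketch of the idea, assuming an opened AVFormatContext *ifmt_ctx and a valid stream_index; the helper name frame_pts_ms is illustrative and is not part of the patch.

/* Sketch only (not part of the diff): convert a decoded frame's timestamp
 * using the stream time base, never the codec time base. */
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

static int64_t frame_pts_ms(AVFormatContext *ifmt_ctx, unsigned int stream_index,
                            const AVFrame *frame)
{
    AVStream *stream = ifmt_ctx->streams[stream_index];

    /* Packets were NOT rescaled into the codec time base before decoding,
     * so the frame's timestamps are still expressed in stream->time_base. */
    int64_t pts = frame->best_effort_timestamp;
    if (pts == AV_NOPTS_VALUE)
        pts = frame->pkt_dts;               /* fallback, same time base */

    AVRational ms = { 1, 1000 };            /* target: milliseconds */
    return av_rescale_q(pts, stream->time_base, ms);
}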
... ... @@ -8,8 +8,8 @@ extern "C" {
#pragma comment(lib,"swscale.lib")
#endif
#define SCALED_W 80
#define SCALED_H 60
#define SCALED_W 100
#define SCALED_H 75
#define SRC_W 320
#define SRC_H 240
uint8_t blank_r = 0x16;
... ... @@ -25,7 +25,9 @@ _cur_out_a_ts(0),
_max_audio(1),
_swsCtx(NULL),
_scaledFrame(NULL),
_last_videos_got(-1)
_last_videos_got(-1),
_teacherFrame(NULL),
_studentFrame(NULL)
{
_one2one = bOne2One;
if (_one2one) {
... ... @@ -54,6 +56,8 @@ _last_videos_got(-1)
CAVTranscoder::~CAVTranscoder()
{
av_frame_free(&_teacherFrame);
av_frame_free(&_studentFrame);
}
int CAVTranscoder::add(media_info & info)
... ... @@ -369,7 +373,7 @@ int CAVTranscoder::open_output_file(const char *filename)
}
//copy each frame to the dest frame
fillDestFrame(pDstFrame, _scaledFrame, SRC_W - SCALED_H - 10 - (imageIdx % 4) * (SCALED_H + 10), SRC_H - SCALED_H - (SCALED_H + 8)*(imageIdx / 4), (SCALED_W - SCALED_H) / 2, 0, SCALED_H, SCALED_H);
fillDestFrame(pDstFrame, _scaledFrame, SRC_W - 3 - (imageIdx % 4) * (SCALED_H + 5), SRC_H - SCALED_H - (SCALED_H + 2)*(imageIdx / 4), (SCALED_W - SCALED_H) / 2, 0, SCALED_H-1, SCALED_H-1);
imageIdx++;
}
}
... ... @@ -396,14 +400,45 @@ int CAVTranscoder::open_output_file(const char *filename)
if (!pSrcFrame){
return 0;
}
if (pSrcFrame->pkt_dts == 180) {
unsigned char * startSrcY = pSrcFrame->data[0] + (pSrcFrame->height) * pSrcFrame->linesize[0] - 1;
for (int i = 0; i < pSrcFrame->height; i++) {
//memcpy(pDstFrame->data[0] + (y + i)*pDstFrame->linesize[0] + x, startSrcY - i * pSrcFrame->linesize[0], pSrcFrame->linesize[0]);
unsigned char * psrc = startSrcY - i * pSrcFrame->linesize[0];
unsigned char * pdst = pDstFrame->data[0] + (y + i)*pDstFrame->linesize[0] + x;
unsigned char * pdst_end = pdst + pSrcFrame->linesize[0];
for (; pdst < pdst_end; psrc--, pdst++) {
*pdst = *psrc;
}
}
unsigned char * startSrcU = pSrcFrame->data[1] + (pSrcFrame->height/2 ) * pSrcFrame->linesize[1] -1;
unsigned char * startSrcV = pSrcFrame->data[2] + (pSrcFrame->height/2) * pSrcFrame->linesize[2] -1;
for (int i = 0; i < pSrcFrame->height / 2; i++){
//memcpy(pDstFrame->data[1] + (y / 2 + i)*pDstFrame->linesize[1] + x / 2, startSrcU - i * pSrcFrame->linesize[1], pSrcFrame->linesize[1]);
//memcpy(pDstFrame->data[2] + (y / 2 + i)*pDstFrame->linesize[2] + x / 2, startSrcV - i * pSrcFrame->linesize[2], pSrcFrame->linesize[2]);
unsigned char * psrc = startSrcU - i * pSrcFrame->linesize[1];
unsigned char * pdst = pDstFrame->data[1] + (y/2 + i)*pDstFrame->linesize[1] + x/2;
unsigned char * pdst_end = pdst + pSrcFrame->linesize[1];
for (; pdst < pdst_end; psrc--, pdst++) {
*pdst = *psrc;
}
psrc = startSrcV - i * pSrcFrame->linesize[2];
pdst = pDstFrame->data[2] + (y / 2 + i)*pDstFrame->linesize[2] + x / 2;
pdst_end = pdst + pSrcFrame->linesize[2];
for (; pdst < pdst_end; psrc--, pdst++) {
*pdst = *psrc;
}
}
}
else {
for (int i = 0; i < pSrcFrame->height; i++) {
memcpy(pDstFrame->data[0] + (y + i)*pDstFrame->linesize[0] + x, pSrcFrame->data[0] + i * pSrcFrame->linesize[0], pSrcFrame->linesize[0]>0 ? pSrcFrame->linesize[0] : -pSrcFrame->linesize[0]);
memcpy(pDstFrame->data[0] + (y + i)*pDstFrame->linesize[0] + x, pSrcFrame->data[0] + i * pSrcFrame->linesize[0], pSrcFrame->linesize[0] > 0 ? pSrcFrame->linesize[0] : -pSrcFrame->linesize[0]);
}
for (int i = 0; i < pSrcFrame->height / 2; i++){
memcpy(pDstFrame->data[1] + (y / 2 + i)*pDstFrame->linesize[1] + x / 2, pSrcFrame->data[1] + i * pSrcFrame->linesize[1], pSrcFrame->linesize[1]>0 ? pSrcFrame->linesize[1] : -pSrcFrame->linesize[1]);
memcpy(pDstFrame->data[2] + (y / 2 + i)*pDstFrame->linesize[2] + x / 2, pSrcFrame->data[2] + i * pSrcFrame->linesize[2], pSrcFrame->linesize[2]>0 ? pSrcFrame->linesize[2] : -pSrcFrame->linesize[2]);
memcpy(pDstFrame->data[1] + (y / 2 + i)*pDstFrame->linesize[1] + x / 2, pSrcFrame->data[1] + i * pSrcFrame->linesize[1], pSrcFrame->linesize[1] > 0 ? pSrcFrame->linesize[1] : -pSrcFrame->linesize[1]);
memcpy(pDstFrame->data[2] + (y / 2 + i)*pDstFrame->linesize[2] + x / 2, pSrcFrame->data[2] + i * pSrcFrame->linesize[2], pSrcFrame->linesize[2] > 0 ? pSrcFrame->linesize[2] : -pSrcFrame->linesize[2]);
}
}
return 0;
}
... ... @@ -429,18 +464,46 @@ int CAVTranscoder::open_output_file(const char *filename)
uint8_t *dstbuf = new uint8_t[nDstSize];
avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
for (unsigned int i = 0; i < decoders_got_frame.size(); i++) {
if (decoders_got_frame[i]->_media_role == mr_teacher) {
AVFrame * pFrame = decoders_got_frame[i]->_cur_v_frame;
if (pFrame) {
av_frame_free(&_teacherFrame);
_teacherFrame = pFrame;
decoders_got_frame[i]->_cur_v_frame = NULL;
}
}
else {
AVFrame * pFrame = decoders_got_frame[i]->_cur_v_frame;
if (pFrame) {
av_frame_free(&_studentFrame);
_studentFrame = pFrame;
decoders_got_frame[i]->_cur_v_frame = NULL;
}
}
}
if (!_teacherFrame && !_studentFrame) {
memset(pDstFrame->data[0], _blank_y, _nOutputWidth * _nOutputHeight);
memset(pDstFrame->data[1], _blank_u, _nOutputWidth *_nOutputHeight / 4);
memset(pDstFrame->data[1], _blank_u, _nOutputWidth * _nOutputHeight / 4);
memset(pDstFrame->data[2], _blank_v, _nOutputWidth * _nOutputHeight / 4);
}
if (_teacherFrame) {
fillDestFrame(pDstFrame, _teacherFrame, 0, 0);
if (_studentFrame) {
fillDestFrame(pDstFrame, _studentFrame, 0, 240);
}
}
else if (_studentFrame) {
fillDestFrame(pDstFrame, _studentFrame, 0, 0);
}
if (decoders_got_frame.size() == 2){
if (_last_videos_got != 2) {
_last_videos_got = 2;
printf("\n--get 2 video:%"PRIu64", %s, %s\n", _cur_out_v_ts, decoders_got_frame[0]->get_cur_vfile().c_str(), decoders_got_frame[1]->get_cur_vfile().c_str());
}
fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, decoders_got_frame[0]->_media_role == mr_teacher ? 0 : 240);
fillDestFrame(pDstFrame, decoders_got_frame[1]->_cur_v_frame, 0, decoders_got_frame[1]->_media_role == mr_teacher ? 0 : 240);
}
else if (decoders_got_frame.size() == 1)
{
... ... @@ -448,15 +511,12 @@ int CAVTranscoder::open_output_file(const char *filename)
_last_videos_got = 1;
printf("\n--get 1 video:%"PRIu64", %s\n", _cur_out_v_ts, decoders_got_frame[0]->get_cur_vfile().c_str());
}
fillDestFrame(pDstFrame, decoders_got_frame[0]->_cur_v_frame, 0, 0);
//todo: fill the bottom half image with pure color
}
else {
if (_last_videos_got != 0) {
_last_videos_got = 0;
printf("\n--get 0 video:%"PRIu64"\n", _cur_out_v_ts);
}
//fill with last image?
}
#if 0
if (_cur_out_v_ts > (26 * 60 + 57) *(1000 /50)) {
... ...
... ... @@ -44,6 +44,8 @@ private:
struct SwsContext * _swsCtx;
AVFrame * _scaledFrame;
int _last_videos_got;
AVFrame * _teacherFrame;
AVFrame * _studentFrame; // for one2one, keep the last frame
uint8_t _blank_y, _blank_u, _blank_v;
public:
... ...
... ... @@ -267,6 +267,7 @@ int CVideoDecoder::open_input_file(const char *filename)
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
codec_ctx->time_base = stream->time_base;
}
}
... ... @@ -312,12 +313,10 @@ int CVideoDecoder::filter_encode_write_frame(AVFrame *frame, unsigned int stream
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
if (_rotate == 180) {
filt_frame->data[0] += filt_frame->linesize[0] * (filt_frame->height - 1);
filt_frame->linesize[0] = -filt_frame->linesize[0];
filt_frame->data[1] += filt_frame->linesize[1] * (filt_frame->height / 2 - 1);
filt_frame->linesize[1] = -filt_frame->linesize[1];
filt_frame->data[2] += filt_frame->linesize[2] * (filt_frame->height / 2 - 1);
filt_frame->linesize[2] = -filt_frame->linesize[2];
filt_frame->pkt_dts = 180;//use this to indicate rotate 180
}
else {
filt_frame->pkt_dts = 0;
}
_decoded_frames.push_back(filt_frame);
}
... ... @@ -372,9 +371,9 @@ int CVideoDecoder::get_one_frame(AVFrame ** pFrame, int64_t & ts)
break;
}
av_packet_rescale_ts(&packet,
/* av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base);
ifmt_ctx->streams[stream_index]->codec->time_base);*/
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
... ...
... ... @@ -652,6 +652,7 @@ void add_media_infos()
printf("\n-------------------------\n");
it = audio_type_files.begin();
if (it != audio_type_files.end()) {
media_info last = *it;
for (it++; it != audio_type_files.end(); it++) {
... ... @@ -668,6 +669,7 @@ void add_media_infos()
last = *it;
}
}
}
}
void unifiy_start_time()
... ... @@ -890,23 +892,30 @@ void load_codec_param()
void get_outinfo_file_name(char * input)
{
#if 0
strcpy(out_info_file, input);
char * p = strstr(out_info_file, ".");
if (p) {
*p = 0;
}
strcat(out_info_file, "_out.txt");
#else
strcpy(out_info_file, "m_");
strcat(out_info_file, input);
#endif
}
string get_outmedia_file_name(const char * input)
{
char out_media_file[1024];
strcpy(out_media_file, input);
strcpy(out_media_file, "m_");
strcat(out_media_file, input);
char * p = strstr(out_media_file, ".");
if (p) {
*p = 0;
}
strcat(out_media_file, "_out.mp4");
strcat(out_media_file, ".mp4");
return out_media_file;
}
... ...