胡斌

remove unused files

one-to-one transcode works; still need to add a still frame when one party has audio but no video
add some code for swscale (unfinished)
... ... @@ -96,8 +96,6 @@ AVFrame * CAVDecoder::get_silence_frame()
return NULL;
}
void CAVDecoder::free_cur_a_frame()
{
if (_cur_a_frame) {
... ... @@ -105,7 +103,6 @@ void CAVDecoder::free_cur_a_frame()
}
}
void CAVDecoder::free_cur_v_frame()
{
if (_cur_v_frame) {
... ...
#include "AVTranscoder.h"
extern "C" {
#include <libswscale/swscale.h>
}
#ifdef WIN32
#pragma comment(lib,"swscale.lib")
#endif
#define SCALED_W 80
#define SCALED_H 60
#define SRC_W 320
#define SRC_H 240
CAVTranscoder::CAVTranscoder():
_start_time(INT64_MAX),
... ... @@ -8,13 +18,17 @@ _one2one(true),
_nOutputWidth(320),
_cur_out_v_ts(0),
_cur_out_a_ts(0),
_max_audio(1)
_max_audio(1),
_swsCtx(NULL)
{
if (_one2one) {
_nOutputHeight = 480;
}
else {
_nOutputHeight = 240;
_swsCtx = sws_getContext(SRC_W, SRC_H, PIX_FMT_YUV420P,
SCALED_W, SCALED_H, PIX_FMT_YUV420P, SWS_BILINEAR,
NULL, NULL, NULL);
}
}
... ... @@ -95,6 +109,12 @@ bool CAVTranscoder::all_processed()
int CAVTranscoder::close()
{
if (_swsCtx) {
sws_freeContext(_swsCtx);
_swsCtx = NULL;
}
flush_encoder(0);
flush_encoder(1);
av_write_trailer(_ofmt_ctx);
#if USE_H264BSF
... ... @@ -258,6 +278,7 @@ int CAVTranscoder::open_output_file(const char *filename)
for (int i = 0; i < 1024; i++,pdst++,psrc++) {
*pdst += (*psrc/_max_audio);
}
(*it)->free_cur_a_frame();
}
}
... ... @@ -284,41 +305,61 @@ int CAVTranscoder::open_output_file(const char *filename)
}
}
AVFrame *pDstFrame = av_frame_alloc();
int nDstSize = avpicture_get_size(AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
uint8_t *dstbuf = new uint8_t[nDstSize];
avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
if (idxTeacher != -1) {
// the dest frame is the teacher frame
//copy teacher frame to dest frame
CAVDecoder * pDecoder = decoders_got_frame[idxTeacher];
AVFrame * pFrame = pDecoder->_cur_v_frame;
if (pFrame) {
fillDestFrame(pDstFrame, pFrame, 0, 0);
}
else {//fill with pure color
memset(pDstFrame->data[0], 0, _nOutputWidth * _nOutputHeight);
memset(pDstFrame->data[1], 0x80, _nOutputWidth *_nOutputHeight / 4);
memset(pDstFrame->data[2], 0x80, _nOutputWidth * _nOutputHeight / 4);
}
for (int i = 0; i < decoders_got_frame.size(); i++){
if (i != idxTeacher) {
//scale eacher frame
//copy each frame to the dest frame
}
}
// set the timestamp of teacher frame
//send to encoder
}
else {
AVFrame *pDstFrame = av_frame_alloc();
int nDstSize = avpicture_get_size(AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
uint8_t *dstbuf = new uint8_t[nDstSize];
avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, _nOutputWidth, _nOutputHeight);
memset(pDstFrame->data[0], 0, _nOutputWidth * _nOutputHeight);
memset(pDstFrame->data[1], 0x80, _nOutputWidth *_nOutputHeight / 4);
memset(pDstFrame->data[2], 0x80, _nOutputWidth * _nOutputHeight / 4);
for (int i = 0; i < decoders_got_frame.size(); i++){
if (i != idxTeacher) {
idxTeacher = i;
break;
//scale eacher frame
//copy each frame to the dest frame
}
}
}
//fill the timestamp of dest frame
//fill the timestamp of dest frame
pDstFrame->pts = _cur_out_v_ts;
pDstFrame->pkt_dts = _cur_out_v_ts;
pDstFrame->pkt_pts = _cur_out_v_ts;
pDstFrame->format = AV_PIX_FMT_YUV420P;
pDstFrame->width = _nOutputWidth;
pDstFrame->height = _nOutputHeight;
_cur_out_v_ts++;
//send to encoder
}
//send to encoder
int got_frame = 0;
encode_write_frame(pDstFrame, 0, &got_frame);
delete dstbuf;
return 0;
}
... ... @@ -400,7 +441,6 @@ int CAVTranscoder::encode_write_frame(AVFrame *filt_frame, unsigned int stream_i
av_init_packet(&enc_pkt);
ret = enc_func(_ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
filt_frame, got_frame);
av_frame_unref(filt_frame);
av_frame_free(&filt_frame);
if (ret < 0)
return ret;
... ... @@ -413,7 +453,6 @@ int CAVTranscoder::encode_write_frame(AVFrame *filt_frame, unsigned int stream_i
_ofmt_ctx->streams[stream_index]->codec->time_base,
_ofmt_ctx->streams[stream_index]->time_base);
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(_ofmt_ctx, &enc_pkt);
... ...
... ... @@ -40,6 +40,7 @@ private:
void * _a_frame_pool;
int _max_audio;
struct SwsContext * _swsCtx;
public:
void set_max_audio(int max_audio);
};
... ...
#include "AudioEncoder.h"
// Default constructor: placeholder stub — the class has no members yet,
// so there is nothing to initialize.
CAudioEncoder::CAudioEncoder()
{
}
// Destructor: stub — no resources are owned, so nothing to release.
// Declared virtual in the header so derived encoders destruct correctly
// through a base pointer.
CAudioEncoder::~CAudioEncoder()
{
}
#pragma once
// Placeholder audio-encoder class. Currently only empty constructor/
// destructor stubs exist (see AudioEncoder.cpp); the encoding interface
// is yet to be defined.
class CAudioEncoder
{
public:
CAudioEncoder();
// Virtual destructor: the class is intended to serve as a base for
// concrete encoder implementations.
virtual ~CAudioEncoder();
};
#include "VideoEncoder.h"
// Default constructor: placeholder stub — the class has no members yet,
// so there is nothing to initialize.
CVideoEncoder::CVideoEncoder()
{
}
// Destructor: stub — no resources are owned, so nothing to release.
// Declared virtual in the header so derived encoders destruct correctly
// through a base pointer.
CVideoEncoder::~CVideoEncoder()
{
}
#pragma once
// Placeholder video-encoder class. Currently only empty constructor/
// destructor stubs exist (see VideoEncoder.cpp); the encoding interface
// is yet to be defined.
class CVideoEncoder
{
public:
CVideoEncoder();
// Virtual destructor: the class is intended to serve as a base for
// concrete encoder implementations.
virtual ~CVideoEncoder();
};
... ... @@ -637,8 +637,10 @@ void add_media_infos()
}
}
printf("\nsorted file info:");
it = sorted_media.begin();
for (; it != sorted_media.end();){
printf("\n%.3f %s %s", it->type_time, it->name.c_str(), it->t_type == tt_start ? "start" : "end");
if (it->t_type == tt_end) {
it = sorted_media.erase(it);
}
... ... @@ -647,6 +649,7 @@ void add_media_infos()
it++;
}
}
printf("\n-------------------------\n");
it = audio_type_files.begin();
media_info last = *it;
... ... @@ -959,10 +962,15 @@ int process_av_files()
if (has_file) {
media_info info = sorted_media.front();
std::string m = get_outmedia_file_name(info.name.c_str());
printf("open output file:%s\n", m.c_str());
int ret = videoTranscoder.open_output_file(m.c_str());
if (ret) {
printf("open output file:%s fail!\n", m.c_str());
return ret;
}
else {
printf("open output file:%s success!\n", m.c_str());
}
}
... ... @@ -997,8 +1005,7 @@ int main(int argc, char * argv[])
if (argc < 2) {
printf(" merge_pip 2.0.0\n");
printf(" merge video files to one pip video according to record info file,\nusage:");
printf("\n %s record_info_filename [-p] [-k]", argv[0]);
printf("\n -d :individual files for different time segment");
printf("\n %s record_info_filename", argv[0]);
printf("\n\n");
return -1;
}
... ...
... ... @@ -83,22 +83,18 @@
</ItemGroup>
<ItemGroup>
<ClCompile Include="AudioDecoder.cpp" />
<ClCompile Include="AudioEncoder.cpp" />
<ClCompile Include="AVDecoder.cpp" />
<ClCompile Include="merge_pip.cpp" />
<ClCompile Include="tools.cpp" />
<ClCompile Include="VideoDecoder.cpp" />
<ClCompile Include="AVTranscoder.cpp" />
<ClCompile Include="VideoEncoder.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="AudioDecoder.h" />
<ClInclude Include="AudioEncoder.h" />
<ClInclude Include="AVDecoder.h" />
<ClInclude Include="media_info.h" />
<ClInclude Include="VideoDecoder.h" />
<ClInclude Include="AVTranscoder.h" />
<ClInclude Include="VideoEncoder.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
... ...
... ... @@ -33,18 +33,9 @@
<ClCompile Include="AVTranscoder.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="VideoEncoder.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="AudioEncoder.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="merge_pip.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="framepool.cpp">
<Filter>源文件</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="VideoDecoder.h">
... ... @@ -62,14 +53,5 @@
<ClInclude Include="AVTranscoder.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="VideoEncoder.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="AudioEncoder.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="framepool.h">
<Filter>头文件</Filter>
</ClInclude>
</ItemGroup>
</Project>
\ No newline at end of file
... ...