胡斌

Add initial CVideoDecoder/CVideoTranscoder scaffolding for the new merge_pip pipeline

#include "VideoDecoder.h"
// Default-construct a decoder; _info starts empty and is filled via add().
CVideoDecoder::CVideoDecoder()
{
}
// Destructor; _info is a vector of values, so nothing to release manually.
CVideoDecoder::~CVideoDecoder()
{
}
// Record a media segment for this decoder's stream.
// Previously the parameter was silently dropped and _info stayed empty,
// so the decoder never accumulated the segments it was created to hold.
// Returns 0 (callers in CVideoTranscoder::add ignore the result).
int CVideoDecoder::add(media_info &info)
{
	_info.push_back(info);
	return 0;
}
unsigned int CVideoDecoder::getuid()
{
return 0;
}
... ...
#pragma once
#include <string>
#include <vector>
using namespace std;
// Kind of payload carried by a media file/event.
enum media_type{
mt_audio = 0,
mt_video = 1,
mt_av = 3,   // combined audio+video (note: value 2 is intentionally unused)
};
// Which participant a recorded stream belongs to.
enum media_role {
mr_teacher = 0,
mr_student = 1,
};
// Whether a media_info event marks the start or the end of a segment.
enum timestamp_type{
tt_start = 0,
tt_end = 1,
};
// One timeline event for a media segment. Segments appear twice in the
// sorted event stream: once with t_type == tt_start and once with tt_end.
class media_info {
public:
float type_time;//the time for start or end according to the m_type
float start_time;   // segment start on the global timeline (seconds)
float end_time;     // segment end on the global timeline (seconds)
string name;        // source file name
int rotate;         // rotation in degrees (0, 90, 180, ...)
float duration;     // end_time - start_time
int index;          // pairs a tt_start event with its tt_end event
unsigned int uid;   // stream/user id parsed from the file name
media_type m_type;  // audio / video / combined
media_role m_role;  // teacher or student stream
timestamp_type t_type; // start or end marker
};
// Collects the media_info segments that belong to one stream (one uid).
// Owned by CVideoTranscoder, which allocates one decoder per distinct uid.
class CVideoDecoder
{
public:
CVideoDecoder();
virtual ~CVideoDecoder();
// Append a segment for this stream; returns 0.
int add(media_info &info);
// uid of the stream this decoder handles.
unsigned int getuid();
protected:
vector<media_info> _info;  // accumulated segments, in arrival order
};
... ...
#include "VideoTranscoder.h"
// Default-construct the transcoder with no per-stream decoders yet.
CVideoTranscoder::CVideoTranscoder()
{
}
// Release the CVideoDecoder instances allocated with `new` in add().
// The previous empty destructor leaked every decoder, since _decoders
// stores raw owning pointers and nothing else deletes them.
CVideoTranscoder::~CVideoTranscoder()
{
	for (size_t i = 0; i < _decoders.size(); i++) {
		delete _decoders[i];
	}
	_decoders.clear();
}
// Route a segment to the decoder handling its uid, creating a new
// decoder when this uid has not been seen before. Always returns 0.
int CVideoTranscoder::add(media_info & info)
{
	CVideoDecoder * target = 0;
	// Find an existing decoder for this stream's uid.
	for (size_t i = 0; i < _decoders.size(); ++i) {
		if (_decoders[i]->getuid() == info.uid) {
			target = _decoders[i];
			break;
		}
	}
	if (target) {
		target->add(info);
	}
	else {
		// First segment of a new stream: allocate a dedicated decoder.
		target = new CVideoDecoder();
		target->add(info);
		_decoders.push_back(target);
	}
	return 0;
}
// TODO: not implemented yet. Expected (per process_av_files) to transcode
// the buffered segments and return the time up to which output is complete.
float CVideoTranscoder::transcode()
{
throw std::logic_error("The method or operation is not implemented.");
}
// TODO: not implemented yet. Intended to report whether every queued
// segment has been fully transcoded (used by process_av_files to drain).
bool CVideoTranscoder::all_processed()
{
throw std::logic_error("The method or operation is not implemented.");
}
// TODO: not implemented yet. Intended to finalize/flush the output once
// process_av_files has drained all input.
int CVideoTranscoder::close()
{
throw std::logic_error("The method or operation is not implemented.");
}
... ...
#pragma once
#include "VideoDecoder.h"
// Drives per-stream CVideoDecoder instances: segments added via add() are
// grouped by uid, then transcode()/all_processed()/close() run the merge.
class CVideoTranscoder
{
public:
CVideoTranscoder();
virtual ~CVideoTranscoder();
// Route one segment to the decoder owning its uid (created on demand).
int add(media_info & info);
// Process buffered segments; returns the completed-up-to time. (stub)
float transcode();
// True when everything queued has been processed. (stub)
bool all_processed();
// Finalize output. (stub)
int close();
protected:
vector < CVideoDecoder *> _decoders;  // owned; one per distinct uid
};
... ...
... ... @@ -8,28 +8,12 @@
#include <list>
#include <deque>
#include "tools.h"
#include "VideoTranscoder.h"
bool only_print = false;
bool keep_tmp_files = false;
bool out_one_video = true;
using namespace std;
enum media_type{
mt_audio = 0,
mt_video = 1,
mt_av = 3,
};
enum media_role {
mr_teacher =0,
mr_student =1,
};
enum timestamp_type{
tt_start = 0,
tt_end = 1,
};
class fileinfo {
public:
... ... @@ -37,25 +21,12 @@ public:
float end_time;
string name;
int index;
unsigned int uid;
media_type m_type;
media_role m_rote;
media_role m_role;
int rotate; //degree,0,90,180 ...
};
class media_info {
public:
float type_time;//the time for start or end according to the m_type
float start_time;
float end_time;
string name;
int rotate;
float duration;
int index;
media_type m_type;
media_role m_rote;
timestamp_type t_type;
};
vector<fileinfo> media_files;
... ... @@ -125,7 +96,7 @@ char acodec_param[1024];
char pip_param[1024];
char pip1_param[1024];
void addinfo(float t, string name, bool bstart,media_role role){
void addinfo(float t, string name, bool bstart,media_role role,unsigned int uid){
media_type mtype = name.substr(name.length() - 4, name.length()) == ".aac" ? mt_audio : mt_video;
if (bstart) {
fileinfo f;
... ... @@ -133,7 +104,8 @@ void addinfo(float t, string name, bool bstart,media_role role){
f.end_time = f.start_time;
f.name = name;
f.m_type = mtype;
f.m_rote = role;
f.m_role = role;
f.uid = uid;
f.rotate = 0;
media_files.push_back(f);
... ... @@ -169,16 +141,7 @@ void addinfo(const char * t, const char * name, const char * rotation){
}
}
void addinfo(float start, float duration, string name, int channel){
media_type mtype = mt_av;
fileinfo f;
f.start_time = start;
f.end_time = f.start_time + duration;
f.name = name;
f.m_type = mtype;
media_files.push_back(f);
}
void split(string str, string separator, vector<string> &result, bool includeEmptyItem = false) {
result.clear();
... ... @@ -501,48 +464,6 @@ void check_audio_duration()
only_print = tmp;
}
// Concatenate several audio files into `dest`:
// each source's audio track is demuxed (stream-copied) into an MPEG-TS
// segment via ffmpeg, the segments are concatenated, and the timestamps
// of the result are rewritten. Temporary .ts files are removed unless
// keep_tmp_files is set. Returns 0.
int merge_audio_file(vector<string> & files, const char * dest)
{
	vector<string> segments;
	char cmd[2048];
	for (size_t idx = 0; idx < files.size(); ++idx) {
		// Demux audio only (-vn) without re-encoding (-acodec copy).
		string segment = files[idx] + ".ts";
		segments.push_back(segment);
		sprintf(cmd, "ffmpeg -y -i %s -acodec copy -vn %s", files[idx].c_str(), segment.c_str());
		run_shell_cmd(cmd);
	}
	char merged[1024];
	sprintf(merged, "m_%s.ts", dest);
	concate_files(segments, merged);
	adjust_dest_timecode(merged, dest);
	if (!keep_tmp_files) {
		segments.push_back(merged);
		removefiles(segments);
	}
	return 0;
}
// Concatenate already-encoded A/V segments into `dest` and rewrite the
// timestamps of the combined stream. The intermediate m_<dest>.ts file is
// deleted unless keep_tmp_files is set. Returns 0.
int merge_av_file(vector<string> & files, const char * dest)
{
	char merged[1024];
	sprintf(merged, "m_%s.ts", dest);
	concate_files(files, merged);
	adjust_dest_timecode(merged, dest);
	if (!keep_tmp_files) {
		vector<string> to_remove;
		to_remove.push_back(merged);
		removefiles(to_remove);
	}
	return 0;
}
list <media_info> sorted_media;
... ... @@ -671,6 +592,8 @@ void add_media_infos()
m.duration = f.end_time - f.start_time;
m.type_time = m.start_time;
m.rotate = f.rotate;
m.m_role = f.m_role;
m.uid = f.uid;
add_media_info(m);
m.t_type = tt_end;
m.type_time = m.end_time;
... ... @@ -727,477 +650,8 @@ void init_merge_pip()
}
// Mix one video segment (files[0]) with the alternating audio start/end
// event pairs that follow it (files[1], files[2], ...). Gaps on the audio
// timeline are padded with silence so the merged audio spans the whole
// video, then the merged audio is muxed with the video into <nf>.ts.
// NOTE: relies on the global scratch buffers audio_file/destfile and the
// counters nf/tmp_files/merged_files declared elsewhere in this file.
int merge_audio_video(vector<media_info> & files)
{
vector<string> merge_audio_files;
int nsilence = 0;
media_info video = files[0];
float start_time = video.start_time;
// Audio events come in (start, end) pairs, hence the stride of 2.
for (int i = 1; i < files.size() - 1; i += 2){
media_info audio = files[i];
media_info audio_end = files[i + 1];
// Gap before this audio segment: insert silence of the gap's length.
if (audio.type_time - start_time > 0.1){
sprintf(audio_file, "%d_%d_silence.aac", nf, nsilence++);//a duration of silence
split_audio(silence_aac_file, 0, audio.type_time - start_time, audio_file);
merge_audio_files.push_back(audio_file);
tmp_files.push_back(audio_file);
}
// Trim the audio only when the event window differs noticeably (>100ms)
// from the file's own extent; otherwise use the file as-is.
if (audio.type_time - audio.start_time > 0.10 || audio_end.end_time - audio_end.type_time > 0.10) {
sprintf(audio_file, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), audio.type_time - audio.start_time, audio_end.type_time - audio.type_time, audio_file);
tmp_files.push_back(audio_file);
}
else{
strcpy(audio_file, audio.name.c_str());
}
start_time = audio_end.type_time;
merge_audio_files.push_back(audio_file);
// After the last audio pair, pad with silence up to the video's end.
if (i == files.size() - 2){
if (video.end_time - audio_end.type_time > 0.1){
sprintf(audio_file, "%d_%d_silence.aac", nf, nsilence++);//a duration of silence
split_audio(silence_aac_file, 0, video.end_time - audio_end.type_time, audio_file);
merge_audio_files.push_back(audio_file);
tmp_files.push_back(audio_file);
}
}
}
// Join all audio pieces, then mux them with the video into <nf>.ts.
sprintf(audio_file, "%d_merged.aac", nf);
merge_audio_file(merge_audio_files, audio_file);
tmp_files.push_back(audio_file);
sprintf(destfile, "%d.ts", nf);
merge_audio_video(audio_file, video.name.c_str(), video.rotate, destfile);
merged_files.push_back(destfile);
nf++;
return 0;
}
// Merge one audio segment (start event files[0], end event files[1]) with a
// still picture into <nf>.ts. The picture is the first frame of the next
// video in sorted_infos when one exists, otherwise the blank fallback image.
// Uses the global scratch buffers audio_file/pic_file/destfile and nf.
int merge_audio_pic(vector<media_info> & files)
{
media_info audio = files[0];
media_info audio_end = files[1];
// Trim the audio only when the event window differs noticeably (>100ms)
// from the file's own extent; otherwise use the file unchanged.
if (audio.type_time - audio.start_time > 0.10 || audio_end.end_time - audio_end.type_time > 0.10) {
sprintf(audio_file, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), audio.type_time - audio.start_time, audio_end.type_time - audio.type_time, audio_file);
tmp_files.push_back(audio_file);
}
else{
strcpy(audio_file, audio.name.c_str());
}
sprintf(destfile, "%d.ts", nf);
// Grab a frame from the next upcoming video to use as the backdrop.
int i = 0;
for (; i < sorted_infos.size(); i++){
if (sorted_infos[i].m_type == mt_video){
string name = sorted_infos[i].name;
sprintf(pic_file, "%s.jpg", name.c_str());
get_video_first_frame_jpeg(name.c_str(), pic_file);
tmp_files.push_back(pic_file);
break;
}
}
// No video left at all: fall back to the blank picture.
if (i == sorted_infos.size()){
strcpy(pic_file, blank_pic_file);
}
merge_audio_pic(audio_file, pic_file, destfile);
merged_files.push_back(destfile);
nf++;
return 0;
}
int find_video_between_the_audio()
{
int index = sorted_infos[0].index;
int video_index = 0;
for (int i = 1; i < sorted_infos.size(); i++){
if (sorted_infos[i].index == index)
break;
if (sorted_infos[i].m_type == mt_video)
{
video_index = i;
break;
}
}
return video_index;
}
// Turn one video segment (start event files[0], end event files[1]) into a
// single-PiP file <nf>_<name>: split out the requested window when it
// differs noticeably (>100ms) from the file's full extent, otherwise
// transcode the whole file. The result is queued in merged_files.
int transcode_file(vector<media_info> files)
{
char video_file[1024];
media_info video = files[0];
media_info video_end = files[1];
if (video.type_time - video.start_time > 0.10 || video_end.end_time - video_end.type_time > 0.10) {
sprintf(video_file, "%d_%s", nf, video.name.c_str());
split_av_for_1pip(video.name.c_str(), video.type_time - video.start_time, video_end.type_time - video.type_time, video_file);
tmp_files.push_back(video_file);
}
else{
// Whole file is used, but it still needs transcoding to 1-PiP layout.
sprintf(video_file, "%d_%s", nf, video.name.c_str());
transcode_to_1pip(video.name.c_str(), video_file);
tmp_files.push_back(video_file);
}
merged_files.push_back(video_file);
nf++;
return 0;
}
int find_file_between_the_firstfile()
{
int index = sorted_infos[0].index;
int video_index = -1;
for (int i = 1; i < sorted_infos.size(); i++){
if (sorted_infos[i].index == index)
break;
video_index = i;
}
return video_index;
}
int find_video_end()
{
int index = sorted_infos[0].index;
int video_index = 0;
for (int i = 1; i < sorted_infos.size(); i++){
if (sorted_infos[i].index == index){
video_index = i;
break;
}
}
return video_index;
}
bool is_audio_start(int index)
{
return (sorted_infos[index].m_type == mt_audio && sorted_infos[index].t_type == tt_start);
}
// An audio start event at position `audio_start` is immediately followed by
// a video end event. Cut the audio at the video's end time: the portion up
// to the cut goes into `result`, and a synthetic audio start event for the
// remainder is pushed back onto sorted_infos.
// Returns 1 when the audio's real end was close enough (<100ms) that no
// split was needed (the whole audio is consumed), 0 when it was split.
int split_audio_for_video(int audio_start,vector<media_info> & result)
{
media_info audio = sorted_infos[audio_start];
media_info video = sorted_infos[audio_start + 1];
result.clear();
for (int i = 0; i <= audio_start; i++){
result.push_back(sorted_infos[i]);
}
// If the audio's own end event follows right after the video end and is
// within 100ms of it, consume the audio whole instead of splitting.
if (sorted_infos[audio_start + 2].index == sorted_infos[audio_start].index){
if (sorted_infos[audio_start + 2].type_time - sorted_infos[audio_start + 1].type_time < 0.1){//no need to split
result.push_back(sorted_infos[audio_start + 2]);//put the audio end to the result
result.push_back(video);//push the video end to the result
for (int i = 0; i <= audio_start + 2; i++){ //remove the infos including the audio end
sorted_infos.pop_front();
}
return 1;
}
}
// Synthesize an audio end at the video's end time for the consumed part...
audio.t_type = tt_end;
audio.type_time = video.type_time;
result.push_back(audio);
result.push_back(video);
for (int i = 0; i <= audio_start +1; i++){ //remove the infos including the video end
sorted_infos.pop_front();
}
// ...and requeue the tail of the audio as a fresh start event.
audio.t_type = tt_start;
sorted_infos.push_front(audio);
return 0;
}
// Handle two overlapping channels: the first channel's end event sits at
// position 2. Emit the overlapping span into `result` (reordering the two
// start events so the one paired with that end comes first), and split
// whichever stream continues past the cut, requeueing its remainder as a
// new start event on sorted_infos.
// Returns 1 when the next event ended close enough (<100ms) that no split
// was needed, 0 when a stream was split.
int split_ch0_for_ch1(vector<media_info> & result)
{
media_info firstend = sorted_infos[2];
media_info next = sorted_infos[3];
result.clear();
// Order the two start events so the one matching `firstend` leads.
if (sorted_infos[0].index == firstend.index) {
for (int i = 0; i <= 2; i++){
result.push_back(sorted_infos[i]);
}
}
else {
result.push_back(sorted_infos[1]);
result.push_back(sorted_infos[0]);
result.push_back(sorted_infos[2]);
}
if (next.t_type == tt_end){
// The other stream also ends soon: consume it whole if within 100ms.
if (next.type_time - firstend.type_time < 0.1){//no need to split
result.push_back(next);//push the video end to the result
for (int i = 0; i < 4; i++){ //remove the infos
sorted_infos.pop_front();
}
return 1;
}
// Otherwise cut the other stream at firstend and requeue its tail.
next.type_time = firstend.type_time;
result.push_back(next);
for (int i = 0; i < 3; i++){ //remove the infos including the video end
sorted_infos.pop_front();
}
next.t_type = tt_start;
sorted_infos.push_front(next);
}
else {
// Next event is a new start: split the still-open stream (result[1])
// at firstend's time and requeue its remainder.
media_info should_split = result[1];
should_split.t_type = tt_end;
should_split.type_time = firstend.type_time;
result.push_back(should_split);
for (int i = 0; i < 3; i++){ //remove the infos including the video end
sorted_infos.pop_front();
}
should_split.t_type = tt_start;
sorted_infos.push_front(should_split);
}
return 0;
}
// An audio start (position 0) is followed by a video event (position 1).
// Cut the audio at the video's start so the leading audio-only part can be
// merged with a picture: the consumed span goes into `result`, the video
// event is re-queued at the front, and any audio remainder is re-queued
// behind it as a fresh start event.
// Returns 1 when the audio's real end was within 100ms of the cut (audio
// consumed whole), 0 when it was split.
int split_audio_for_pic(vector<media_info> & result)
{
media_info audio = sorted_infos[0];
media_info video = sorted_infos[1];
result.clear();
for (int i = 0; i < 1; i++){
result.push_back(sorted_infos[i]);
}
// Audio's own end follows right after the video and is close: no split.
if (sorted_infos[2].index == sorted_infos[0].index){
if (sorted_infos[2].type_time - sorted_infos[1].type_time < 0.1){//no need to split
result.push_back(sorted_infos[2]);//put the audio end to the result
for (int i = 0; i < 3; i++){
sorted_infos.pop_front();
}
sorted_infos.push_front(video);
return 1;
}
}
// Synthesize an audio end at the video's start time for the head part...
audio.t_type = tt_end;
audio.type_time = video.start_time;
result.push_back(audio);
for (int i = 0; i < 2; i++){
sorted_infos.pop_front();
}
// ...then requeue the audio tail behind the video event.
audio.t_type = tt_start;
sorted_infos.push_front(audio);
sorted_infos.push_front(video);
return 0;
}
// Same splitting scheme as split_audio_for_pic but type-agnostic: cut the
// head event (position 0) at the start of the following event (position 1),
// emit the consumed span into `result`, re-queue the second event at the
// front and any remainder of the first behind it.
// Returns 1 when the head's real end was within 100ms of the cut (consumed
// whole), 0 when it was split.
int split_ch0_for_no_process(vector<media_info> & result)
{
media_info first = sorted_infos[0];
media_info second = sorted_infos[1];
result.clear();
for (int i = 0; i < 1; i++){
result.push_back(sorted_infos[i]);
}
// Head's own end follows right after `second` and is close: no split.
if (sorted_infos[2].index == sorted_infos[0].index){
if (sorted_infos[2].type_time - sorted_infos[1].type_time < 0.1){//no need to split
result.push_back(sorted_infos[2]);//put the audio end to the result
for (int i = 0; i < 3; i++){
sorted_infos.pop_front();
}
sorted_infos.push_front(second);
return 1;
}
}
// Synthesize an end for the head at `second`'s start time...
first.t_type = tt_end;
first.type_time = second.start_time;
result.push_back(first);
for (int i = 0; i < 2; i++){
sorted_infos.pop_front();
}
// ...then requeue the head's tail behind `second`.
first.t_type = tt_start;
sorted_infos.push_front(first);
sorted_infos.push_front(second);
return 0;
}
// Move events [0, index_to] from the head of sorted_infos into
// cur_processing (which is cleared first).
void get_front_info(int index_to, vector<media_info> &cur_processing)
{
	cur_processing.clear();
	int remaining = index_to + 1;
	while (remaining-- > 0) {
		cur_processing.push_back(sorted_infos.front());
		sorted_infos.pop_front();
	}
}
// Drain sorted_infos one merge step at a time: a leading audio event is
// merged with a picture (splitting it first if a video interrupts it), a
// leading video event is merged with its overlapping audio. Loops until
// every event has been consumed. Returns 0.
int process_va()
{
vector<media_info> cur_processing;
while (sorted_infos.size())
{
media_type mt = sorted_infos[0].m_type;
if (mt == mt_audio){
int index = find_video_between_the_audio();
if (index > 0) //have_video
{
// A video starts inside the audio: cut the audio there first.
split_audio_for_pic(cur_processing);
}
else {
// Pure audio pair (start + end): take both events.
get_front_info(1, cur_processing);
}
merge_audio_pic(cur_processing);
}
else{
int index = find_video_end();
if (is_audio_start(index - 1)) {
// Audio extends past the video end: split the audio at it.
split_audio_for_video(index - 1, cur_processing);
}
else {
get_front_info(index, cur_processing);
}
merge_audio_video(cur_processing);
}
}
return 0;
}
// Merge every entry in media_files into a single output file.
// Homogeneous inputs (all audio / all video) are padded with a blank
// picture or silence respectively; mixed inputs go through process_va().
// The per-segment .ts results are concatenated, re-timestamped into
// output_dest_file, and temporaries are removed unless keep_tmp_files.
int process_files_to_1file(const char * output_dest_file)
{
//don't split video, for a video, using merged audios to mix with it
//for audio, mix with video or jpg
init_merge_av();
if (!media_files.size()){
return 0;
}
// judge if it is only one type
media_type mt = media_files[0].m_type;
bool only_one_type = true;
for (int i = 1; i < media_files.size(); i++){
if (mt != media_files[i].m_type){
only_one_type = false;
break;
}
}
if (only_one_type){
if (mt == mt_audio) {
// Audio-only: pair each file with the blank picture.
if (media_files.size() == 1){
fileinfo audio = media_files[0];
merge_audio_pic(audio.name.c_str(), blank_pic_file, "dest.ts");
return 0;
}
for (int i = 0; i < media_files.size(); i++){
fileinfo audio = media_files[i];
sprintf(destfile, "%d.ts", nf);
merge_audio_pic(audio.name.c_str(), blank_pic_file, destfile);
merged_files.push_back(destfile);
nf++;
}
}
else {
// Video-only: pair each file with a silent audio track.
if (media_files.size() == 1){
fileinfo video = media_files[0];
merge_video_silence(video, silence_aac_file, "dest.ts");
return 0;
}
for (int i = 0; i < media_files.size(); i++){
fileinfo video = media_files[i];
sprintf(destfile, "%d.ts", nf);
merge_video_silence(video, silence_aac_file, destfile);
merged_files.push_back(destfile);
nf++;
}
}
}
else {
// Mixed audio/video timeline: run the full event-driven merge.
process_va();
}
// Stitch the per-segment outputs together and fix the timestamps.
concate_files(merged_files, "m.ts");
tmp_files.push_back("m.ts");
adjust_dest_timecode("m.ts", output_dest_file);
if (!keep_tmp_files) {
removefiles(tmp_files);
removefiles(merged_files);
}
return 0;
}
// Finalize the pending merged_files into output_dest_file: a single segment
// is simply renamed; multiple segments are concatenated into m.ts and then
// re-timestamped. Clears merged_files/tmp_files afterwards and removes
// temporaries unless keep_tmp_files. Returns 0.
int concate_files_and_adjust_timecode(const char * output_dest_file){
if (merged_files.size() == 1){
// Only one segment: avoid a needless concat pass.
printf("rename %s to %s\n", merged_files[0].c_str(), output_dest_file);
remove(output_dest_file);
rename(merged_files[0].c_str(), output_dest_file);
}
else {
concate_files(merged_files, "m.ts");
tmp_files.push_back("m.ts");
adjust_dest_timecode("m.ts", output_dest_file);
}
if (!keep_tmp_files) {
removefiles(tmp_files);
removefiles(merged_files);
}
merged_files.clear();
tmp_files.clear();
return 0;
}
int get_output_file_name(int i, const char * filename, const char * prefix,char * outputfile){
int get_output_file_name(const char * filename, const char * prefix,char * outputfile){
char mainname[128];
const char * p = strstr(filename, ".");
if (p) {
... ... @@ -1208,36 +662,10 @@ int get_output_file_name(int i, const char * filename, const char * prefix,char
strcpy(mainname, filename);
}
sprintf(outputfile, "%s%s.ts",prefix, mainname, i);
sprintf(outputfile, "%s%s.ts",prefix, mainname);
return 0;
}
// Decide whether the segments merged so far should be flushed into a new
// output file. Flush when the gap to the next pending event exceeds 200ms,
// unless out_one_video is set — in that case the gap is bridged by holding
// the last frame of the previous output as a silent still. On a flush the
// output file name is produced into `outputfile`.
// NOTE(review): nOutPutFile is also incremented by the caller after a true
// return, and merged_files is assumed non-empty on the out_one_video path —
// verify against process_va_files.
bool is_need_output(int & nOutPutFile, vector<media_info> & cur_processing, const char * first_file, char * outputfile, const char * prefix="")
{
if (sorted_infos.size()) {
float lastEnd = cur_processing[cur_processing.size() - 1].type_time;
float nextStart = sorted_infos.front().type_time;
float gap = nextStart - lastEnd;
if ( gap < 0.2) {
// Next segment is contiguous enough: keep accumulating.
return false;
}
else if(out_one_video){
// Bridge the gap: extend the last frame of the previous output as a
// silent still of the gap's duration, then keep accumulating.
string last_merged_video = merged_files[merged_files.size() - 1];
char buf[1024];
sprintf(buf, "%s_last_frame.jpg", last_merged_video.c_str());
get_video_last_frame_jpeg(last_merged_video.c_str(), buf);
char buf_dest[1024];
sprintf(buf_dest, "%d.ts", nOutPutFile++);
merge_pic_silence(buf, gap, buf_dest);
merged_files.push_back(buf_dest);
tmp_files.push_back(buf);
return false;
}
}
get_output_file_name(nOutPutFile, first_file, prefix, outputfile);
return true;
}
void save_out_info(float start_time, char * outputfile)
{
... ... @@ -1247,70 +675,27 @@ void save_out_info(float start_time, char * outputfile)
}
}
// Like process_va(), but may split the result into several output files:
// after each merge step, if the gap to the next unprocessed event exceeds
// 200ms (and out_one_video is off), the accumulated segments are flushed
// into their own numbered output file. Returns 0.
int process_va_files()
{
char outputfile[1024];
vector<media_info> cur_processing;
int nOutPutFile = 0;
float start_time;
bool is_start = true;       // true while waiting for the first segment of a new output file
string start_file;
while (sorted_infos.size())
{
media_type mt = sorted_infos[0].m_type;
if (mt == mt_audio){
int index = find_video_between_the_audio();
if (index > 0) //have_video
{
// A video starts inside the audio: cut the audio there first.
split_audio_for_pic(cur_processing);
}
else {
get_front_info(1, cur_processing);
}
merge_audio_pic(cur_processing);
}
else{
int index = find_video_end();
if (is_audio_start(index - 1)) {
// Audio extends past the video end: split the audio at it.
split_audio_for_video(index - 1, cur_processing);
}
else {
get_front_info(index, cur_processing);
}
merge_audio_video(cur_processing);
}
//if the duration between the processed end and the start of not processed is large than 200 ms, reopen a new file
if (is_start){
start_time = cur_processing[0].start_time;
start_file = cur_processing[0].name;
is_start = false;
}
if (is_need_output(nOutPutFile, cur_processing, start_file.c_str(), outputfile, MERGED_PREFIX)){
nOutPutFile++;
concate_files_and_adjust_timecode(outputfile);
save_out_info(start_time, outputfile);
is_start = true;
}
}
return 0;
}
// parse the filename like 4165000_20180203013327202.aac
#define get_sub_str_to_x(x , source, len, result) strncpy(x, source, len); x[len] = 0; source += len; result = atoi(x);
time_t time_sec_1970_base = 0;
float get_start_time_from_filename(const char * filename)
float get_uid_start_time_from_filename(const char * filename, unsigned int &uid)
{
int year, month, day, hour, min, sec, minsec;
char buf[5];
const char * start = strstr(filename, "_");
const char * end = strstr(start + 1, "_");
if (end) {//get the next
*(char *)end = 0;
uid = atoi(start + 1);
*(char *)end = '_';
start = end;
}
else {
*(char *)start = 0;
uid = atoi(filename);
*(char *)start = '_';
}
start++;
end = strstr(start, ".");
... ... @@ -1347,7 +732,8 @@ int readfile(const char * filename, media_role role)
return -1;
}
float start_time = get_start_time_from_filename(filename);
unsigned int uid = 0;
float start_time = get_uid_start_time_from_filename(filename , uid);
const int LINE_LENGTH = 1000;
char str[LINE_LENGTH];
... ... @@ -1357,10 +743,10 @@ int readfile(const char * filename, media_role role)
split(str, " ", res);
if (res.size() >= 3) {
if (res[2] == "create"){
addinfo(start_time + atof(res[0].c_str()), res[1], true, role);
addinfo(start_time + atof(res[0].c_str()), res[1], true, role, uid);
}
else if (res[2] == "close") {
addinfo(start_time + atof(res[0].c_str()), res[1], false, role);
addinfo(start_time + atof(res[0].c_str()), res[1], false, role, uid);
}
else if (res[2] == "info") {
if (res.size() > 5) {
... ... @@ -1483,29 +869,6 @@ int load_record_info(char * record_info)
return 0;
}
// Entry point: merge recorded media into PiP video driven by the
// record-info file named in argv[1].
// NOTE(review): the usage text advertises -p and -k but only -d is parsed
// here; the only_print / keep_tmp_files globals are never settable.
int main(int argc, char * argv[])
{
if (argc < 2) {
printf(" merge_pip 2.0.0\n");
printf(" merge video files to one pip video according to record info file,\nusage:");
printf("\n %s record_info_filename [-p] [-k]", argv[0]);
printf("\n -d :individual files for different time segment");
printf("\n\n");
return -1;
}
get_config_path();
load_codec_param();
// Flag parsing starts at argv[2]; argv[1] is the record-info file.
for (int i = 2; i < argc; i++){
if (!strcmp(argv[i], "-d")){
out_one_video = false;
}
}
load_record_info(argv[1]);
}
#define __STDC_FORMAT_MACROS
... ... @@ -1615,6 +978,10 @@ static int open_output_file(const char *filename)
dec_ctx = in_stream->codec;
enc_ctx = out_stream->codec;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* in this example, we choose transcoding to same codec */
... ... @@ -1635,6 +1002,12 @@ static int open_output_file(const char *filename)
enc_ctx->pix_fmt = encoder->pix_fmts[0];
/* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = dec_ctx->time_base;
enc_ctx->me_range = 16;
enc_ctx->max_qdiff = 4;
enc_ctx->qmin = 10;
enc_ctx->qmax = 30;
enc_ctx->qcompress = 0.6;
}
else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
... ... @@ -1666,10 +1039,6 @@ static int open_output_file(const char *filename)
return ret;
}
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(ofmt_ctx, 0, filename, 1);
... ... @@ -1972,7 +1341,7 @@ static int flush_encoder(unsigned int stream_index)
return ret;
}
int transcode(int argc, char *argv[]){
int transcode(const char * input){
int ret;
AVPacket packet;
AVFrame *frame = NULL;
... ... @@ -1983,17 +1352,12 @@ int transcode(int argc, char *argv[]){
int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
memset(&packet, 0, sizeof(AVPacket));
if (argc != 3) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
return 1;
}
char output[1024];
get_output_file_name(input, "pip_", output);
av_register_all();
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
if ((ret = open_input_file(input)) < 0)
goto end;
if ((ret = open_output_file(argv[2])) < 0)
if ((ret = open_output_file(output)) < 0)
goto end;
if ((ret = init_filters()) < 0)
goto end;
... ... @@ -2092,4 +1456,66 @@ end:
return ret ? 1 : 0;
}
#define MIN_TIME_INTERVAL 0.2
// New transcoder-driven pipeline: feed sorted_media events whose start time
// is within MIN_TIME_INTERVAL of the current position into the transcoder,
// transcode to advance the position, repeat until input is exhausted, then
// drain and close.
// NOTE(review): CVideoTranscoder::transcode/all_processed/close currently
// throw logic_error (unimplemented), so this path cannot run yet.
int process_av_files()
{
av_register_all();
avfilter_register_all();
CVideoTranscoder videoTranscoder;
float cur_time = 0.0;
bool has_file = sorted_media.size();
while (has_file){
// Queue every event that starts near the current position.
while (has_file){
media_info info = sorted_media.front();
if (info.start_time - cur_time < MIN_TIME_INTERVAL) {
sorted_media.pop_front();
videoTranscoder.add(info);
}
else {
break;
}
has_file = sorted_media.size();
}
// Advance the completed-up-to time by transcoding what was queued.
cur_time = videoTranscoder.transcode();
}
// NOTE(review): looping *while* all_processed() is true looks inverted —
// draining the remainder presumably needs while (!all_processed()). Confirm
// the intended semantics once all_processed() is implemented.
while (videoTranscoder.all_processed()){
cur_time = videoTranscoder.transcode();
}
videoTranscoder.close();
return 0;
}
int main(int argc, char * argv[])
{
if (argc < 2) {
printf(" merge_pip 2.0.0\n");
printf(" merge video files to one pip video according to record info file,\nusage:");
printf("\n %s record_info_filename [-p] [-k]", argv[0]);
printf("\n -d :individual files for different time segment");
printf("\n\n");
return -1;
}
get_config_path();
load_codec_param();
for (int i = 2; i < argc; i++){
if (!strcmp(argv[i], "-d")){
out_one_video = false;
}
}
load_record_info(argv[1]);
process_av_files();
}
... ...
... ... @@ -84,6 +84,12 @@
<ItemGroup>
<ClCompile Include="merge_pip.cpp" />
<ClCompile Include="tools.cpp" />
<ClCompile Include="VideoDecoder.cpp" />
<ClCompile Include="VideoTranscoder.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="VideoDecoder.h" />
<ClInclude Include="VideoTranscoder.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
... ...
... ... @@ -24,5 +24,19 @@
<ClCompile Include="merge_pip.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="VideoTranscoder.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="VideoDecoder.cpp">
<Filter>源文件</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="VideoTranscoder.h">
<Filter>头文件</Filter>
</ClInclude>
<ClInclude Include="VideoDecoder.h">
<Filter>头文件</Filter>
</ClInclude>
</ItemGroup>
</Project>
\ No newline at end of file
... ...