胡斌

seems ok

... ... @@ -28,9 +28,12 @@ vector<fileinfo> media_files[2];
// Execute a shell command line via system(), or — when the global
// only_print flag is set (dry-run mode) — just echo the command and skip
// execution.
//   cmd - NUL-terminated shell command line; passed to system() verbatim.
// NOTE(review): this span was a pasted diff; the stale pre-change lines
// (duplicate printf and the old `if (!only_print)` guard, which left the
// braces unbalanced) have been removed, keeping only the post-change body.
void run_shell_cmd(const char * cmd)
{
    if (only_print){
        // Dry run: show what would be executed, do not run it.
        printf("%s\n", cmd);
    }
    else
    {
        printf("run command:%s\n", cmd);
        system(cmd);
    }
}
... ... @@ -167,6 +170,13 @@ void merge_audio_pic(fileinfo audio, const char * picfile, const char * destfile
run_shell_cmd(buf);
}
// Build an output segment from a still picture plus an audio file: ffmpeg
// loops the single image (-loop 1) for the duration of the audio
// (-shortest) and encodes using the global acodec_param / vcodec_param
// option strings. The command is executed through run_shell_cmd, so it
// honors the dry-run (only_print) mode.
//   audio    - path of the input audio file
//   picfile  - path of the still image used as the video track
//   destfile - path of the output file
void merge_audio_pic(const char * audio, const char * picfile, const char * destfile)
{
    char buf[2048];
    // snprintf instead of sprintf: three caller-supplied paths plus the two
    // codec parameter strings can exceed 2048 bytes; bounded truncation is
    // safer than overflowing the stack buffer.
    snprintf(buf, sizeof(buf), "ffmpeg -y -loop 1 -i %s -i %s -loop 0 -shortest %s %s %s", picfile, audio, acodec_param, vcodec_param, destfile);
    run_shell_cmd(buf);
}
void merge_video_silence(fileinfo video, const char * aacfile, const char * destfile)
{
char buf[2048];
... ... @@ -400,7 +410,7 @@ int process_files(const char * output_dest_file)
vector<string> tmp_files;
int nv = 0;
int nf = 0;
char destfile[1024],audio_file[1024];
char destfile[1024],audio_file[1024],pic_file[1024];
char blank_pic_file[1024];
char silence_aac_file[1024];
... ... @@ -431,146 +441,159 @@ int process_files(const char * output_dest_file)
audio = filesaudio[i];
audio_start = 0;
audio_start = 0;//for a new processing audio,the start is 0
for (; nv < filesvideo.size(); nv++) {
fileinfo video = filesvideo[nv];
if (video.start_time - audio_start > 0.100) {
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
if (video.start_time > audio.end_time){
split_audio(audio.name.c_str(), audio_start, audio.end_time - audio_start, destfile);
}
else{
split_audio(audio.name.c_str(), audio_start, video.start_time - audio_start, destfile);
if (video.start_time - audio_start > 0.1) {//video is behand audio too much
sprintf(audio_file, "%d_%s", nf, audio.name.c_str());
if (video.start_time < audio.end_time - 0.1){
split_audio(audio.name.c_str(), audio_start, video.start_time - audio_start, audio_file);
audio_start = video.start_time;
tmp_files.push_back(audio_file);
}
else {
strcpy(audio_file, audio.name.c_str());
}
tmp_files.push_back(destfile);
sprintf(destfile, "%s.jpg", video.name.c_str());
get_video_first_frame_jpeg(video, destfile);
tmp_files.push_back(destfile);
sprintf(pic_file, "%s.jpg", video.name.c_str());
get_video_first_frame_jpeg(video, pic_file);
tmp_files.push_back(pic_file);
sprintf(destfile, "%d.ts", nf);
merge_audio_pic(audio, nf, video, destfile);
merge_audio_pic(audio_file, pic_file, destfile);
merged_files.push_back(destfile);
nf++;
if (video.start_time > audio.end_time){//to next audio
if (video.start_time > audio.end_time){//all audio file no video, to next audio
audio_start = audio.end_time + 0.1;//no audio left
break;
}
}
if (nv != filesvideo.size() - 1) {// not the last one
if (audio.end_time > video.end_time){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, video.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
audio_start = video.end_time;
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
nf++;
}
else if (video.end_time - audio.end_time < 0.1){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
if (audio.end_time > video.end_time){ //this video finish, to next video
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, video.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
nf++;
break;
}
else {
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
vector<std::string > merge_audio_files;
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
double silence_audio_start = audio.end_time;
double silence_audio_end = video.end_time;
audio_start = video.end_time;
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
nf++;
}
else if (video.end_time - audio.end_time < 0.1){//just fine, this audio file finish
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
audio_start = audio.end_time + 0.1;//no audio left
nf++;
nv++;//this video is used
break;
}
else { // this audio finish,add silence and/or next audio
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
vector<std::string > merge_audio_files;
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
double silence_audio_start = audio.end_time;
double silence_audio_end = video.end_time;
bool need_slilence = true;
bool to_next_video = false;
for (; i + 1 < filesaudio.size(); i++){//since video is not finished,try find next audio
audio = filesaudio[i + 1];
if (audio.start_time < video.end_time) {//next audio should split to fit the video
silence_audio_end = audio.start_time;
for (; i + 1 < filesaudio.size(); i++){
audio = filesaudio[i + 1];
if (audio.start_time < video.end_time) {
silence_audio_end = audio.start_time;
sprintf(destfile, "%d_%d_silence.aac", nf, i);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d_%d_silence.aac", nf, i);
split_audio(silence_aac_file,0, silence_audio_end - audio.end_time, destfile);
if (audio.end_time > video.end_time - 0.1 && audio.end_time < video.end_time + 0.1) {//just match
merge_audio_files.push_back(audio.name);
need_slilence = false;
i++;
break;
}
if (audio.end_time > video.end_time){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
split_audio(audio.name.c_str(), 0, video.end_time - audio.start_time, destfile);
need_slilence = false;
//adjust timecode for the audio is part left
float cur_audio_start = video.end_time - audio.start_time;
audio_start = audio.start_time;
if (audio.end_time > video.end_time - 0.1 && audio.end_time < video.end_time + 0.1) {
merge_audio_files.push_back(audio.name);
i++;
break;
for (int j = i + 1; j < filesaudio.size(); j++){
filesaudio[j].start_time -= audio_start;
filesaudio[j].end_time -= audio_start;
}
if (audio.end_time > video.end_time){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
split_audio(audio.name.c_str(), 0, video.end_time - audio.start_time, destfile);
break;
for (int j = nv; j < filesvideo.size(); j++) {
filesvideo[j].start_time -= audio_start;
filesvideo[j].end_time -= audio_start;
}
merge_audio_files.push_back(audio.name);
}
else {
i++;
audio = filesaudio[i];
audio_start = cur_audio_start;
to_next_video = true;
break;
}
merge_audio_files.push_back(audio.name);//whole audio should be appended
silence_audio_start = audio.end_time; //adjust the silence start
}
else {
break;//no need for next audio
}
}
sprintf(audio_file, "%d_merged.aac", nf);
merge_audio_file(merge_audio_files, audio_file);
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio_file, video.name.c_str(), destfile);
merged_files.push_back(destfile);
nf++;
break;
if (need_slilence) {
sprintf(destfile, "%d_silence.aac", nf);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
}
}
else {
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
sprintf(audio_file, "%d_merged.aac", nf);
merge_audio_file(merge_audio_files, audio_file);
if (audio.end_time - video.end_time < 1.0) {
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
audio_start = video.end_time;
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio_file, video.name.c_str(), destfile);
merged_files.push_back(destfile);
nf++;
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
nf++;
if (!to_next_video){
nv++;
break;
}
else{
split_audio(audio.name.c_str(), video.start_time, video.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
}
}
audio_start = video.end_time;
if (audio_start < audio.end_time){
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
nf++;
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.end_time, audio.end_time - video.end_time, destfile);
tmp_files.push_back(destfile);
split_audio(audio.name.c_str(), audio_start, audio.end_time - audio_start, destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d.ts", nf);
merge_audio_pic(audio, nf, blank_pic_file, destfile);
merged_files.push_back(destfile);
nf++;
}
}
sprintf(destfile, "%d.ts", nf);
merge_audio_pic(audio, nf, blank_pic_file, destfile);
merged_files.push_back(destfile);
nf++;
}
}
}
... ...