胡斌

Needs to be improved.

... ... @@ -184,14 +184,14 @@ void merge_video_silence(fileinfo video, const char * aacfile, const char * dest
run_shell_cmd(buf);
}
void megre_audio_video(fileinfo audio, int nf, fileinfo video, const char * destfile)
void merge_audio_video(fileinfo audio, int nf, fileinfo video, const char * destfile)
{
char buf[2048];
sprintf(buf, "ffmpeg -y -i %d_%s -i %s %s %s %s", nf, audio.name.c_str(), video.name.c_str(), acodec_param, vcodec_param, destfile);
run_shell_cmd(buf);
}
void megre_audio_video(const char * audio, const char * video, const char * destfile)
void merge_audio_video(const char * audio, const char * video, const char * destfile)
{
char buf[2048];
sprintf(buf, "ffmpeg -y -i %s -i %s %s %s %s", audio, video, acodec_param, vcodec_param, destfile);
... ... @@ -422,6 +422,8 @@ int process_files(const char * output_dest_file)
check_audio_duration();
get_duration_from_video_file();
//don't split video, for a video, using merged audios to mix with it
//for audio, mix with video or jpg
if (filesvideo.size()) {//has video files
if (filesaudio.size()){
... ... @@ -446,16 +448,136 @@ int process_files(const char * output_dest_file)
for (; nv < filesvideo.size(); nv++) {
fileinfo video = filesvideo[nv];
if (video.start_time - audio_start > 0.1) {//video is behand audio too much
if (video.start_time < audio_start - 0.1)
{//video is much more ahead of audio,try padding silence first
if (video.end_time < audio_start + 0.1) {
sprintf(destfile, "%d_silence.aac", nf);
split_audio(silence_aac_file, 0, video.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d.ts", nf);
merge_audio_video(destfile, video.name.c_str(), destfile);
merged_files.push_back(destfile);
nf++;
continue; //for next video
}
else {
// combine a audio file for the video
double silence_audio_start = audio.end_time;//maybe need append silence
double silence_audio_end = video.end_time;
bool need_append_silence = true;
bool to_next_video = false;
vector<std::string > merge_audio_files;
sprintf(destfile, "%d_0_silence.aac", nf);//a duration of silence
split_audio(silence_aac_file, 0, audio_start - video.start_time, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
if (audio.end_time < video.end_time + 0.1 && audio.end_time > video.end_time - 0.1) {
merge_audio_files.push_back(audio.name); //whole audio file,just fit
audio_start = audio.end_time + 0.1;
need_append_silence = false;
}
else if (audio.end_time > video.end_time){ //split part of audio file
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
split_audio(audio.name.c_str(), audio_start, video.end_time - audio_start, destfile);
audio_start = video.end_time;
need_append_silence = false;
}
else {
merge_audio_files.push_back(audio.name);
for (; i + 1 < filesaudio.size(); i++){//since video is not finished,try find next audio
audio = filesaudio[i + 1];
if (audio.start_time < video.end_time) {//next audio should split to fit the video
silence_audio_end = audio.start_time;
sprintf(destfile, "%d_%d_silence.aac", nf, i);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
if (audio.end_time > video.end_time - 0.1 && audio.end_time < video.end_time + 0.1) {//just match
merge_audio_files.push_back(audio.name);
need_append_silence = false;
audio_start = audio.end_time + 0.1;
i++;//this audio is used
break;
}
if (audio.end_time > video.end_time){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
split_audio(audio.name.c_str(), 0, video.end_time - audio.start_time, destfile);
need_append_silence = false;
//adjust timecode for the audio is part left
float cur_audio_start = video.end_time - audio.start_time;
audio_start = audio.start_time;
for (int j = i + 1; j < filesaudio.size(); j++){
filesaudio[j].start_time -= audio_start;
filesaudio[j].end_time -= audio_start;
}
for (int j = nv; j < filesvideo.size(); j++) {
filesvideo[j].start_time -= audio_start;
filesvideo[j].end_time -= audio_start;
}
i++;
audio = filesaudio[i];
audio_start = cur_audio_start;
to_next_video = true;
break;
}
merge_audio_files.push_back(audio.name);//whole audio should be appended
silence_audio_start = audio.end_time; //adjust the silence start
}
else {
break;//no need for next audio
}
}//end audio find for the video
}//end else
if (need_append_silence) {
sprintf(destfile, "%d_silence.aac", nf);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
}
sprintf(audio_file, "%d_merged.aac", nf);
merge_audio_file(merge_audio_files, audio_file);
sprintf(destfile, "%d.ts", nf);
merge_audio_video(audio_file, video.name.c_str(), destfile);
merged_files.push_back(destfile);
nf++;
if (!to_next_video){
nv++;//this video is processed
break;
}
}//end need combine
}//end video is ahead of audio
//-----VS-----
//AS----------
if (video.start_time - audio_start > 0.1) {//video is behind audio too much
sprintf(audio_file, "%d_%s", nf, audio.name.c_str());
if (video.start_time < audio.end_time - 0.1){
split_audio(audio.name.c_str(), audio_start, video.start_time - audio_start, audio_file);
audio_start = video.start_time;
tmp_files.push_back(audio_file);
}
else {
strcpy(audio_file, audio.name.c_str());
split_audio(audio.name.c_str(), audio_start, audio.end_time - audio_start, audio_file);
}
tmp_files.push_back(audio_file);
sprintf(pic_file, "%s.jpg", video.name.c_str());
get_video_first_frame_jpeg(video, pic_file);
... ... @@ -466,132 +588,147 @@ int process_files(const char * output_dest_file)
merged_files.push_back(destfile);
nf++;
if (video.start_time > audio.end_time){//all audio file no video, to next audio
if (video.start_time >= audio.end_time - 0.1){//all audio file no video, to next audio
audio_start = audio.end_time + 0.1;//no audio left
break;
}
}
//----AS--------
//----VS--------
else if (audio_start - video.start_time < 0.1){
if (audio.end_time > video.end_time){ //this video finish, to next video
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, video.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
if (audio.end_time > video.end_time){ //this video finish, to next video
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, video.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
audio_start = video.end_time;
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
nf++;
}
else if (video.end_time - audio.end_time < 0.1){//just fine, this audio file finish
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
audio_start = video.end_time;
sprintf(destfile, "%d.ts", nf);
merge_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
nf++;
}
else if (video.end_time - audio.end_time < 0.1){//just fine, this audio file finish
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
audio_start = audio.end_time + 0.1;//no audio left
nf++;
nv++;//this video is used
break;
}
else { // this audio finish,add silence and/or next audio
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
vector<std::string > merge_audio_files;
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
double silence_audio_start = audio.end_time;
double silence_audio_end = video.end_time;
bool need_slilence = true;
bool to_next_video = false;
for (; i + 1 < filesaudio.size(); i++){//since video is not finished,try find next audio
audio = filesaudio[i + 1];
if (audio.start_time < video.end_time) {//next audio should split to fit the video
silence_audio_end = audio.start_time;
sprintf(destfile, "%d_%d_silence.aac", nf, i);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d.ts", nf);
merge_audio_video(audio, nf, video, destfile);
merged_files.push_back(destfile);
audio_start = audio.end_time + 0.1;//no audio left
nf++;
nv++;//this video is used
break;
}
else { // this audio finish,add silence and/or next audio
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), video.start_time, audio.end_time - video.start_time, destfile);
vector<std::string > merge_audio_files;
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
if (audio.end_time > video.end_time - 0.1 && audio.end_time < video.end_time + 0.1) {//just match
merge_audio_files.push_back(audio.name);
need_slilence = false;
i++;
break;
}
if (audio.end_time > video.end_time){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
audio_start = audio.end_time + 0.1;
double silence_audio_start = audio.end_time;
double silence_audio_end = video.end_time;
bool need_silence = true;
bool to_next_video = false;
for (; i + 1 < filesaudio.size(); i++){//since video is not finished,try find next audio
audio = filesaudio[i + 1];
if (audio.start_time < video.end_time) {//next audio should split to fit the video
silence_audio_end = audio.start_time;
sprintf(destfile, "%d_%d_silence.aac", nf, i);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
split_audio(audio.name.c_str(), 0, video.end_time - audio.start_time, destfile);
need_slilence = false;
//adjust timecode for the audio is part left
float cur_audio_start = video.end_time - audio.start_time;
audio_start = audio.start_time;
for (int j = i + 1; j < filesaudio.size(); j++){
filesaudio[j].start_time -= audio_start;
filesaudio[j].end_time -= audio_start;
if (audio.end_time > video.end_time - 0.1 && audio.end_time < video.end_time + 0.1) {//just match
merge_audio_files.push_back(audio.name);
need_silence = false;
audio_start = audio.end_time + 0.1;
i++;
break;
}
for (int j = nv; j < filesvideo.size(); j++) {
filesvideo[j].start_time -= audio_start;
filesvideo[j].end_time -= audio_start;
if (audio.end_time > video.end_time){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
split_audio(audio.name.c_str(), 0, video.end_time - audio.start_time, destfile);
need_silence = false;
//adjust timecode for the audio is part left
float cur_audio_start = video.end_time - audio.start_time;
audio_start = audio.start_time;
for (int j = i + 1; j < filesaudio.size(); j++){
filesaudio[j].start_time -= audio_start;
filesaudio[j].end_time -= audio_start;
}
for (int j = nv; j < filesvideo.size(); j++) {
filesvideo[j].start_time -= audio_start;
filesvideo[j].end_time -= audio_start;
}
i++;
audio = filesaudio[i];
audio_start = cur_audio_start;
to_next_video = true;
break;
}
i++;
audio = filesaudio[i];
audio_start = cur_audio_start;
to_next_video = true;
break;
merge_audio_files.push_back(audio.name);//whole audio should be appended
silence_audio_start = audio.end_time; //adjust the silence start
audio_start = audio.end_time + 0.1;
}
else {
break;//no need for next audio
}
merge_audio_files.push_back(audio.name);//whole audio should be appended
silence_audio_start = audio.end_time; //adjust the silence start
}
else {
break;//no need for next audio
}
}
if (need_slilence) {
sprintf(destfile, "%d_silence.aac", nf);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
}
if (need_silence) {
sprintf(destfile, "%d_silence.aac", nf);
split_audio(silence_aac_file, 0, silence_audio_end - silence_audio_start, destfile);
merge_audio_files.push_back(destfile);
tmp_files.push_back(destfile);
}
sprintf(audio_file, "%d_merged.aac", nf);
merge_audio_file(merge_audio_files, audio_file);
sprintf(audio_file, "%d_merged.aac", nf);
merge_audio_file(merge_audio_files, audio_file);
sprintf(destfile, "%d.ts", nf);
megre_audio_video(audio_file, video.name.c_str(), destfile);
merged_files.push_back(destfile);
nf++;
sprintf(destfile, "%d.ts", nf);
merge_audio_video(audio_file, video.name.c_str(), destfile);
merged_files.push_back(destfile);
nf++;
if (!to_next_video){
nv++;
break;
if (!to_next_video){
nv++;
break;
}
}
}
}
if (audio_start < audio.end_time){
sprintf(destfile, "%d_%s", nf, audio.name.c_str());
split_audio(audio.name.c_str(), audio_start, audio.end_time - audio_start, destfile);
tmp_files.push_back(destfile);
sprintf(destfile, "%d.ts", nf);
merge_audio_pic(audio, nf, blank_pic_file, destfile);
if (nv < filesvideo.size()) {
fileinfo video = filesvideo[nv];
sprintf(pic_file, "%s.jpg", video.name.c_str());
get_video_first_frame_jpeg(video, pic_file);
tmp_files.push_back(pic_file);
}
else {
strcpy(pic_file, blank_pic_file);
}
merge_audio_pic(audio, nf, pic_file, destfile);
merged_files.push_back(destfile);
nf++;
}
... ...