胡斌 / merge_av
Authored by 胡斌, 2018-11-21 21:59:59 +0800
Commit f6f4d7a0c5350ebcc917b3249e7f93fbc6808f3f (f6f4d7a0), 1 parent c73db2f8
first commit of using ffmpeg code
Showing 2 changed files with 613 additions and 316 deletions:
pip/merge_pip.cpp
pip/pip.vcxproj
pip/merge_pip.cpp
...
...
@@ -21,6 +21,11 @@ enum media_type{
	mt_av = 3,
};
enum media_role {
	mr_teacher = 0,
	mr_student = 1,
};
enum timestamp_type {
	tt_start = 0,
	tt_end = 1,
...
...
@@ -33,7 +38,7 @@ public:
	string name;
	int index;
	media_type m_type;
	int channel;
	media_role m_rote;
	int rotate; //degree,0,90,180 ...
};
...
...
@@ -47,8 +52,8 @@ public:
	float duration;
	int index;
	int channel;
	media_type m_type;
	media_role m_rote;
	timestamp_type t_type;
};
...
...
@@ -120,40 +125,24 @@ char acodec_param[1024];
char pip_param[1024];
char pip1_param[1024];
bool first_time_set = false;
float start_time = 0.0f;
void init_read_file()
{
	first_time_set = false;
	start_time = 0.0f;
}
void addinfo(string t, string name, bool bstart){
void addinfo(float t, string name, bool bstart, media_role role){
	media_type mtype = name.substr(name.length() - 4, name.length()) == ".aac" ? mt_audio : mt_video;
	if (bstart)
	{
		fileinfo f;
		f.start_time = atof(t.c_str());
		f.start_time = t;
		f.end_time = f.start_time;
		f.name = name;
		f.m_type = mtype;
		f.m_rote = role;
		f.rotate = 0;
		if (!first_time_set)
		{
			first_time_set = true;
			start_time = f.start_time;
		}
		f.start_time -= start_time;
		media_files.push_back(f);
	}
	else
	{
		int i;
		for (i = 0; i < media_files.size(); i++)
		{
			if (media_files[i].name == name)
			{
				media_files[i].end_time = atof(t.c_str());
				media_files[i].end_time -= start_time;
				media_files[i].end_time = t;
				break;
			}
		}
...
...
@@ -187,7 +176,6 @@ void addinfo(float start, float duration, string name, int channel){
	f.end_time = f.start_time + duration;
	f.name = name;
	f.m_type = mtype;
	f.channel = channel;
	media_files.push_back(f);
}
...
...
@@ -680,7 +668,6 @@ void add_media_infos()
		m.end_time = f.end_time;
		m.m_type = f.m_type;
		m.t_type = tt_start;
		m.channel = f.channel;
		m.duration = f.end_time - f.start_time;
		m.type_time = m.start_time;
		m.rotate = f.rotate;
...
...
@@ -847,54 +834,6 @@ int find_video_between_the_audio()
	return video_index;
}
int merge_video_pip(vector<media_info> & files)
{
	char ch0_file[1024], ch1_file[1024];
	vector<string> merge_video_files;
	media_info ch0_start, ch0_end, ch1_start, ch1_end;
	if (files[0].channel == 0)
	{
		ch0_start = files[0];
		ch0_end = files[2];
		ch1_start = files[1];
		ch1_end = files[3];
	}
	else
	{
		ch0_start = files[1];
		ch0_end = files[3];
		ch1_start = files[0];
		ch1_end = files[2];
	}
	if (ch0_start.type_time - ch0_start.start_time > 0.10 || ch0_end.end_time - ch0_end.type_time > 0.10)
	{
		sprintf(ch0_file, "%d_%s", nf, ch0_start.name.c_str());
		split_av(ch0_start.name.c_str(), ch0_start.type_time - ch0_start.start_time, ch0_end.type_time - ch0_start.type_time, ch0_file);
		tmp_files.push_back(ch0_file);
	}
	else
	{
		strcpy(ch0_file, ch0_start.name.c_str());
	}
	if (ch1_start.type_time - ch1_start.start_time > 0.10 || ch1_end.end_time - ch1_end.type_time > 0.10)
	{
		sprintf(ch1_file, "%d_%s", nf, ch1_start.name.c_str());
		split_av(ch1_start.name.c_str(), ch1_start.type_time - ch1_start.start_time, ch1_end.type_time - ch1_start.type_time, ch1_file);
		tmp_files.push_back(ch1_file);
	}
	else
	{
		strcpy(ch1_file, ch1_start.name.c_str());
	}
	sprintf(destfile, "%d.ts", nf);
	merge_video_pip(ch0_file, ch1_file, destfile);
	merged_files.push_back(destfile);
	nf++;
	return 0;
}
int transcode_file(vector<media_info> files)
{
...
...
@@ -952,10 +891,6 @@ bool is_audio_start(int index)
	return (sorted_infos[index].m_type == mt_audio && sorted_infos[index].t_type == tt_start);
}
bool is_ch0_start(int index)
{
	return (sorted_infos[index].channel == 0 && sorted_infos[index].t_type == tt_start);
}
int split_audio_for_video(int audio_start, vector<media_info> & result)
...
...
@@ -1362,159 +1297,7 @@ int process_va_files()
	return 0;
}
int process_merged_files()
{
	char outputfile[1024];
	vector<media_info> cur_processing;
	int nOutPutFile = 0;
	float start_time;
	bool is_start = true;
	string start_file;
	while (sorted_infos.size())
	{
		int channel = sorted_infos[0].channel;
		if (sorted_infos[1].index == sorted_infos[0].index)
		{
			get_front_info(1, cur_processing);
			transcode_file(cur_processing);
		}
		else if (sorted_infos[1].type_time - sorted_infos[0].type_time > 0.2)
		{
			split_ch0_for_no_process(cur_processing);
			transcode_file(cur_processing);
		}
		else
		{
			split_ch0_for_ch1(cur_processing);
			merge_video_pip(cur_processing);
		}
		//if the duration between the processed end and the start of not processed is large than 200 ms, reopen a new file
		if (is_start){
			start_time = cur_processing[0].start_time;
			start_file = cur_processing[0].name;
			is_start = false;
		}
		if (is_need_output(nOutPutFile, cur_processing, start_file.c_str(), outputfile, PIP_PREFIX)){
			nOutPutFile++;
			concate_files_and_adjust_timecode(outputfile);
			save_out_info(start_time, outputfile);
			is_start = true;
		}
	}
	return 0;
}
int process_record_file_to_ts()
{
	//don't split video, for a video, using merged audios to mix with it
	//for audio, mix with video or jpg
	char outputfile[1024];
	init_merge_av();
	if (!media_files.size()){
		return 0;
	}
	fp_out_info = fopen(out_info_file, "wt");
	// judge if it is only one type
	media_type mt = media_files[0].m_type;
	bool only_one_type = true;
	for (int i = 1; i < media_files.size(); i++){
		if (mt != media_files[i].m_type){
			only_one_type = false;
			break;
		}
	}
	if (only_one_type){
		if (mt == mt_audio)
		{
			for (int i = 0; i < media_files.size(); i++){
				fileinfo audio = media_files[i];
				get_output_file_name(i, audio.name.c_str(), MERGED_PREFIX, outputfile);
				merge_audio_pic(audio.name.c_str(), blank_pic_file, outputfile);
				save_out_info(audio.start_time, outputfile);
			}
		}
		else
		{
			for (int i = 0; i < media_files.size(); i++){
				fileinfo video = media_files[i];
				get_output_file_name(i, video.name.c_str(), MERGED_PREFIX, outputfile);
				merge_video_silence(video, silence_aac_file, destfile);
				save_out_info(video.start_time, outputfile);
			}
		}
	}
	else
	{
		process_va_files();
	}
	if (fp_out_info)
	{
		fclose(fp_out_info);
	}
	return 0;
}
int process_merged_files_to_pip_files()
{
	//don't split video, for a video, using merged audios to mix with it
	//for audio, mix with video or jpg
	char outputfile[1024];
	if (!media_files.size()){
		return 0;
	}
	init_merge_pip();
	fp_out_info = fopen(out_info_file, "wt");
	process_merged_files();
	if (fp_out_info)
	{
		fclose(fp_out_info);
	}
	return 0;
}
int readfile(const char * filename)
{
	init_read_file();
	media_files.clear();
	ifstream fin(filename);
	if (!fin)
	{
		return -1;
	}
	const int LINE_LENGTH = 1000;
	char str[LINE_LENGTH];
	while (fin.getline(str, LINE_LENGTH))
	{
		vector<string> res;
		split(str, " ", res);
		if (res.size() >= 3)
		{
			if (res[2] == "create"){
				addinfo(res[0], res[1], true);
			}
			else if (res[2] == "close")
			{
				addinfo(res[0], res[1], false);
			}
			else if (res[2] == "info")
			{
				if (res.size() > 5)
				{
					const char * pInfo = res[5].c_str();
					if (!strncmp(pInfo, "rotation=", 9)){
						addinfo(res[0].c_str(), res[1].c_str(), pInfo + 9);
					}
				}
			}
		}
	}
	return 0;
}
// parse the filename like 4165000_20180203013327202.aac
#define get_sub_str_to_x(x , source, len, result) strncpy(x, source, len); x[len] = 0; source += len; result = atoi(x);
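The comment and macro above describe how a start time is recovered from a recording filename. As an illustration only (not part of the commit), the sketch below applies the same macro to the 17-digit timestamp portion of a name like 4165000_20180203013327202.aac; the 4/2/2/2/2/2/3 field widths are an assumption inferred from the year/month/day/hour/min/sec/minsec variables used in get_start_time_from_filename below.

// Illustrative, standalone version of the filename-timestamp parsing idea.
#include <cstring>
#include <cstdlib>
#include <cstdio>

// Same shape as the macro in the commit: copy len chars, terminate, advance, convert.
#define get_sub_str_to_x(x, source, len, result) \
	strncpy(x, source, len); x[len] = 0; source += len; result = atoi(x);

int main()
{
	// Assumed layout: yyyy MM dd HH mm ss SSS (17 digits total).
	const char *start = "20180203013327202";
	char buf[5];
	int year, month, day, hour, min, sec, minsec;
	get_sub_str_to_x(buf, start, 4, year);   // 2018
	get_sub_str_to_x(buf, start, 2, month);  // 02
	get_sub_str_to_x(buf, start, 2, day);    // 03
	get_sub_str_to_x(buf, start, 2, hour);   // 01
	get_sub_str_to_x(buf, start, 2, min);    // 33
	get_sub_str_to_x(buf, start, 2, sec);    // 27
	get_sub_str_to_x(buf, start, 3, minsec); // 202
	printf("%04d-%02d-%02d %02d:%02d:%02d.%03d\n", year, month, day, hour, min, sec, minsec);
	return 0;
}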
...
...
@@ -1524,8 +1307,12 @@ float get_start_time_from_filename(const char * filename)
	int year, month, day, hour, min, sec, minsec;
	char buf[5];
	const char * start = strstr(filename, "_");
	const char * end = strstr(start + 1, "_");
	if (end)
	{
		//get the next
		start = end;
	}
	start++;
	const char * end = strstr(start, ".");
	end = strstr(start, ".");
	get_sub_str_to_x(buf, start, 4, year);
...
...
@@ -1552,37 +1339,43 @@ float get_start_time_from_filename(const char * filename)
	return (float)(t) + minsec / 1000.0;
}
vector<string> all_input_files_for_pip;
int readfile(const char * filename, int channel)
int readfile(const char * filename, media_role role)
{
	init_read_file();
	ifstream fin(filename);
	if (!fin)
	{
		return -1;
	}
	float start_time = get_start_time_from_filename(filename);
	const int LINE_LENGTH = 1000;
	char str[LINE_LENGTH];
	float start_time;
	bool bstart_time = true;
	while (fin.getline(str, LINE_LENGTH))
	{
		vector<string> res;
		split(str, " ", res);
		if (res.size() == 3)
		{
			if (bstart_time)
			{
				start_time = get_start_time_from_filename(res[2].c_str() + strlen(MERGED_PREFIX));
				bstart_time = false;
		if (res.size() >= 3)
		{
			if (res[2] == "create"){
				addinfo(start_time + atof(res[0].c_str()), res[1], true, role);
			}
			else if (res[2] == "close")
			{
				addinfo(start_time + atof(res[0].c_str()), res[1], false, role);
			}
			else if (res[2] == "info")
			{
				if (res.size() > 5)
				{
					const char * pInfo = res[5].c_str();
					if (!strncmp(pInfo, "rotation=", 9)){
						addinfo(res[0].c_str(), res[1].c_str(), pInfo + 9);
					}
				}
			}
			addinfo(atof(res[0].c_str()) + start_time, atof(res[1].c_str()), res[2], channel);
			all_input_files_for_pip.push_back(res[2]);
		}
	}
	return 0;
}
vector<string> all_input_files_for_pip;
void load_codec_param()
{
...
...
@@ -1645,7 +1438,7 @@ void get_outinfo_file_name(char * input)
	strcat(out_info_file, "_out.txt");
}
int process_record_info_to_ts(char * record_info, vector<string> & merged_info1, vector<string> & merged_info2)
int load_record_info(char * record_info)
{
	ifstream fin(record_info);
	if (!fin)
	{
...
...
@@ -1657,6 +1450,9 @@ int process_record_info_to_ts(char * record_info, vector<string> & merged_info1,
	bool bInTeacher = false;
	bool bInStudent = false;
	int nstudent = 0;
	media_files.clear();
	while (fin.getline(str, LINE_LENGTH))
	{
		if (!strncmp(str, "teacher:", 8)){
...
...
@@ -1673,40 +1469,26 @@ int process_record_info_to_ts(char * record_info, vector<string> & merged_info1,
			continue; //assume the file name > 20
		}
		else if (bInTeacher){
			readfile(str);
			get_outinfo_file_name(str);
			merged_info1.push_back(out_info_file);
			process_record_file_to_ts();
			readfile(str, mr_teacher);
		}
		else if (bInStudent){
			readfile(str);
			get_outinfo_file_name(str);
			merged_info2.push_back(out_info_file);
			process_record_file_to_ts();
			readfile(str, mr_student);
		}
	}
	unifiy_start_time();
	add_media_infos();
	return 0;
}
//#define TEST
int main(int argc, char * argv[])
{
#ifdef TEST
	const char * video = "D:\\media\\talk\\20181112\\talk915_824250397_105638\\137786519_20181112105817677.ts";
	const char * dest = "D:\\media\\talk\\20181112\\talk915_824250397_105638\\137786519_20181112105817677.ts.jpg";
	get_config_path();
	load_codec_param();
	init_merge_av();
	get_video_last_frame_jpeg(video, dest);
	merge_pic_silence(dest, 1.56, "D:\\media\\talk\\20181112\\talk915_824250397_105638\\137786519_20181112105817677_last_s156.ts");
#endif
	if (argc < 2)
	{
		printf(" merge_pip 1.0.2\n");
		printf(" run ffmpeg to merge video files to one pip video according to record info file,\n usage:");
		printf(" merge_pip 2.0.0\n");
		printf(" merge video files to one pip video according to record info file,\n usage:");
		printf("\n %s record_info_filename [-p] [-k]", argv[0]);
		printf("\n -p :only print the command,don't run ffmpeg");
		printf("\n -k :keep the temp files");
		printf("\n -d :individual files for different time segment");
		printf("\n\n");
		return -1;
...
...
@@ -1716,85 +1498,598 @@ int main(int argc, char * argv[])
	load_codec_param();
	bool bmerge_files = true;
	for (int i = 2; i < argc; i++){
		if (!strcmp(argv[i], "-p")){
			only_print = true;
		if (!strcmp(argv[i], "-d")){
			out_one_video = false;
		}
		else if (!strcmp(argv[i], "-k")){
			keep_tmp_files = true;
		}
		else if (!strcmp(argv[i], "-n")){
			bmerge_files = false;
			load_record_info(argv[1]);
		}
#define __STDC_FORMAT_MACROS
#include <stdint.h>
#include <inttypes.h>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
}
#ifdef WIN32
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#endif
#if _MSC_VER
#define snprintf _snprintf
#define PRIu64 "I64u"
#define PRId64 "I64d"
#define PRIx64 "I64x"
#define PRIX64 "I64X"
#endif
static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
	AVFilterContext *buffersink_ctx;
	AVFilterContext *buffersrc_ctx;
	AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;
static int open_input_file(const char *filename)
{
	int ret;
	unsigned int i;
	ifmt_ctx = NULL;
	if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
		return ret;
	}
		else if (!strcmp(argv[i], "-d")){
			out_one_video = false;
	if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
		return ret;
	}
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		AVStream *stream;
		AVCodecContext *codec_ctx;
		stream = ifmt_ctx->streams[i];
		codec_ctx = stream->codec;
		/* Reencode video & audio and remux subtitles etc. */
		if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			/* Open decoder */
			ret = avcodec_open2(codec_ctx, avcodec_find_decoder(codec_ctx->codec_id), NULL);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
		}
	}
	av_dump_format(ifmt_ctx, 0, filename, 0);
	return 0;
}
static int open_output_file(const char *filename)
{
	AVStream *out_stream;
	AVStream *in_stream;
	AVCodecContext *dec_ctx, *enc_ctx;
	AVCodec *encoder;
	int ret;
	unsigned int i;
	ofmt_ctx = NULL;
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
	if (!ofmt_ctx)
	{
		av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
		return AVERROR_UNKNOWN;
	}
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		out_stream = avformat_new_stream(ofmt_ctx, NULL);
		if (!out_stream)
		{
			av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
			return AVERROR_UNKNOWN;
		}
		in_stream = ifmt_ctx->streams[i];
		dec_ctx = in_stream->codec;
		enc_ctx = out_stream->codec;
		if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			/* in this example, we choose transcoding to same codec */
			encoder = avcodec_find_encoder(dec_ctx->codec_id);
			if (!encoder)
			{
				av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
				return AVERROR_INVALIDDATA;
			}
			/* In this example, we transcode to same properties (picture size,
			 * sample rate etc.). These properties can be changed for output
			 * streams easily using filters */
			if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
			{
				enc_ctx->height = dec_ctx->height;
				enc_ctx->width = dec_ctx->width;
				enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
				/* take first format from list of supported formats */
				enc_ctx->pix_fmt = encoder->pix_fmts[0];
				/* video time_base can be set to whatever is handy and supported by encoder */
				enc_ctx->time_base = dec_ctx->time_base;
			}
			else
			{
				enc_ctx->sample_rate = dec_ctx->sample_rate;
				enc_ctx->channel_layout = dec_ctx->channel_layout;
				enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
				/* take first format from list of supported formats */
				enc_ctx->sample_fmt = encoder->sample_fmts[0];
				enc_ctx->time_base.num = 1;
				enc_ctx->time_base.den = enc_ctx->sample_rate;
			}
	vector<string> merged_info1;
	vector<string> merged_info2;
#if 0
	if (bmerge_files) {
		if (readfile(argv[1]) < 0) {
			printf("open file: %s error", argv[1]);
			return -2;
			/* Third parameter can be used to pass settings to encoder */
			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
				return ret;
			}
		}
		else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN)
		{
			av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
			return AVERROR_INVALIDDATA;
		}
		else
		{
			/* if this stream must be remuxed */
			ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec, ifmt_ctx->streams[i]->codec);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
				return ret;
			}
		}
		get_outinfo_file_name(argv[1]);
		strcpy(merged_info1, out_info_file);
		process_record_file_to_ts();
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	av_dump_format(ofmt_ctx, 0, filename, 1);
	if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
	{
		ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
			return ret;
		}
	}
	if (readfile(argv[2]) < 0) {
		printf("open file: %s error", argv[1]);
		return -2;
	/* init muxer, write output file header */
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
		return ret;
	}
	get_outinfo_file_name(argv[2]);
	strcpy(merged_info2, out_info_file);
	process_record_file_to_ts();
	return 0;
}
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, const char *filter_spec)
{
	char args[512];
	int ret = 0;
	AVFilter *buffersrc = NULL;
	AVFilter *buffersink = NULL;
	AVFilterContext *buffersrc_ctx = NULL;
	AVFilterContext *buffersink_ctx = NULL;
	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *inputs = avfilter_inout_alloc();
	AVFilterGraph *filter_graph = avfilter_graph_alloc();
	if (!outputs || !inputs || !filter_graph)
	{
		ret = AVERROR(ENOMEM);
		goto end;
	}
	if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
	{
		buffersrc = avfilter_get_by_name("buffer");
		buffersink = avfilter_get_by_name("buffersink");
		if (!buffersrc || !buffersink)
		{
			av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		snprintf(args, sizeof(args),
			"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
			dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
			dec_ctx->time_base.num, dec_ctx->time_base.den,
			dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
		ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
			goto end;
		}
		ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "pix_fmts", (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt), AV_OPT_SEARCH_CHILDREN);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
			goto end;
		}
	}
	else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO)
	{
		buffersrc = avfilter_get_by_name("abuffer");
		buffersink = avfilter_get_by_name("abuffersink");
		if (!buffersrc || !buffersink)
		{
			av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		if (!dec_ctx->channel_layout)
			dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
		sprintf(args, //sizeof(args),
			"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
			dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
			av_get_sample_fmt_name(dec_ctx->sample_fmt),
			dec_ctx->channel_layout);
		ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
			goto end;
		}
		ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "sample_fmts", (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "channel_layouts", (uint8_t*)&enc_ctx->channel_layout, sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "sample_rates", (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate), AV_OPT_SEARCH_CHILDREN);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
			goto end;
		}
	}
	else
	{
		strcpy(merged_info1, argv[1]);
		strcpy(merged_info2, argv[2]);
		ret = AVERROR_UNKNOWN;
		goto end;
	}
#else
	process_record_info_to_ts(argv[1], merged_info1, merged_info2);
#endif
	/* Endpoints for the filter graph. */
	outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = NULL;
	media_files.clear();
	for (int i = 0; i < merged_info1.size(); i++)
	{
		if (readfile(merged_info1[i].c_str(), 0) < 0)
		{
			printf("open file: %s error", merged_info1[i].c_str());
			return -2;
	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;
	if (!outputs->name || !inputs->name)
	{
		ret = AVERROR(ENOMEM);
		goto end;
	}
	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec, &inputs, &outputs, NULL)) < 0)
		goto end;
	if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
		goto end;
	/* Fill FilteringContext */
	fctx->buffersrc_ctx = buffersrc_ctx;
	fctx->buffersink_ctx = buffersink_ctx;
	fctx->filter_graph = filter_graph;
end:
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);
	return ret;
}
static int init_filters(void)
{
	const char *filter_spec;
	unsigned int i;
	int ret;
	filter_ctx = (FilteringContext *)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
	if (!filter_ctx)
		return AVERROR(ENOMEM);
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		filter_ctx[i].buffersrc_ctx = NULL;
		filter_ctx[i].buffersink_ctx = NULL;
		filter_ctx[i].filter_graph = NULL;
		if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
			|| ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
			continue;
		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
			filter_spec = "null"; /* passthrough (dummy) filter for video */
		else
			filter_spec = "anull"; /* passthrough (dummy) filter for audio */
		ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec, ofmt_ctx->streams[i]->codec, filter_spec);
		if (ret)
			return ret;
	}
	return 0;
}
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame)
{
	int ret;
	int got_frame_local;
	AVPacket enc_pkt;
	int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
		(ifmt_ctx->streams[stream_index]->codec->codec_type == AVMEDIA_TYPE_VIDEO) ?
		avcodec_encode_video2 : avcodec_encode_audio2;
	if (!got_frame)
		got_frame = &got_frame_local;
	av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
	/* encode filtered frame */
	enc_pkt.data = NULL;
	enc_pkt.size = 0;
	av_init_packet(&enc_pkt);
	ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt, filt_frame, got_frame);
	av_frame_free(&filt_frame);
	if (ret < 0)
		return ret;
	if (!(*got_frame))
		return 0;
	/* prepare packet for muxing */
	enc_pkt.stream_index = stream_index;
	av_packet_rescale_ts(&enc_pkt,
		ofmt_ctx->streams[stream_index]->codec->time_base,
		ofmt_ctx->streams[stream_index]->time_base);
	av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
	/* mux encoded frame */
	ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
	return ret;
}
	for (int i = 0; i < merged_info2.size(); i++)
	{
		if (readfile(merged_info2[i].c_str(), 1) < 0)
		{
			printf("open file: %s error", merged_info2[i].c_str());
			return -2;
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
	int ret;
	AVFrame *filt_frame;
	av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
	/* push the decoded frame into the filtergraph */
	ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx, frame, 0);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
		return ret;
	}
	/* pull filtered frames from the filtergraph */
	while (1)
	{
		filt_frame = av_frame_alloc();
		if (!filt_frame)
		{
			ret = AVERROR(ENOMEM);
			break;
		}
		av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
		ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx, filt_frame);
		if (ret < 0)
		{
			/* if no more frames for output - returns AVERROR(EAGAIN)
			 * if flushed and no more frames for output - returns AVERROR_EOF
			 * rewrite retcode to 0 to show it as normal procedure completion
			 */
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
				ret = 0;
			av_frame_free(&filt_frame);
			break;
		}
#if 0
	get_outinfo_file_name(argv[1], argv[2]);
#else
	get_outinfo_file_name(argv[1]);
#endif
		filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
		ret = encode_write_frame(filt_frame, stream_index, NULL);
		if (ret < 0)
			break;
	}
	process_merged_files_to_pip_files();
	return ret;
}
	if (!keep_tmp_files && bmerge_files)
	{
		removefiles(all_input_files_for_pip);
		removefiles(merged_info1);
		removefiles(merged_info2);
	}
static int flush_encoder(unsigned int stream_index)
{
	int ret;
	int got_frame;
	if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
		return 0;
	while (1)
	{
		av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
		ret = encode_write_frame(NULL, stream_index, &got_frame);
		if (ret < 0)
			break;
		if (!got_frame)
			return 0;
	}
	return ret;
}
int transcode(int argc, char * argv[]){
	int ret;
	AVPacket packet;
	AVFrame *frame = NULL;
	enum AVMediaType type;
	unsigned int stream_index;
	unsigned int i;
	int got_frame;
	int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
	memset(&packet, 0, sizeof(AVPacket));
	if (argc != 3)
	{
		av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
		return 1;
	}
	av_register_all();
	avfilter_register_all();
	if ((ret = open_input_file(argv[1])) < 0)
		goto end;
	if ((ret = open_output_file(argv[2])) < 0)
		goto end;
	if ((ret = init_filters()) < 0)
		goto end;
	/* read all packets */
	while (1)
	{
		if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
			break;
		stream_index = packet.stream_index;
		type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", stream_index);
		if (filter_ctx[stream_index].filter_graph)
		{
			av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
			frame = av_frame_alloc();
			if (!frame)
			{
				ret = AVERROR(ENOMEM);
				break;
			}
			av_packet_rescale_ts(&packet,
				ifmt_ctx->streams[stream_index]->time_base,
				ifmt_ctx->streams[stream_index]->codec->time_base);
			dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 : avcodec_decode_audio4;
			ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame, &got_frame, &packet);
			if (ret < 0)
			{
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
			if (got_frame)
			{
				frame->pts = av_frame_get_best_effort_timestamp(frame);
				ret = filter_encode_write_frame(frame, stream_index);
				av_frame_free(&frame);
				if (ret < 0)
					goto end;
			}
			else
			{
				av_frame_free(&frame);
			}
		}
		else
		{
			/* remux this frame without reencoding */
			av_packet_rescale_ts(&packet,
				ifmt_ctx->streams[stream_index]->time_base,
				ofmt_ctx->streams[stream_index]->time_base);
			ret = av_interleaved_write_frame(ofmt_ctx, &packet);
			if (ret < 0)
				goto end;
		}
		av_packet_unref(&packet);
	}
	/* flush filters and encoders */
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		/* flush filter */
		if (!filter_ctx[i].filter_graph)
			continue;
		ret = filter_encode_write_frame(NULL, i);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
			goto end;
		}
		/* flush encoder */
		ret = flush_encoder(i);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
			goto end;
		}
	}
	av_write_trailer(ofmt_ctx);
end:
	av_packet_unref(&packet);
	av_frame_free(&frame);
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		avcodec_close(ifmt_ctx->streams[i]->codec);
		if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
			avcodec_close(ofmt_ctx->streams[i]->codec);
		if (filter_ctx && filter_ctx[i].filter_graph)
			avfilter_graph_free(&filter_ctx[i].filter_graph);
	}
	av_free(filter_ctx);
	avformat_close_input(&ifmt_ctx);
	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_closep(&ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	//if (ret < 0)
	//	av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
	return ret ? 1 : 0;
}
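For orientation only, and not part of the commit: transcode() above keeps the argc/argv contract of the command-line FFmpeg transcoding example it appears to follow, so a hypothetical in-process caller could be sketched like this (all file names below are placeholders):

// Hypothetical driver for transcode(); names are placeholders, not files from the commit.
static int run_transcode_example()
{
	char prog[] = "merge_pip";   // argv[0]
	char in[]   = "input.ts";    // argv[1]: source to decode/re-encode
	char out[]  = "output.ts";   // argv[2]: destination container
	char *args[] = { prog, in, out };
	return transcode(3, args);   // transcode() returns 0 on success, 1 otherwise
}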
...
...
pip/pip.vcxproj
...
...
@@ -53,10 +53,12 @@
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\liveAssistant\third-lib\ffmpeg\include</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>..\..\liveAssistant\third-lib\ffmpeg\lib;</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
...
...