#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
import onnxruntime as ort
import argparse
import soundfile as sf
from typing import Tuple
import numpy as np
def get_args():
    """Parse command-line arguments; --model and --wav are both required."""
    parser = argparse.ArgumentParser()
    for flag, description in (
        ("--model", "Path to the onnx model"),
        ("--wav", "Path to the input wav"),
    ):
        parser.add_argument(flag, type=str, required=True, help=description)
    return parser.parse_args()
class OnnxModel:
    """Wrapper around a streaming VAD onnx model that carries LSTM states."""

    def __init__(
        self,
        model: str,
    ):
        # Single-threaded CPU execution keeps inference deterministic.
        opts = ort.SessionOptions()
        opts.inter_op_num_threads = 1
        opts.intra_op_num_threads = 1
        self.model = ort.InferenceSession(
            model,
            sess_options=opts,
            providers=["CPUExecutionProvider"],
        )

    def get_init_states(self):
        """Return zeroed initial LSTM states (h, c), each of shape (2, 1, 64)."""
        state_shape = (2, 1, 64)
        h = np.zeros(state_shape, dtype=np.float32)
        c = np.zeros(state_shape, dtype=np.float32)
        return h, c

    def __call__(self, x, h, c):
        """
        Args:
          x: (1, 512)
          h: (2, 1, 64)
          c: (2, 1, 64)
        Returns:
          prob: (1, 1)
          next_h: (2, 1, 64)
          next_c: (2, 1, 64)
        """
        inputs = self.model.get_inputs()
        outputs = self.model.get_outputs()
        # Prepend a batch axis to the raw sample window before feeding it.
        feed = {
            inputs[0].name: x[None],
            inputs[1].name: h,
            inputs[2].name: c,
        }
        fetch = [o.name for o in outputs[:3]]
        prob, next_h, next_c = self.model.run(fetch, feed)
        return prob, next_h, next_c
def load_audio(filename: str) -> Tuple[np.ndarray, int]:
    """Read a wav file and return (mono float32 samples, sample rate)."""
    audio, rate = sf.read(
        filename,
        always_2d=True,
        dtype="float32",
    )
    # Keep only the first channel and make it contiguous for onnxruntime.
    mono = np.ascontiguousarray(audio[:, 0])
    return mono, rate
def _detect_segments(is_speech, min_speech_frames):
    """Return [start, end) frame ranges of speech runs longer than min_speech_frames.

    is_speech is a sequence of booleans, one per analysis frame. Runs of
    speech shorter than min_speech_frames are discarded.
    """
    segments = []
    run_start = -1
    k = -1
    for k, speech in enumerate(is_speech):
        if speech:
            if run_start == -1:
                run_start = k
        elif run_start != -1:
            if k - run_start > min_speech_frames:
                segments.append((run_start, k))
            run_start = -1
    # Flush a trailing speech run that reaches the end of the audio.
    if run_start != -1 and k - run_start > min_speech_frames:
        segments.append((run_start, k))
    return segments


def _merge_segments(segments, min_silence_frames):
    """Merge adjacent segments separated by a gap shorter than min_silence_frames.

    segments must be non-empty and sorted by start frame.
    """
    merged = [segments[0]]
    for start, end in segments[1:]:
        prev_start, prev_end = merged[-1]
        if start - prev_end < min_silence_frames:
            merged[-1] = (prev_start, end)
        else:
            merged.append((start, end))
    return merged


def main():
    args = get_args()
    samples, sample_rate = load_audio(args.wav)
    if sample_rate != 16000:
        # Lazy import: librosa is only needed when resampling is required.
        import librosa

        samples = librosa.resample(samples, orig_sr=sample_rate, target_sr=16000)
        sample_rate = 16000

    model = OnnxModel(args.model)
    h, c = model.get_init_states()

    window_size = 512  # samples per model invocation (32 ms at 16 kHz)
    num_windows = samples.shape[0] // window_size
    probs = []
    for i in range(num_windows):
        start = i * window_size
        p, h, c = model(samples[start : start + window_size], h, c)
        probs.append(p[0].item())

    threshold = 0.5
    # Threshold once; downstream logic works on booleans, not raw scores.
    # (The original compared the already-thresholded boolean against the
    # float threshold again, which was redundant.)
    is_speech = [p > threshold for p in probs]

    # Minimum durations expressed in whole analysis frames.
    min_speech_frames = 0.25 * sample_rate / window_size
    min_silence_frames = 0.25 * sample_rate / window_size

    result = _detect_segments(is_speech, min_speech_frames)
    if not result:
        print(f"Empty for {args.wav}")
        return
    print(result)

    final = _merge_segments(result, min_silence_frames)
    for seg_start, seg_end in final:
        start = seg_start * window_size / sample_rate
        end = seg_end * window_size / sample_rate
        print("{:.3f} -- {:.3f}".format(start, end))


if __name__ == "__main__":
    main()