test_asr_streaming_ctc_microphone.js
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createOnlineRecognizer() {
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'zipformer2Ctc': {
        'model':
            './sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18/ctc-epoch-30-avg-3-chunk-16-left-128.int8.onnx',
      },
      'tokens':
          './sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    },
    'decodingMethod': 'greedy_search',
    'maxActivePaths': 4,
    'enableEndpoint': true,
    // Endpoint rules: rule1 fires after 2.4 s of trailing silence when
    // nothing has been decoded yet, rule2 after 1.2 s of trailing silence
    // once something has been decoded, rule3 when an utterance exceeds 20 s.
    'rule1MinTrailingSilence': 2.4,
    'rule2MinTrailingSilence': 1.2,
    'rule3MinUtteranceLength': 20
  };

  return new sherpa_onnx.OnlineRecognizer(config);
}
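
// Optional sanity check, a minimal sketch and not part of sherpa-onnx-node's
// API: fail early with a readable message if the model files referenced in
// the config above are missing (e.g. the archive was not yet downloaded and
// unpacked into the current directory).
const fs = require('fs');
const modelDir = './sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18';
for (const f of
         [`${modelDir}/ctc-epoch-30-avg-3-chunk-16-left-128.int8.onnx`,
          `${modelDir}/tokens.txt`]) {
  if (!fs.existsSync(f)) {
    console.error(`Cannot find ${f}. Please download the model first.`);
    process.exit(1);
  }
}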
const recognizer = createOnlineRecognizer();
const stream = recognizer.createStream();

// Tracks the last printed partial result and the index of the current segment.
let lastText = '';
let segmentIndex = 0;

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, errors are only logged.
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: recognizer.config.featConfig.sampleRate
  }
});

const display = new sherpa_onnx.Display(50);
// For every chunk of audio from the microphone: feed it to the recognizer,
// decode whatever is ready, and reprint the partial result when it changes.
// When an endpoint is detected, finalize the current segment and reset the
// stream for the next one.
ai.on('data', data => {
  const samples = new Float32Array(data.buffer);

  stream.acceptWaveform(
      {sampleRate: recognizer.config.featConfig.sampleRate, samples: samples});

  while (recognizer.isReady(stream)) {
    recognizer.decode(stream);
  }

  const isEndpoint = recognizer.isEndpoint(stream);
  const text = recognizer.getResult(stream).text.toLowerCase();

  if (text.length > 0 && lastText != text) {
    lastText = text;
    display.print(segmentIndex, lastText);
  }

  if (isEndpoint) {
    if (text.length > 0) {
      lastText = text;
      segmentIndex += 1;
    }
    recognizer.reset(stream);
  }
});

ai.start();
console.log('Started! Please speak');
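
// Optional: stop cleanly on Ctrl-C. A minimal sketch that assumes
// naudiodon2's AudioIO exposes quit(), as the upstream naudiodon package
// documents; if your version differs, killing the process also works.
process.on('SIGINT', () => {
  ai.quit();  // Stop the PortAudio input stream (assumed API, see above).
  process.exit(0);
});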