Fangjun Kuang
Committed by GitHub

Add JavaScript API (node-addon) for homophone replacer (#2158)
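In short: the node-addon recognizer configs (both streaming and non-streaming) gain an optional `hr` object with `dictDir`, `lexicon`, and `ruleFsts` fields, which post-processes recognition output with homophone replacement rules. Condensed from the test files added below; the model and hr file paths assume the downloads shown in the CI script:

```js
const sherpa_onnx = require('sherpa-onnx-node');

// Condensed from test_asr_non_streaming_sense_voice_with_hr.js.
const recognizer = new sherpa_onnx.OfflineRecognizer({
  featConfig: {sampleRate: 16000, featureDim: 80},
  modelConfig: {
    senseVoice: {
      model:
          './sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx',
    },
    tokens: './sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt',
    numThreads: 2,
  },
  // New in this PR: dict/lexicon/rule-FST files from the hr-files release.
  hr: {dictDir: './dict', lexicon: './lexicon.txt', ruleFsts: './replace.fst'},
});

const wave = sherpa_onnx.readWave('./test-hr.wav');
const stream = recognizer.createStream();
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);
console.log(recognizer.getResult(stream));
```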

@@ -92,7 +92,18 @@ if [[ $arch != "ia32" && $platform != "win32" ]]; then
rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
node ./test_asr_non_streaming_sense_voice.js
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
tar xf dict.tar.bz2
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
node ./test_asr_non_streaming_sense_voice_with_hr.js
rm -rf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17
rm -rf dict replace.fst test-hr.wav lexicon.txt
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
@@ -253,12 +264,21 @@ rm -f itn*
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/itn_zh_number.fst
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/itn-zh-number.wav
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
tar xf dict.tar.bz2
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
if [[ $arch != "ia32" && $platform != "win32" ]]; then
  node test_asr_streaming_transducer_itn.js
  node test_asr_streaming_transducer.js
  node test_asr_streaming_transducer_with_hr.js
fi
rm -rf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20
rm -rf dict lexicon.txt replace.fst test-hr.wav
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18.tar.bz2
tar xvf sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18.tar.bz2
... ...
@@ -141,3 +141,4 @@ README-DEV.txt
##clion
.idea
sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02
dict
... ...
@@ -9,6 +9,7 @@
// defined in ./streaming-asr.cc
SherpaOnnxFeatureConfig GetFeatureConfig(Napi::Object obj);
SherpaOnnxHomophoneReplacerConfig GetHomophoneReplacerConfig(Napi::Object obj);

static SherpaOnnxOfflineTransducerModelConfig GetOfflineTransducerModelConfig(
    Napi::Object obj) {
@@ -261,6 +262,7 @@ CreateOfflineRecognizerWrapper(const Napi::CallbackInfo &info) {
  c.feat_config = GetFeatureConfig(o);
  c.model_config = GetOfflineModelConfig(o);
  c.lm_config = GetOfflineLMConfig(o);
  c.hr = GetHomophoneReplacerConfig(o);

  SHERPA_ONNX_ASSIGN_ATTR_STR(decoding_method, decodingMethod);
  SHERPA_ONNX_ASSIGN_ATTR_INT32(max_active_paths, maxActivePaths);
@@ -324,6 +326,9 @@ CreateOfflineRecognizerWrapper(const Napi::CallbackInfo &info) {
  SHERPA_ONNX_DELETE_C_STR(c.hotwords_file);
  SHERPA_ONNX_DELETE_C_STR(c.rule_fsts);
  SHERPA_ONNX_DELETE_C_STR(c.rule_fars);

  // Free the strings allocated by GetHomophoneReplacerConfig().
  SHERPA_ONNX_DELETE_C_STR(c.hr.dict_dir);
  SHERPA_ONNX_DELETE_C_STR(c.hr.lexicon);
  SHERPA_ONNX_DELETE_C_STR(c.hr.rule_fsts);

  if (!recognizer) {
    Napi::TypeError::New(env, "Please check your config!")
... ...
@@ -144,6 +144,24 @@ static SherpaOnnxOnlineCtcFstDecoderConfig GetCtcFstDecoderConfig(
  return c;
}
// Also used in ./non-streaming-asr.cc
SherpaOnnxHomophoneReplacerConfig GetHomophoneReplacerConfig(Napi::Object obj) {
  SherpaOnnxHomophoneReplacerConfig c;
  // Zero-initialize so an absent or malformed "hr" key yields an empty
  // config and the replacer is simply disabled.
  memset(&c, 0, sizeof(c));

  if (!obj.Has("hr") || !obj.Get("hr").IsObject()) {
    return c;
  }

  Napi::Object o = obj.Get("hr").As<Napi::Object>();

  // Copy the camelCase JS properties into the corresponding snake_case C
  // fields; the macro heap-allocates each string, which callers later free
  // with SHERPA_ONNX_DELETE_C_STR.
  SHERPA_ONNX_ASSIGN_ATTR_STR(dict_dir, dictDir);
  SHERPA_ONNX_ASSIGN_ATTR_STR(lexicon, lexicon);
  SHERPA_ONNX_ASSIGN_ATTR_STR(rule_fsts, ruleFsts);

  return c;
}
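// For reference, the JS shape consumed above (taken from the test files in
// this PR); each camelCase key is copied into the matching snake_case field:
//
//   hr: {
//     dictDir: './dict',         // directory unpacked from dict.tar.bz2
//     lexicon: './lexicon.txt',
//     ruleFsts: './replace.fst',
//   }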
static Napi::External<SherpaOnnxOnlineRecognizer> CreateOnlineRecognizerWrapper(
    const Napi::CallbackInfo &info) {
  Napi::Env env = info.Env();
@@ -179,6 +197,7 @@ static Napi::External<SherpaOnnxOnlineRecognizer> CreateOnlineRecognizerWrapper(
  memset(&c, 0, sizeof(c));

  c.feat_config = GetFeatureConfig(o);
  c.model_config = GetOnlineModelConfig(o);
  c.hr = GetHomophoneReplacerConfig(o);

  SHERPA_ONNX_ASSIGN_ATTR_STR(decoding_method, decodingMethod);
  SHERPA_ONNX_ASSIGN_ATTR_INT32(max_active_paths, maxActivePaths);
@@ -243,6 +262,10 @@ static Napi::External<SherpaOnnxOnlineRecognizer> CreateOnlineRecognizerWrapper(
  SHERPA_ONNX_DELETE_C_STR(c.hotwords_buf);
  SHERPA_ONNX_DELETE_C_STR(c.ctc_fst_decoder_config.graph);

  // Free the strings allocated by GetHomophoneReplacerConfig().
  SHERPA_ONNX_DELETE_C_STR(c.hr.dict_dir);
  SHERPA_ONNX_DELETE_C_STR(c.hr.lexicon);
  SHERPA_ONNX_DELETE_C_STR(c.hr.rule_fsts);

  if (!recognizer) {
    Napi::TypeError::New(env, "Please check your config!")
        .ThrowAsJavaScriptException();
... ...
@@ -98,6 +98,7 @@ The following tables list the examples in this folder.
|File| Description|
|---|---|
|[./test_asr_streaming_transducer.js](./test_asr_streaming_transducer.js)| Streaming speech recognition from a file using a Zipformer transducer model|
|[./test_asr_streaming_transducer_with_hr.js](./test_asr_streaming_transducer_with_hr.js)| Streaming speech recognition from a file using a Zipformer transducer model with homophone replacer|
|[./test_asr_streaming_ctc.js](./test_asr_streaming_ctc.js)| Streaming speech recognition from a file using a Zipformer CTC model with greedy search|
|[./test_asr_streaming_ctc_hlg.js](./test_asr_streaming_ctc_hlg.js)| Streaming speech recognition from a file using a Zipformer CTC model with HLG decoding|
|[./test_asr_streaming_paraformer.js](./test_asr_streaming_paraformer.js)|Streaming speech recognition from a file using a [Paraformer](https://github.com/alibaba-damo-academy/FunASR) model|
@@ -125,6 +126,7 @@ The following tables list the examples in this folder.
|[./test_asr_non_streaming_dolphin_ctc.js](./test_asr_non_streaming_dolphin_ctc.js)|Non-streaming speech recognition from a file using a [Dolphin](https://github.com/DataoceanAI/Dolphin) CTC model with greedy search|
|[./test_asr_non_streaming_paraformer.js](./test_asr_non_streaming_paraformer.js)|Non-streaming speech recognition from a file using [Paraformer](https://github.com/alibaba-damo-academy/FunASR)|
|[./test_asr_non_streaming_sense_voice.js](./test_asr_non_streaming_sense_voice.js)|Non-streaming speech recognition from a file using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)|
|[./test_asr_non_streaming_sense_voice_with_hr.js](./test_asr_non_streaming_sense_voice_with_hr.js)|Non-streaming speech recognition from a file using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice) with homophone replacer|
## Non-Streaming speech-to-text from a microphone with VAD
@@ -207,6 +209,22 @@ rm sherpa-onnx-ced-mini-audio-tagging-2024-09-14.tar.bz2
node ./test_audio_tagging_ced.js
```
### Streaming speech recognition with Zipformer transducer and homophone replacer
```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
tar xvf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
rm sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
tar xf dict.tar.bz2
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
node ./test_asr_streaming_transducer_with_hr.js
```
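The test script enables the replacer through the `hr` key of the recognizer config; the paths refer to the files downloaded above, relative to where `node` is run:

```js
// The hr block of the config in test_asr_streaming_transducer_with_hr.js,
// pulled out for clarity:
const hr = {
  'dictDir': './dict',        // directory unpacked from dict.tar.bz2
  'lexicon': './lexicon.txt',
  'ruleFsts': './replace.fst',
};
```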
### Streaming speech recognition with Zipformer transducer
```bash
@@ -371,6 +389,22 @@ npm install naudiodon2
node ./test_vad_asr_non_streaming_paraformer_microphone.js
```
### Non-streaming speech recognition with SenseVoice and homophone replacer
```bash
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
tar xf dict.tar.bz2
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
node ./test_asr_non_streaming_sense_voice_with_hr.js
```
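The offline (non-streaming) recognizer takes the same three fields. Note that `hr` is optional: as shown in `GetHomophoneReplacerConfig` above, the addon zero-initializes the replacer config when the key is absent, so existing scripts are unaffected.

```js
// Same three fields for the offline recognizer in
// test_asr_non_streaming_sense_voice_with_hr.js:
const hr = {'dictDir': './dict', 'lexicon': './lexicon.txt', 'ruleFsts': './replace.fst'};
```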
### Non-streaming speech recognition with SenseVoice
```bash
... ...
// Copyright (c) 2025 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/hr-files
// If your path contains non-ASCII characters, e.g., Chinese, you can use
// the following code:
//
// let encoder = new TextEncoder();
// let tokens = encoder.encode(
// './sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/测试.txt');
// let model = encoder.encode(
// './sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/测试.int8.onnx');
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'senseVoice': {
      'model':
          './sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx',
      // 'model': model,
      'useInverseTextNormalization': 1,
    },
    'tokens': './sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt',
    // 'tokens': tokens,
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  },
  'hr': {
    // Please download files from
    // https://github.com/k2-fsa/sherpa-onnx/releases/tag/hr-files
    'dictDir': './dict',
    'lexicon': './lexicon.txt',
    'ruleFsts': './replace.fst',
  },
};
const waveFilename = './test-hr.wav';
const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started')
let start = Date.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);
const result = recognizer.getResult(stream);
let stop = Date.now();
console.log('Done')
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds')
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds')
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3))
console.log(waveFilename)
console.log('result\n', result)
... ...
// Copyright (c) 2025 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/hr-files
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'transducer': {
      'encoder':
          './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/encoder-epoch-99-avg-1.int8.onnx',
      'decoder':
          './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/decoder-epoch-99-avg-1.onnx',
      'joiner':
          './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/joiner-epoch-99-avg-1.int8.onnx',
    },
    'tokens':
        './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  },
  'hr': {
    'dictDir': './dict',
    'lexicon': './lexicon.txt',
    'ruleFsts': './replace.fst',
  },
};
const waveFilename = './test-hr.wav';
const recognizer = new sherpa_onnx.OnlineRecognizer(config);
console.log('Started')
let start = Date.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
// Append 0.4 seconds of silence so the final frames of the utterance are
// flushed through the streaming model.
const tailPadding = new Float32Array(wave.sampleRate * 0.4);
stream.acceptWaveform({samples: tailPadding, sampleRate: wave.sampleRate});
while (recognizer.isReady(stream)) {
  recognizer.decode(stream);
}
const result = recognizer.getResult(stream);
let stop = Date.now();
console.log('Done')
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds')
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds')
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3))
console.log(waveFilename)
console.log('result\n', result)
... ...