Fangjun Kuang
Committed by GitHub

Add Javascript (node-addon) API for Dolphin CTC models (#2094)
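In short: this change wires the Dolphin CTC offline model through the node-addon C API bindings, adds an `OfflineDolphinModelConfig` class to the TypeScript config types, and ships a test script, README documentation, and a shell test-runner entry. A condensed usage sketch of the new API surface (the paths are placeholders; the complete runnable test script appears at the end of this diff):

```js
// Condensed sketch of the new Dolphin CTC API; paths are placeholders.
const sherpa_onnx = require('sherpa-onnx-node');

const recognizer = new sherpa_onnx.OfflineRecognizer({
  featConfig: {sampleRate: 16000, featureDim: 80},
  modelConfig: {
    dolphin: {model: '/path/to/model.int8.onnx'},  // the new Dolphin field
    tokens: '/path/to/tokens.txt',
    numThreads: 2,
    provider: 'cpu',
  },
});

const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave('/path/to/test.wav');
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);
console.log(recognizer.getResult(stream));
```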

... ... @@ -10,6 +10,16 @@ arch=$(node -p "require('os').arch()")
platform=$(node -p "require('os').platform()")
node_version=$(node -p "process.versions.node.split('.')[0]")
echo "----------non-streaming ASR dolphin CTC----------"
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02.tar.bz2
tar xvf sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02.tar.bz2
rm sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02.tar.bz2
node ./test_asr_non_streaming_dolphin_ctc.js
rm -rf sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02
echo "----------non-streaming speech denoiser----------"
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/speech-enhancement-models/gtcrn_simple.onnx
... ...
... ... @@ -6,6 +6,7 @@ export { CircularBuffer, SileroVadConfig, SpeechSegment, Vad, VadConfig, } from
export { Samples,
OfflineStream,
FeatureConfig,
OfflineDolphinModelConfig,
OfflineTransducerModelConfig,
OfflineParaformerModelConfig,
OfflineNemoEncDecCtcModelConfig,
... ...
... ... @@ -44,6 +44,22 @@ static SherpaOnnxOfflineParaformerModelConfig GetOfflineParaformerModelConfig(
return c;
}
static SherpaOnnxOfflineDolphinModelConfig GetOfflineDolphinModelConfig(
    Napi::Object obj) {
  SherpaOnnxOfflineDolphinModelConfig c;
  // Zero-initialize so every field has a defined value even when the
  // "dolphin" object is absent from the JavaScript config.
  memset(&c, 0, sizeof(c));

  if (!obj.Has("dolphin") || !obj.Get("dolphin").IsObject()) {
    return c;
  }

  Napi::Object o = obj.Get("dolphin").As<Napi::Object>();

  SHERPA_ONNX_ASSIGN_ATTR_STR(model, model);

  return c;
}
static SherpaOnnxOfflineNemoEncDecCtcModelConfig GetOfflineNeMoCtcModelConfig(
Napi::Object obj) {
SherpaOnnxOfflineNemoEncDecCtcModelConfig c;
... ... @@ -168,6 +184,7 @@ static SherpaOnnxOfflineModelConfig GetOfflineModelConfig(Napi::Object obj) {
c.sense_voice = GetOfflineSenseVoiceModelConfig(o);
c.moonshine = GetOfflineMoonshineModelConfig(o);
c.fire_red_asr = GetOfflineFireRedAsrModelConfig(o);
c.dolphin = GetOfflineDolphinModelConfig(o);
SHERPA_ONNX_ASSIGN_ATTR_STR(tokens, tokens);
SHERPA_ONNX_ASSIGN_ATTR_INT32(num_threads, numThreads);
... ... @@ -292,6 +309,8 @@ CreateOfflineRecognizerWrapper(const Napi::CallbackInfo &info) {
SHERPA_ONNX_DELETE_C_STR(c.model_config.fire_red_asr.encoder);
SHERPA_ONNX_DELETE_C_STR(c.model_config.fire_red_asr.decoder);
SHERPA_ONNX_DELETE_C_STR(c.model_config.dolphin.model);
SHERPA_ONNX_DELETE_C_STR(c.model_config.tokens);
SHERPA_ONNX_DELETE_C_STR(c.model_config.provider);
SHERPA_ONNX_DELETE_C_STR(c.model_config.model_type);
... ...
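Note the lifetime pattern shared with the other offline model configs: the C struct is zero-initialized with `memset`, the string filled in by `SHERPA_ONNX_ASSIGN_ATTR_STR` is owned by the config, and the matching `SHERPA_ONNX_DELETE_C_STR(c.model_config.dolphin.model)` call frees it once the recognizer wrapper has been created.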
... ... @@ -45,6 +45,10 @@ export class OfflineNemoEncDecCtcModelConfig {
public model: string = '';
}
export class OfflineDolphinModelConfig {
public model: string = '';
}
export class OfflineWhisperModelConfig {
public encoder: string = '';
public decoder: string = '';
... ... @@ -86,6 +90,7 @@ export class OfflineModelConfig {
public telespeechCtc: string = '';
public senseVoice: OfflineSenseVoiceModelConfig = new OfflineSenseVoiceModelConfig();
public moonshine: OfflineMoonshineModelConfig = new OfflineMoonshineModelConfig();
public dolphin: OfflineDolphinModelConfig = new OfflineDolphinModelConfig();
}
export class OfflineLMConfig {
... ... @@ -159,4 +164,4 @@ export class OfflineRecognizer {
return r;
}
}
... ...
... ... @@ -122,6 +122,7 @@ The following tables list the examples in this folder.
|[./test_asr_non_streaming_moonshine.js](./test_asr_non_streaming_moonshine.js)|Non-streaming speech recognition from a file using [Moonshine](https://github.com/usefulsensors/moonshine)|
|[./test_vad_with_non_streaming_asr_moonshine.js](./test_vad_with_non_streaming_asr_moonshine.js)| Non-streaming speech recognition from a file using [Moonshine](https://github.com/usefulsensors/moonshine) + [Silero VAD](https://github.com/snakers4/silero-vad)|
|[./test_asr_non_streaming_nemo_ctc.js](./test_asr_non_streaming_nemo_ctc.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) CTC model with greedy search|
|[./test_asr_non_streaming_dolphin_ctc.js](./test_asr_non_streaming_dolphin_ctc.js)|Non-streaming speech recognition from a file using a [Dolphin](https://github.com/DataoceanAI/Dolphin) CTC model with greedy search|
|[./test_asr_non_streaming_paraformer.js](./test_asr_non_streaming_paraformer.js)|Non-streaming speech recognition from a file using [Paraformer](https://github.com/alibaba-damo-academy/FunASR)|
|[./test_asr_non_streaming_sense_voice.js](./test_asr_non_streaming_sense_voice.js)|Non-streaming speech recognition from a file using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)|
... ... @@ -332,6 +333,16 @@ wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_v
node ./test_vad_with_non_streaming_asr_whisper.js
```
### Non-streaming speech recognition with Dolphin CTC models
```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02.tar.bz2
tar xvf sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02.tar.bz2
rm sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02.tar.bz2
node ./test_asr_non_streaming_dolphin_ctc.js
```
### Non-streaming speech recognition with NeMo CTC models
```bash
... ...
// Copyright (c) 2025 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const config = {
'featConfig': {
'sampleRate': 16000,
'featureDim': 80,
},
'modelConfig': {
'dolphin': {
'model':
'./sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02/model.int8.onnx',
},
'tokens':
'./sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02/tokens.txt',
'numThreads': 2,
'provider': 'cpu',
'debug': 1,
}
};
const waveFilename =
'./sherpa-onnx-dolphin-base-ctc-multi-lang-int8-2025-04-02/test_wavs/0.wav';
const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started');
let start = Date.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);
const result = recognizer.getResult(stream);
let stop = Date.now();
console.log('Done');
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);
... ...
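For reference, the real-time factor (RTF) printed by the test script is the elapsed decoding time divided by the audio duration, so values below 1 mean the model decodes faster than real time.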