Committed by GitHub
Add non-streaming ASR APIs for node-addon-api (#868)
Showing 18 changed files with 1,588 additions and 36 deletions.
@@ -22,5 +22,39 @@ node ./test_asr_streaming_ctc.js

 # To decode with HLG.fst
 node ./test_asr_streaming_ctc_hlg.js
-
 rm -rf sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18
+
+curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
+tar xvf sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
+rm sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
+
+node ./test_asr_streaming_paraformer.js
+rm -rf sherpa-onnx-streaming-paraformer-bilingual-zh-en
+
+curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
+tar xvf sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
+rm sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
+
+node ./test_asr_non_streaming_transducer.js
+rm -rf sherpa-onnx-zipformer-en-2023-04-01
+
+curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
+tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
+rm sherpa-onnx-whisper-tiny.en.tar.bz2
+
+node ./test_asr_non_streaming_whisper.js
+rm -rf sherpa-onnx-whisper-tiny.en
+
+curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
+tar xvf sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
+rm sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
+
+node ./test_asr_non_streaming_nemo_ctc.js
+rm -rf sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k
+
+curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
+tar xvf sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
+rm sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
+
+node ./test_asr_non_streaming_paraformer.js
+rm -rf sherpa-onnx-paraformer-zh-2023-03-28
modified: README.md

@@ -39,7 +39,7 @@ npm install naudiodon2
 node ./test_vad_microphone.js
 ```

-## Streaming speech recognition with zipformer transducer
+## Streaming speech recognition with Zipformer transducer

 ```bash
 wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
@@ -54,7 +54,7 @@ npm install naudiodon2
 node ./test_asr_streaming_transducer_microphone.js
 ```

-## Streaming speech recognition with zipformer CTC
+## Streaming speech recognition with Zipformer CTC

 ```bash
 wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18.tar.bz2
@@ -72,3 +72,74 @@ npm install naudiodon2
 node ./test_asr_streaming_ctc_microphone.js
 node ./test_asr_streaming_ctc_hlg_microphone.js
 ```
+
+## Streaming speech recognition with Paraformer
+
+```bash
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
+tar xvf sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
+rm sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
+
+node ./test_asr_streaming_paraformer.js
+
+# To run the test with a microphone, you need to install the package naudiodon2
+npm install naudiodon2
+
+node ./test_asr_streaming_paraformer_microphone.js
+```
+
+## Non-streaming speech recognition with Zipformer transducer
+
+```bash
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
+tar xvf sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
+rm sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
+
+node ./test_asr_non_streaming_transducer.js
+
+# To run VAD + non-streaming ASR with a transducer using a microphone
+npm install naudiodon2
+node ./test_vad_asr_non_streaming_transducer_microphone.js
+```
+
+## Non-streaming speech recognition with Whisper
+
+```bash
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
+tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
+rm sherpa-onnx-whisper-tiny.en.tar.bz2
+
+node ./test_asr_non_streaming_whisper.js
+
+# To run VAD + non-streaming ASR with Whisper using a microphone
+npm install naudiodon2
+node ./test_vad_asr_non_streaming_whisper_microphone.js
+```
+
+## Non-streaming speech recognition with NeMo CTC models
+
+```bash
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
+tar xvf sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
+rm sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
+
+node ./test_asr_non_streaming_nemo_ctc.js
+
+# To run VAD + non-streaming ASR with a NeMo CTC model using a microphone
+npm install naudiodon2
+node ./test_vad_asr_non_streaming_nemo_ctc_microphone.js
+```
+
+## Non-streaming speech recognition with Paraformer
+
+```bash
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
+tar xvf sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
+rm sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
+
+node ./test_asr_non_streaming_paraformer.js
+
+# To run VAD + non-streaming ASR with Paraformer using a microphone
+npm install naudiodon2
+node ./test_vad_asr_non_streaming_paraformer_microphone.js
+```
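
Every test script added below reports a real-time factor (RTF): decoding wall time divided by audio duration, where RTF < 1 means faster than real time. As a quick reference, here is that shared computation factored into a sketch; the helper name is illustrative and not part of the PR.

```js
// Illustrative sketch, not part of this PR: the RTF computation the
// test scripts below share.
const {performance} = require('perf_hooks');

function measureRtf(decodeFn, numSamples, sampleRate) {
  const start = performance.now();
  decodeFn();  // run the actual decoding work
  const elapsedSeconds = (performance.now() - start) / 1000;
  const durationSeconds = numSamples / sampleRate;
  return elapsedSeconds / durationSeconds;  // RTF < 1: faster than real time
}
```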
new file: test_asr_non_streaming_nemo_ctc.js

+// Copyright (c) 2024 Xiaomi Corporation
+const sherpa_onnx = require('sherpa-onnx-node');
+const performance = require('perf_hooks').performance;
+
+
+// Please download test files from
+// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+const config = {
+  'featConfig': {
+    'sampleRate': 16000,
+    'featureDim': 80,
+  },
+  'modelConfig': {
+    'nemoCtc': {
+      'model':
+          './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/model.onnx',
+    },
+    'tokens':
+        './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/tokens.txt',
+    'numThreads': 2,
+    'provider': 'cpu',
+    'debug': 1,
+  }
+};
+
+const waveFilename =
+    './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/test_wavs/de-german.wav';
+
+const recognizer = new sherpa_onnx.OfflineRecognizer(config);
+console.log('Started');
+let start = performance.now();
+const stream = recognizer.createStream();
+const wave = sherpa_onnx.readWave(waveFilename);
+stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
+
+recognizer.decode(stream);
+const result = recognizer.getResult(stream);
+let stop = performance.now();
+console.log('Done');
+
+const elapsed_seconds = (stop - start) / 1000;
+const duration = wave.samples.length / wave.sampleRate;
+const real_time_factor = elapsed_seconds / duration;
+console.log('Wave duration', duration.toFixed(3), 'seconds');
+console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
+console.log(
+    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
+    real_time_factor.toFixed(3));
+console.log(waveFilename);
+console.log('result\n', result);
new file: test_asr_non_streaming_paraformer.js

+// Copyright (c) 2024 Xiaomi Corporation
+const sherpa_onnx = require('sherpa-onnx-node');
+const performance = require('perf_hooks').performance;
+
+
+// Please download test files from
+// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+const config = {
+  'featConfig': {
+    'sampleRate': 16000,
+    'featureDim': 80,
+  },
+  'modelConfig': {
+    'paraformer': {
+      'model': './sherpa-onnx-paraformer-zh-2023-03-28/model.int8.onnx',
+    },
+    'tokens': './sherpa-onnx-paraformer-zh-2023-03-28/tokens.txt',
+    'numThreads': 2,
+    'provider': 'cpu',
+    'debug': 1,
+  }
+};
+
+const waveFilename =
+    './sherpa-onnx-paraformer-zh-2023-03-28/test_wavs/5-henan.wav';
+
+const recognizer = new sherpa_onnx.OfflineRecognizer(config);
+console.log('Started');
+let start = performance.now();
+const stream = recognizer.createStream();
+const wave = sherpa_onnx.readWave(waveFilename);
+stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
+
+recognizer.decode(stream);
+const result = recognizer.getResult(stream);
+let stop = performance.now();
+console.log('Done');
+
+const elapsed_seconds = (stop - start) / 1000;
+const duration = wave.samples.length / wave.sampleRate;
+const real_time_factor = elapsed_seconds / duration;
+console.log('Wave duration', duration.toFixed(3), 'seconds');
+console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
+console.log(
+    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
+    real_time_factor.toFixed(3));
+console.log(waveFilename);
+console.log('result\n', result);
new file: test_asr_non_streaming_transducer.js

+// Copyright (c) 2024 Xiaomi Corporation
+const sherpa_onnx = require('sherpa-onnx-node');
+const performance = require('perf_hooks').performance;
+
+
+// Please download test files from
+// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+const config = {
+  'featConfig': {
+    'sampleRate': 16000,
+    'featureDim': 80,
+  },
+  'modelConfig': {
+    'transducer': {
+      'encoder':
+          './sherpa-onnx-zipformer-en-2023-04-01/encoder-epoch-99-avg-1.int8.onnx',
+      'decoder':
+          './sherpa-onnx-zipformer-en-2023-04-01/decoder-epoch-99-avg-1.onnx',
+      'joiner':
+          './sherpa-onnx-zipformer-en-2023-04-01/joiner-epoch-99-avg-1.int8.onnx',
+    },
+    'tokens': './sherpa-onnx-zipformer-en-2023-04-01/tokens.txt',
+    'numThreads': 2,
+    'provider': 'cpu',
+    'debug': 1,
+  }
+};
+
+const waveFilename = './sherpa-onnx-zipformer-en-2023-04-01/test_wavs/1.wav';
+
+const recognizer = new sherpa_onnx.OfflineRecognizer(config);
+console.log('Started');
+let start = performance.now();
+const stream = recognizer.createStream();
+const wave = sherpa_onnx.readWave(waveFilename);
+stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
+
+recognizer.decode(stream);
+const result = recognizer.getResult(stream);
+let stop = performance.now();
+console.log('Done');
+
+const elapsed_seconds = (stop - start) / 1000;
+const duration = wave.samples.length / wave.sampleRate;
+const real_time_factor = elapsed_seconds / duration;
+console.log('Wave duration', duration.toFixed(3), 'seconds');
+console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
+console.log(
+    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
+    real_time_factor.toFixed(3));
+console.log(waveFilename);
+console.log('result\n', result);
new file: test_asr_non_streaming_whisper.js

+// Copyright (c) 2024 Xiaomi Corporation
+const sherpa_onnx = require('sherpa-onnx-node');
+const performance = require('perf_hooks').performance;
+
+
+// Please download test files from
+// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+const config = {
+  'featConfig': {
+    'sampleRate': 16000,
+    'featureDim': 80,
+  },
+  'modelConfig': {
+    'whisper': {
+      'encoder': './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
+      'decoder': './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
+    },
+    'tokens': './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
+    'numThreads': 2,
+    'provider': 'cpu',
+    'debug': 1,
+  }
+};
+
+const waveFilename = './sherpa-onnx-whisper-tiny.en/test_wavs/0.wav';
+
+const recognizer = new sherpa_onnx.OfflineRecognizer(config);
+console.log('Started');
+let start = performance.now();
+const stream = recognizer.createStream();
+const wave = sherpa_onnx.readWave(waveFilename);
+stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
+
+recognizer.decode(stream);
+const result = recognizer.getResult(stream);
+let stop = performance.now();
+console.log('Done');
+
+const elapsed_seconds = (stop - start) / 1000;
+const duration = wave.samples.length / wave.sampleRate;
+const real_time_factor = elapsed_seconds / duration;
+console.log('Wave duration', duration.toFixed(3), 'seconds');
+console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
+console.log(
+    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
+    real_time_factor.toFixed(3));
+console.log(waveFilename);
+console.log('result\n', result);
new file: test_asr_streaming_paraformer.js

+// Copyright (c) 2024 Xiaomi Corporation
+const sherpa_onnx = require('sherpa-onnx-node');
+const performance = require('perf_hooks').performance;
+
+
+// Please download test files from
+// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+const config = {
+  'featConfig': {
+    'sampleRate': 16000,
+    'featureDim': 80,
+  },
+  'modelConfig': {
+    'paraformer': {
+      'encoder':
+          './sherpa-onnx-streaming-paraformer-bilingual-zh-en/encoder.int8.onnx',
+      'decoder':
+          './sherpa-onnx-streaming-paraformer-bilingual-zh-en/decoder.int8.onnx',
+    },
+    'tokens': './sherpa-onnx-streaming-paraformer-bilingual-zh-en/tokens.txt',
+    'numThreads': 2,
+    'provider': 'cpu',
+    'debug': 1,
+  }
+};
+
+const waveFilename =
+    './sherpa-onnx-streaming-paraformer-bilingual-zh-en/test_wavs/0.wav';
+
+const recognizer = new sherpa_onnx.OnlineRecognizer(config);
+console.log('Started');
+let start = performance.now();
+const stream = recognizer.createStream();
+const wave = sherpa_onnx.readWave(waveFilename);
+stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
+
+// Append 0.4 seconds of silence so the online paraformer can flush and
+// recognize the last word.
+const tailPadding = new Float32Array(wave.sampleRate * 0.4);
+stream.acceptWaveform({samples: tailPadding, sampleRate: wave.sampleRate});
+
+while (recognizer.isReady(stream)) {
+  recognizer.decode(stream);
+}
+const result = recognizer.getResult(stream);
+let stop = performance.now();
+console.log('Done');
+
+const elapsed_seconds = (stop - start) / 1000;
+const duration = wave.samples.length / wave.sampleRate;
+const real_time_factor = elapsed_seconds / duration;
+console.log('Wave duration', duration.toFixed(3), 'seconds');
+console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
+console.log(
+    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
+    real_time_factor.toFixed(3));
+console.log(waveFilename);
+console.log('result\n', result);
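
Unlike the non-streaming scripts, the script above has to append tail padding and then drain the decoder before reading the result. A sketch of that step factored into a helper; the helper name is mine, not part of the PR.

```js
// Hypothetical helper, not part of this PR: the pad-then-drain step
// from test_asr_streaming_paraformer.js. Without the tail padding the
// online paraformer may not emit the last word.
function padAndDrain(recognizer, stream, sampleRate, seconds = 0.4) {
  const tailPadding = new Float32Array(Math.floor(sampleRate * seconds));
  stream.acceptWaveform({samples: tailPadding, sampleRate: sampleRate});
  while (recognizer.isReady(stream)) {  // decode all buffered frames
    recognizer.decode(stream);
  }
  return recognizer.getResult(stream);
}
```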
new file: test_asr_streaming_paraformer_microphone.js

+// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
+//
+const portAudio = require('naudiodon2');
+// console.log(portAudio.getDevices());
+
+const sherpa_onnx = require('sherpa-onnx-node');
+
+function createOnlineRecognizer() {
+  const config = {
+    'featConfig': {
+      'sampleRate': 16000,
+      'featureDim': 80,
+    },
+    'modelConfig': {
+      'paraformer': {
+        'encoder':
+            './sherpa-onnx-streaming-paraformer-bilingual-zh-en/encoder.int8.onnx',
+        'decoder':
+            './sherpa-onnx-streaming-paraformer-bilingual-zh-en/decoder.int8.onnx',
+      },
+      'tokens': './sherpa-onnx-streaming-paraformer-bilingual-zh-en/tokens.txt',
+      'numThreads': 2,
+      'provider': 'cpu',
+      'debug': 1,
+    },
+    'decodingMethod': 'greedy_search',
+    'maxActivePaths': 4,
+    'enableEndpoint': true,
+    'rule1MinTrailingSilence': 2.4,
+    'rule2MinTrailingSilence': 1.2,
+    'rule3MinUtteranceLength': 20
+  };
+
+  return new sherpa_onnx.OnlineRecognizer(config);
+}
+
+const recognizer = createOnlineRecognizer();
+const stream = recognizer.createStream();
+
+let lastText = '';
+let segmentIndex = 0;
+
+const ai = new portAudio.AudioIO({
+  inOptions: {
+    channelCount: 1,
+    closeOnError: true,  // Close the stream if an audio error is detected;
+                         // if set to false, just log the error
+    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
+    sampleFormat: portAudio.SampleFormatFloat32,
+    sampleRate: recognizer.config.featConfig.sampleRate
+  }
+});
+
+const display = new sherpa_onnx.Display(50);
+
+ai.on('data', data => {
+  const samples = new Float32Array(data.buffer);
+
+  stream.acceptWaveform(
+      {sampleRate: recognizer.config.featConfig.sampleRate, samples: samples});
+
+  while (recognizer.isReady(stream)) {
+    recognizer.decode(stream);
+  }
+
+  const isEndpoint = recognizer.isEndpoint(stream);
+  let text = recognizer.getResult(stream).text.toLowerCase();
+
+  if (isEndpoint) {
+    // For online paraformer models, we have to add tail padding manually at
+    // an endpoint so that the last word can be recognized.
+    const tailPadding =
+        new Float32Array(recognizer.config.featConfig.sampleRate * 0.4);
+    stream.acceptWaveform({
+      samples: tailPadding,
+      sampleRate: recognizer.config.featConfig.sampleRate
+    });
+    while (recognizer.isReady(stream)) {
+      recognizer.decode(stream);
+    }
+    text = recognizer.getResult(stream).text.toLowerCase();
+  }
+
+  if (text.length > 0 && lastText != text) {
+    lastText = text;
+    display.print(segmentIndex, lastText);
+  }
+  if (isEndpoint) {
+    if (text.length > 0) {
+      lastText = text;
+      segmentIndex += 1;
+    }
+    recognizer.reset(stream);
+  }
+});
+
+ai.on('close', () => {
+  console.log('Free resources');
+  stream.free();
+  recognizer.free();
+});
+
+ai.start();
+console.log('Started! Please speak');
new file: test_vad_asr_non_streaming_nemo_ctc_microphone.js

+// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
+//
+const portAudio = require('naudiodon2');
+// console.log(portAudio.getDevices());
+
+const sherpa_onnx = require('sherpa-onnx-node');
+
+function createRecognizer() {
+  // Please download test files from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+  const config = {
+    'featConfig': {
+      'sampleRate': 16000,
+      'featureDim': 80,
+    },
+    'modelConfig': {
+      'nemoCtc': {
+        'model':
+            './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/model.onnx',
+      },
+      'tokens':
+          './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/tokens.txt',
+      'numThreads': 2,
+      'provider': 'cpu',
+      'debug': 1,
+    }
+  };
+
+  return new sherpa_onnx.OfflineRecognizer(config);
+}
+
+function createVad() {
+  // Please download silero_vad.onnx from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+  const config = {
+    sileroVad: {
+      model: './silero_vad.onnx',
+      threshold: 0.5,
+      minSpeechDuration: 0.25,
+      minSilenceDuration: 0.5,
+      windowSize: 512,
+    },
+    sampleRate: 16000,
+    debug: true,
+    numThreads: 1,
+  };
+
+  const bufferSizeInSeconds = 60;
+
+  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
+}
+
+const recognizer = createRecognizer();
+const vad = createVad();
+
+const bufferSizeInSeconds = 30;
+const buffer =
+    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);
+
+const ai = new portAudio.AudioIO({
+  inOptions: {
+    channelCount: 1,
+    closeOnError: true,  // Close the stream if an audio error is detected;
+                         // if set to false, just log the error
+    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
+    sampleFormat: portAudio.SampleFormatFloat32,
+    sampleRate: vad.config.sampleRate
+  }
+});
+
+let printed = false;
+let index = 0;
+ai.on('data', data => {
+  const windowSize = vad.config.sileroVad.windowSize;
+  buffer.push(new Float32Array(data.buffer));
+  while (buffer.size() > windowSize) {
+    const samples = buffer.get(buffer.head(), windowSize);
+    buffer.pop(windowSize);
+    vad.acceptWaveform(samples);
+  }
+
+  while (!vad.isEmpty()) {
+    const segment = vad.front();
+    vad.pop();
+    const stream = recognizer.createStream();
+    stream.acceptWaveform({
+      samples: segment.samples,
+      sampleRate: recognizer.config.featConfig.sampleRate
+    });
+    recognizer.decode(stream);
+    const r = recognizer.getResult(stream);
+    if (r.text.length > 0) {
+      const text = r.text.toLowerCase().trim();
+      console.log(`${index}: ${text}`);
+
+      const filename = `${index}-${text}-${
+          new Date()
+              .toLocaleTimeString('en-US', {hour12: false})
+              .split(' ')[0]}.wav`;
+      sherpa_onnx.writeWave(
+          filename,
+          {samples: segment.samples, sampleRate: vad.config.sampleRate});
+
+      index += 1;
+    }
+  }
+});
+
+ai.start();
+console.log('Started! Please speak');
new file: test_vad_asr_non_streaming_paraformer_microphone.js

+// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
+//
+const portAudio = require('naudiodon2');
+// console.log(portAudio.getDevices());
+
+const sherpa_onnx = require('sherpa-onnx-node');
+
+function createRecognizer() {
+  // Please download test files from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+  const config = {
+    'featConfig': {
+      'sampleRate': 16000,
+      'featureDim': 80,
+    },
+    'modelConfig': {
+      'paraformer': {
+        'model': './sherpa-onnx-paraformer-zh-2023-03-28/model.int8.onnx',
+      },
+      'tokens': './sherpa-onnx-paraformer-zh-2023-03-28/tokens.txt',
+      'numThreads': 2,
+      'provider': 'cpu',
+      'debug': 1,
+    }
+  };
+
+  return new sherpa_onnx.OfflineRecognizer(config);
+}
+
+function createVad() {
+  // Please download silero_vad.onnx from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+  const config = {
+    sileroVad: {
+      model: './silero_vad.onnx',
+      threshold: 0.5,
+      minSpeechDuration: 0.25,
+      minSilenceDuration: 0.5,
+      windowSize: 512,
+    },
+    sampleRate: 16000,
+    debug: true,
+    numThreads: 1,
+  };
+
+  const bufferSizeInSeconds = 60;
+
+  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
+}
+
+const recognizer = createRecognizer();
+const vad = createVad();
+
+const bufferSizeInSeconds = 30;
+const buffer =
+    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);
+
+const ai = new portAudio.AudioIO({
+  inOptions: {
+    channelCount: 1,
+    closeOnError: true,  // Close the stream if an audio error is detected;
+                         // if set to false, just log the error
+    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
+    sampleFormat: portAudio.SampleFormatFloat32,
+    sampleRate: vad.config.sampleRate
+  }
+});
+
+let printed = false;
+let index = 0;
+ai.on('data', data => {
+  const windowSize = vad.config.sileroVad.windowSize;
+  buffer.push(new Float32Array(data.buffer));
+  while (buffer.size() > windowSize) {
+    const samples = buffer.get(buffer.head(), windowSize);
+    buffer.pop(windowSize);
+    vad.acceptWaveform(samples);
+  }
+
+  while (!vad.isEmpty()) {
+    const segment = vad.front();
+    vad.pop();
+    const stream = recognizer.createStream();
+    stream.acceptWaveform({
+      samples: segment.samples,
+      sampleRate: recognizer.config.featConfig.sampleRate
+    });
+    recognizer.decode(stream);
+    const r = recognizer.getResult(stream);
+    if (r.text.length > 0) {
+      const text = r.text.toLowerCase().trim();
+      console.log(`${index}: ${text}`);
+
+      const filename = `${index}-${text}-${
+          new Date()
+              .toLocaleTimeString('en-US', {hour12: false})
+              .split(' ')[0]}.wav`;
+      sherpa_onnx.writeWave(
+          filename,
+          {samples: segment.samples, sampleRate: vad.config.sampleRate});
+
+      index += 1;
+    }
+  }
+});
+
+ai.start();
+console.log('Started! Please speak');
new file: test_vad_asr_non_streaming_transducer_microphone.js

+// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
+//
+const portAudio = require('naudiodon2');
+// console.log(portAudio.getDevices());
+
+const sherpa_onnx = require('sherpa-onnx-node');
+
+function createRecognizer() {
+  // Please download test files from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+  const config = {
+    'featConfig': {
+      'sampleRate': 16000,
+      'featureDim': 80,
+    },
+    'modelConfig': {
+      'transducer': {
+        'encoder':
+            './sherpa-onnx-zipformer-en-2023-04-01/encoder-epoch-99-avg-1.int8.onnx',
+        'decoder':
+            './sherpa-onnx-zipformer-en-2023-04-01/decoder-epoch-99-avg-1.onnx',
+        'joiner':
+            './sherpa-onnx-zipformer-en-2023-04-01/joiner-epoch-99-avg-1.int8.onnx',
+      },
+      'tokens': './sherpa-onnx-zipformer-en-2023-04-01/tokens.txt',
+      'numThreads': 2,
+      'provider': 'cpu',
+      'debug': 1,
+    }
+  };
+
+  return new sherpa_onnx.OfflineRecognizer(config);
+}
+
+function createVad() {
+  // Please download silero_vad.onnx from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+  const config = {
+    sileroVad: {
+      model: './silero_vad.onnx',
+      threshold: 0.5,
+      minSpeechDuration: 0.25,
+      minSilenceDuration: 0.5,
+      windowSize: 512,
+    },
+    sampleRate: 16000,
+    debug: true,
+    numThreads: 1,
+  };
+
+  const bufferSizeInSeconds = 60;
+
+  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
+}
+
+const recognizer = createRecognizer();
+const vad = createVad();
+
+const bufferSizeInSeconds = 30;
+const buffer =
+    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);
+
+const ai = new portAudio.AudioIO({
+  inOptions: {
+    channelCount: 1,
+    closeOnError: true,  // Close the stream if an audio error is detected;
+                         // if set to false, just log the error
+    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
+    sampleFormat: portAudio.SampleFormatFloat32,
+    sampleRate: vad.config.sampleRate
+  }
+});
+
+let printed = false;
+let index = 0;
+ai.on('data', data => {
+  const windowSize = vad.config.sileroVad.windowSize;
+  buffer.push(new Float32Array(data.buffer));
+  while (buffer.size() > windowSize) {
+    const samples = buffer.get(buffer.head(), windowSize);
+    buffer.pop(windowSize);
+    vad.acceptWaveform(samples);
+  }
+
+  while (!vad.isEmpty()) {
+    const segment = vad.front();
+    vad.pop();
+    const stream = recognizer.createStream();
+    stream.acceptWaveform({
+      samples: segment.samples,
+      sampleRate: recognizer.config.featConfig.sampleRate
+    });
+    recognizer.decode(stream);
+    const r = recognizer.getResult(stream);
+    if (r.text.length > 0) {
+      const text = r.text.toLowerCase().trim();
+      console.log(`${index}: ${text}`);
+
+      const filename = `${index}-${text}-${
+          new Date()
+              .toLocaleTimeString('en-US', {hour12: false})
+              .split(' ')[0]}.wav`;
+      sherpa_onnx.writeWave(
+          filename,
+          {samples: segment.samples, sampleRate: vad.config.sampleRate});
+
+      index += 1;
+    }
+  }
+});
+
+ai.start();
+console.log('Started! Please speak');
new file: test_vad_asr_non_streaming_whisper_microphone.js

+// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
+//
+const portAudio = require('naudiodon2');
+// console.log(portAudio.getDevices());
+
+const sherpa_onnx = require('sherpa-onnx-node');
+
+function createRecognizer() {
+  // Please download test files from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+  const config = {
+    'featConfig': {
+      'sampleRate': 16000,
+      'featureDim': 80,
+    },
+    'modelConfig': {
+      'whisper': {
+        'encoder': './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
+        'decoder': './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
+      },
+      'tokens': './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
+      'numThreads': 2,
+      'provider': 'cpu',
+      'debug': 1,
+    }
+  };
+
+  return new sherpa_onnx.OfflineRecognizer(config);
+}
+
+function createVad() {
+  // Please download silero_vad.onnx from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+  const config = {
+    sileroVad: {
+      model: './silero_vad.onnx',
+      threshold: 0.5,
+      minSpeechDuration: 0.25,
+      minSilenceDuration: 0.5,
+      windowSize: 512,
+    },
+    sampleRate: 16000,
+    debug: true,
+    numThreads: 1,
+  };
+
+  const bufferSizeInSeconds = 60;
+
+  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
+}
+
+const recognizer = createRecognizer();
+const vad = createVad();
+
+const bufferSizeInSeconds = 30;
+const buffer =
+    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);
+
+const ai = new portAudio.AudioIO({
+  inOptions: {
+    channelCount: 1,
+    closeOnError: true,  // Close the stream if an audio error is detected;
+                         // if set to false, just log the error
+    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
+    sampleFormat: portAudio.SampleFormatFloat32,
+    sampleRate: vad.config.sampleRate
+  }
+});
+
+let printed = false;
+let index = 0;
+ai.on('data', data => {
+  const windowSize = vad.config.sileroVad.windowSize;
+  buffer.push(new Float32Array(data.buffer));
+  while (buffer.size() > windowSize) {
+    const samples = buffer.get(buffer.head(), windowSize);
+    buffer.pop(windowSize);
+    vad.acceptWaveform(samples);
+  }
+
+  while (!vad.isEmpty()) {
+    const segment = vad.front();
+    vad.pop();
+    const stream = recognizer.createStream();
+    stream.acceptWaveform({
+      samples: segment.samples,
+      sampleRate: recognizer.config.featConfig.sampleRate
+    });
+    recognizer.decode(stream);
+    const r = recognizer.getResult(stream);
+    if (r.text.length > 0) {
+      const text = r.text.toLowerCase().trim();
+      console.log(`${index}: ${text}`);
+
+      const filename = `${index}-${text}-${
+          new Date()
+              .toLocaleTimeString('en-US', {hour12: false})
+              .split(' ')[0]}.wav`;
+      sherpa_onnx.writeWave(
+          filename,
+          {samples: segment.samples, sampleRate: vad.config.sampleRate});
+
+      index += 1;
+    }
+  }
+});
+
+ai.start();
+console.log('Started! Please speak');
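
All four VAD microphone examples above share the same chunking loop: audio callbacks push samples into a circular buffer, and Silero VAD consumes them in fixed windows of `windowSize` (512) samples. Condensed as a sketch below; the function name is mine, and the `vad` and `buffer` objects are constructed exactly as in the scripts above.

```js
// Condensed from the VAD microphone examples above (illustrative only).
function feedVad(vad, buffer, chunk) {
  const windowSize = vad.config.sileroVad.windowSize;  // 512 samples
  buffer.push(chunk);  // chunk is a Float32Array from the audio callback
  // Drain the buffer window by window; the remainder (fewer than
  // windowSize samples) stays buffered until the next callback.
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);
  }
}
```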
modified: CMakeLists.txt

@@ -18,6 +18,7 @@ add_definitions(-DNAPI_VERSION=3)
 include_directories(${CMAKE_JS_INC})

 set(srcs
+  src/non-streaming-asr.cc
   src/sherpa-onnx-node-addon-api.cc
   src/streaming-asr.cc
   src/vad.cc
new file: non-streaming-asr.js

+const addon = require('./addon.js');
+
+class OfflineStream {
+  constructor(handle) {
+    this.handle = handle;
+  }
+
+  // obj is {samples: samples, sampleRate: sampleRate}
+  // samples is a float32 array containing samples in the range [-1, 1]
+  // sampleRate is a number
+  acceptWaveform(obj) {
+    addon.acceptWaveformOffline(this.handle, obj);
+  }
+}
+
+class OfflineRecognizer {
+  constructor(config) {
+    this.handle = addon.createOfflineRecognizer(config);
+    this.config = config;
+  }
+
+  createStream() {
+    const handle = addon.createOfflineStream(this.handle);
+    return new OfflineStream(handle);
+  }
+
+  decode(stream) {
+    addon.decodeOfflineStream(this.handle, stream.handle);
+  }
+
+  getResult(stream) {
+    const jsonStr = addon.getOfflineStreamResultAsJson(stream.handle);
+    return JSON.parse(jsonStr);
+  }
+}
+
+module.exports = {
+  OfflineRecognizer,
+}
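
A minimal usage sketch for the wrapper above, mirroring the test_asr_non_streaming_*.js scripts in this PR; the Whisper model paths are the ones used earlier.

```js
const sherpa_onnx = require('sherpa-onnx-node');

// Any one of transducer / paraformer / nemoCtc / whisper / tdnn may be
// set inside modelConfig; see non-streaming-asr.cc for the parsed keys.
const recognizer = new sherpa_onnx.OfflineRecognizer({
  featConfig: {sampleRate: 16000, featureDim: 80},
  modelConfig: {
    whisper: {
      encoder: './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
      decoder: './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
    },
    tokens: './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
  },
});

const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave('./sherpa-onnx-whisper-tiny.en/test_wavs/0.wav');
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);
console.log(recognizer.getResult(stream).text);
```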
@@ -1,9 +1,11 @@
 const addon = require('./addon.js')
 const streaming_asr = require('./streaming-asr.js');
+const non_streaming_asr = require('./non-streaming-asr.js');
 const vad = require('./vad.js');

 module.exports = {
   OnlineRecognizer: streaming_asr.OnlineRecognizer,
+  OfflineRecognizer: non_streaming_asr.OfflineRecognizer,
   readWave: addon.readWave,
   writeWave: addon.writeWave,
   Display: streaming_asr.Display,
new file: scripts/node-addon-api/src/non-streaming-asr.cc

+// scripts/node-addon-api/src/non-streaming-asr.cc
+//
+// Copyright (c) 2024 Xiaomi Corporation
+#include <algorithm>  // std::copy
+#include <cstring>    // memset
+#include <sstream>
+
+#include "napi.h"  // NOLINT
+#include "sherpa-onnx/c-api/c-api.h"
+
+// defined in ./streaming-asr.cc
+SherpaOnnxFeatureConfig GetFeatureConfig(Napi::Object obj);
+
+static SherpaOnnxOfflineTransducerModelConfig GetOfflineTransducerModelConfig(
+    Napi::Object obj) {
+  SherpaOnnxOfflineTransducerModelConfig config;
+  memset(&config, 0, sizeof(config));
+
+  if (!obj.Has("transducer") || !obj.Get("transducer").IsObject()) {
+    return config;
+  }
+
+  Napi::Object o = obj.Get("transducer").As<Napi::Object>();
+
+  if (o.Has("encoder") && o.Get("encoder").IsString()) {
+    Napi::String encoder = o.Get("encoder").As<Napi::String>();
+    std::string s = encoder.Utf8Value();
+    // Copy the string into a heap-allocated, NUL-terminated C string. The
+    // same pattern is used for every string option below; the copies are
+    // freed in CreateOfflineRecognizerWrapper() after the recognizer is
+    // created.
+    char *p = new char[s.size() + 1];
+    std::copy(s.begin(), s.end(), p);
+    p[s.size()] = 0;
+
+    config.encoder = p;
+  }
+
+  if (o.Has("decoder") && o.Get("decoder").IsString()) {
+    Napi::String decoder = o.Get("decoder").As<Napi::String>();
+    std::string s = decoder.Utf8Value();
+    char *p = new char[s.size() + 1];
+    std::copy(s.begin(), s.end(), p);
+    p[s.size()] = 0;
+
+    config.decoder = p;
+  }
+
+  if (o.Has("joiner") && o.Get("joiner").IsString()) {
+    Napi::String joiner = o.Get("joiner").As<Napi::String>();
+    std::string s = joiner.Utf8Value();
+    char *p = new char[s.size() + 1];
+    std::copy(s.begin(), s.end(), p);
+    p[s.size()] = 0;
+
+    config.joiner = p;
+  }
+
+  return config;
+}
| 55 | + | ||
| 56 | +static SherpaOnnxOfflineParaformerModelConfig GetOfflineParaformerModelConfig( | ||
| 57 | + Napi::Object obj) { | ||
| 58 | + SherpaOnnxOfflineParaformerModelConfig config; | ||
| 59 | + memset(&config, 0, sizeof(config)); | ||
| 60 | + | ||
| 61 | + if (!obj.Has("paraformer") || !obj.Get("paraformer").IsObject()) { | ||
| 62 | + return config; | ||
| 63 | + } | ||
| 64 | + | ||
| 65 | + Napi::Object o = obj.Get("paraformer").As<Napi::Object>(); | ||
| 66 | + | ||
| 67 | + if (o.Has("model") && o.Get("model").IsString()) { | ||
| 68 | + Napi::String model = o.Get("model").As<Napi::String>(); | ||
| 69 | + std::string s = model.Utf8Value(); | ||
| 70 | + char *p = new char[s.size() + 1]; | ||
| 71 | + std::copy(s.begin(), s.end(), p); | ||
| 72 | + p[s.size()] = 0; | ||
| 73 | + | ||
| 74 | + config.model = p; | ||
| 75 | + } | ||
| 76 | + | ||
| 77 | + return config; | ||
| 78 | +} | ||
| 79 | + | ||
| 80 | +static SherpaOnnxOfflineNemoEncDecCtcModelConfig GetOfflineNeMoCtcModelConfig( | ||
| 81 | + Napi::Object obj) { | ||
| 82 | + SherpaOnnxOfflineNemoEncDecCtcModelConfig config; | ||
| 83 | + memset(&config, 0, sizeof(config)); | ||
| 84 | + | ||
| 85 | + if (!obj.Has("nemoCtc") || !obj.Get("nemoCtc").IsObject()) { | ||
| 86 | + return config; | ||
| 87 | + } | ||
| 88 | + | ||
| 89 | + Napi::Object o = obj.Get("nemoCtc").As<Napi::Object>(); | ||
| 90 | + | ||
| 91 | + if (o.Has("model") && o.Get("model").IsString()) { | ||
| 92 | + Napi::String model = o.Get("model").As<Napi::String>(); | ||
| 93 | + std::string s = model.Utf8Value(); | ||
| 94 | + char *p = new char[s.size() + 1]; | ||
| 95 | + std::copy(s.begin(), s.end(), p); | ||
| 96 | + p[s.size()] = 0; | ||
| 97 | + | ||
| 98 | + config.model = p; | ||
| 99 | + } | ||
| 100 | + | ||
| 101 | + return config; | ||
| 102 | +} | ||
| 103 | + | ||
| 104 | +static SherpaOnnxOfflineWhisperModelConfig GetOfflineWhisperModelConfig( | ||
| 105 | + Napi::Object obj) { | ||
| 106 | + SherpaOnnxOfflineWhisperModelConfig config; | ||
| 107 | + memset(&config, 0, sizeof(config)); | ||
| 108 | + | ||
| 109 | + if (!obj.Has("whisper") || !obj.Get("whisper").IsObject()) { | ||
| 110 | + return config; | ||
| 111 | + } | ||
| 112 | + | ||
| 113 | + Napi::Object o = obj.Get("whisper").As<Napi::Object>(); | ||
| 114 | + | ||
| 115 | + if (o.Has("encoder") && o.Get("encoder").IsString()) { | ||
| 116 | + Napi::String encoder = o.Get("encoder").As<Napi::String>(); | ||
| 117 | + std::string s = encoder.Utf8Value(); | ||
| 118 | + char *p = new char[s.size() + 1]; | ||
| 119 | + std::copy(s.begin(), s.end(), p); | ||
| 120 | + p[s.size()] = 0; | ||
| 121 | + | ||
| 122 | + config.encoder = p; | ||
| 123 | + } | ||
| 124 | + | ||
| 125 | + if (o.Has("decoder") && o.Get("decoder").IsString()) { | ||
| 126 | + Napi::String decoder = o.Get("decoder").As<Napi::String>(); | ||
| 127 | + std::string s = decoder.Utf8Value(); | ||
| 128 | + char *p = new char[s.size() + 1]; | ||
| 129 | + std::copy(s.begin(), s.end(), p); | ||
| 130 | + p[s.size()] = 0; | ||
| 131 | + | ||
| 132 | + config.decoder = p; | ||
| 133 | + } | ||
| 134 | + | ||
| 135 | + if (o.Has("language") && o.Get("language").IsString()) { | ||
| 136 | + Napi::String language = o.Get("language").As<Napi::String>(); | ||
| 137 | + std::string s = language.Utf8Value(); | ||
| 138 | + char *p = new char[s.size() + 1]; | ||
| 139 | + std::copy(s.begin(), s.end(), p); | ||
| 140 | + p[s.size()] = 0; | ||
| 141 | + | ||
| 142 | + config.language = p; | ||
| 143 | + } | ||
| 144 | + | ||
| 145 | + if (o.Has("task") && o.Get("task").IsString()) { | ||
| 146 | + Napi::String task = o.Get("task").As<Napi::String>(); | ||
| 147 | + std::string s = task.Utf8Value(); | ||
| 148 | + char *p = new char[s.size() + 1]; | ||
| 149 | + std::copy(s.begin(), s.end(), p); | ||
| 150 | + p[s.size()] = 0; | ||
| 151 | + | ||
| 152 | + config.task = p; | ||
| 153 | + } | ||
| 154 | + | ||
| 155 | + return config; | ||
| 156 | +} | ||
| 157 | + | ||
| 158 | +static SherpaOnnxOfflineTdnnModelConfig GetOfflineTdnnModelConfig( | ||
| 159 | + Napi::Object obj) { | ||
| 160 | + SherpaOnnxOfflineTdnnModelConfig config; | ||
| 161 | + memset(&config, 0, sizeof(config)); | ||
| 162 | + | ||
| 163 | + if (!obj.Has("tdnn") || !obj.Get("tdnn").IsObject()) { | ||
| 164 | + return config; | ||
| 165 | + } | ||
| 166 | + | ||
| 167 | + Napi::Object o = obj.Get("tdnn").As<Napi::Object>(); | ||
| 168 | + | ||
| 169 | + if (o.Has("model") && o.Get("model").IsString()) { | ||
| 170 | + Napi::String model = o.Get("model").As<Napi::String>(); | ||
| 171 | + std::string s = model.Utf8Value(); | ||
| 172 | + char *p = new char[s.size() + 1]; | ||
| 173 | + std::copy(s.begin(), s.end(), p); | ||
| 174 | + p[s.size()] = 0; | ||
| 175 | + | ||
| 176 | + config.model = p; | ||
| 177 | + } | ||
| 178 | + | ||
| 179 | + return config; | ||
| 180 | +} | ||
| 181 | + | ||
| 182 | +static SherpaOnnxOfflineModelConfig GetOfflineModelConfig(Napi::Object obj) { | ||
| 183 | + SherpaOnnxOfflineModelConfig c; | ||
| 184 | + memset(&c, 0, sizeof(c)); | ||
| 185 | + | ||
| 186 | + if (!obj.Has("modelConfig") || !obj.Get("modelConfig").IsObject()) { | ||
| 187 | + return c; | ||
| 188 | + } | ||
| 189 | + | ||
| 190 | + Napi::Object o = obj.Get("modelConfig").As<Napi::Object>(); | ||
| 191 | + | ||
| 192 | + c.transducer = GetOfflineTransducerModelConfig(o); | ||
| 193 | + c.paraformer = GetOfflineParaformerModelConfig(o); | ||
| 194 | + c.nemo_ctc = GetOfflineNeMoCtcModelConfig(o); | ||
| 195 | + c.whisper = GetOfflineWhisperModelConfig(o); | ||
| 196 | + c.tdnn = GetOfflineTdnnModelConfig(o); | ||
| 197 | + | ||
| 198 | + if (o.Has("tokens") && o.Get("tokens").IsString()) { | ||
| 199 | + Napi::String tokens = o.Get("tokens").As<Napi::String>(); | ||
| 200 | + std::string s = tokens.Utf8Value(); | ||
| 201 | + char *p = new char[s.size() + 1]; | ||
| 202 | + std::copy(s.begin(), s.end(), p); | ||
| 203 | + p[s.size()] = 0; | ||
| 204 | + | ||
| 205 | + c.tokens = p; | ||
| 206 | + } | ||
| 207 | + | ||
| 208 | + if (o.Has("numThreads") && o.Get("numThreads").IsNumber()) { | ||
| 209 | + c.num_threads = o.Get("numThreads").As<Napi::Number>().Int32Value(); | ||
| 210 | + } | ||
| 211 | + | ||
| 212 | + if (o.Has("debug") && | ||
| 213 | + (o.Get("debug").IsNumber() || o.Get("debug").IsBoolean())) { | ||
| 214 | + if (o.Get("debug").IsBoolean()) { | ||
| 215 | + c.debug = o.Get("debug").As<Napi::Boolean>().Value(); | ||
| 216 | + } else { | ||
| 217 | + c.debug = o.Get("debug").As<Napi::Number>().Int32Value(); | ||
| 218 | + } | ||
| 219 | + } | ||
| 220 | + | ||
| 221 | + if (o.Has("provider") && o.Get("provider").IsString()) { | ||
| 222 | + Napi::String provider = o.Get("provider").As<Napi::String>(); | ||
| 223 | + std::string s = provider.Utf8Value(); | ||
| 224 | + char *p = new char[s.size() + 1]; | ||
| 225 | + std::copy(s.begin(), s.end(), p); | ||
| 226 | + p[s.size()] = 0; | ||
| 227 | + | ||
| 228 | + c.provider = p; | ||
| 229 | + } | ||
| 230 | + | ||
| 231 | + if (o.Has("modelType") && o.Get("modelType").IsString()) { | ||
| 232 | + Napi::String model_type = o.Get("modelType").As<Napi::String>(); | ||
| 233 | + std::string s = model_type.Utf8Value(); | ||
| 234 | + char *p = new char[s.size() + 1]; | ||
| 235 | + std::copy(s.begin(), s.end(), p); | ||
| 236 | + p[s.size()] = 0; | ||
| 237 | + | ||
| 238 | + c.model_type = p; | ||
| 239 | + } | ||
| 240 | + | ||
| 241 | + return c; | ||
| 242 | +} | ||
| 243 | + | ||
| 244 | +static SherpaOnnxOfflineLMConfig GetOfflineLMConfig(Napi::Object obj) { | ||
| 245 | + SherpaOnnxOfflineLMConfig c; | ||
| 246 | + memset(&c, 0, sizeof(c)); | ||
| 247 | + | ||
| 248 | + if (!obj.Has("lmConfig") || !obj.Get("lmConfig").IsObject()) { | ||
| 249 | + return c; | ||
| 250 | + } | ||
| 251 | + | ||
| 252 | + Napi::Object o = obj.Get("lmConfig").As<Napi::Object>(); | ||
| 253 | + | ||
| 254 | + if (o.Has("model") && o.Get("model").IsString()) { | ||
| 255 | + Napi::String model = o.Get("model").As<Napi::String>(); | ||
| 256 | + std::string s = model.Utf8Value(); | ||
| 257 | + char *p = new char[s.size() + 1]; | ||
| 258 | + std::copy(s.begin(), s.end(), p); | ||
| 259 | + p[s.size()] = 0; | ||
| 260 | + | ||
| 261 | + c.model = p; | ||
| 262 | + } | ||
| 263 | + | ||
| 264 | + if (o.Has("scale") && o.Get("scale").IsNumber()) { | ||
| 265 | + c.scale = o.Get("scale").As<Napi::Number>().FloatValue(); | ||
| 266 | + } | ||
| 267 | + | ||
| 268 | + return c; | ||
| 269 | +} | ||
| 270 | + | ||
| 271 | +static Napi::External<SherpaOnnxOfflineRecognizer> | ||
| 272 | +CreateOfflineRecognizerWrapper(const Napi::CallbackInfo &info) { | ||
| 273 | + Napi::Env env = info.Env(); | ||
| 274 | + if (info.Length() != 1) { | ||
| 275 | + std::ostringstream os; | ||
| 276 | + os << "Expect only 1 argument. Given: " << info.Length(); | ||
| 277 | + | ||
| 278 | + Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException(); | ||
| 279 | + | ||
| 280 | + return {}; | ||
| 281 | + } | ||
| 282 | + | ||
| 283 | + if (!info[0].IsObject()) { | ||
| 284 | + Napi::TypeError::New(env, "Expect an object as the argument") | ||
| 285 | + .ThrowAsJavaScriptException(); | ||
| 286 | + | ||
| 287 | + return {}; | ||
| 288 | + } | ||
| 289 | + | ||
| 290 | + Napi::Object o = info[0].As<Napi::Object>(); | ||
| 291 | + | ||
| 292 | + SherpaOnnxOfflineRecognizerConfig c; | ||
| 293 | + memset(&c, 0, sizeof(c)); | ||
| 294 | + c.feat_config = GetFeatureConfig(o); | ||
| 295 | + c.model_config = GetOfflineModelConfig(o); | ||
| 296 | + c.lm_config = GetOfflineLMConfig(o); | ||
| 297 | + | ||
| 298 | + if (o.Has("decodingMethod") && o.Get("decodingMethod").IsString()) { | ||
| 299 | + Napi::String decoding_method = o.Get("decodingMethod").As<Napi::String>(); | ||
| 300 | + std::string s = decoding_method.Utf8Value(); | ||
| 301 | + char *p = new char[s.size() + 1]; | ||
| 302 | + std::copy(s.begin(), s.end(), p); | ||
| 303 | + p[s.size()] = 0; | ||
| 304 | + | ||
| 305 | + c.decoding_method = p; | ||
| 306 | + } | ||
| 307 | + | ||
| 308 | + if (o.Has("maxActivePaths") && o.Get("maxActivePaths").IsNumber()) { | ||
| 309 | + c.max_active_paths = | ||
| 310 | + o.Get("maxActivePaths").As<Napi::Number>().Int32Value(); | ||
| 311 | + } | ||
| 312 | + | ||
| 313 | + if (o.Has("hotwordsFile") && o.Get("hotwordsFile").IsString()) { | ||
| 314 | + Napi::String hotwords_file = o.Get("hotwordsFile").As<Napi::String>(); | ||
| 315 | + std::string s = hotwords_file.Utf8Value(); | ||
| 316 | + char *p = new char[s.size() + 1]; | ||
| 317 | + std::copy(s.begin(), s.end(), p); | ||
| 318 | + p[s.size()] = 0; | ||
| 319 | + | ||
| 320 | + c.hotwords_file = p; | ||
| 321 | + } | ||
| 322 | + | ||
| 323 | + if (o.Has("hotwordsScore") && o.Get("hotwordsScore").IsNumber()) { | ||
| 324 | + c.hotwords_score = o.Get("hotwordsScore").As<Napi::Number>().FloatValue(); | ||
| 325 | + } | ||
| 326 | + | ||
| 327 | + SherpaOnnxOfflineRecognizer *recognizer = CreateOfflineRecognizer(&c); | ||
| 328 | + | ||
| 329 | + if (c.model_config.transducer.encoder) { | ||
| 330 | + delete[] c.model_config.transducer.encoder; | ||
| 331 | + } | ||
| 332 | + | ||
| 333 | + if (c.model_config.transducer.decoder) { | ||
| 334 | + delete[] c.model_config.transducer.decoder; | ||
| 335 | + } | ||
| 336 | + | ||
| 337 | + if (c.model_config.transducer.joiner) { | ||
| 338 | + delete[] c.model_config.transducer.joiner; | ||
| 339 | + } | ||
| 340 | + | ||
| 341 | + if (c.model_config.paraformer.model) { | ||
| 342 | + delete[] c.model_config.paraformer.model; | ||
| 343 | + } | ||
| 344 | + | ||
| 345 | + if (c.model_config.nemo_ctc.model) { | ||
| 346 | + delete[] c.model_config.nemo_ctc.model; | ||
| 347 | + } | ||
| 348 | + | ||
| 349 | + if (c.model_config.whisper.encoder) { | ||
| 350 | + delete[] c.model_config.whisper.encoder; | ||
| 351 | + } | ||
| 352 | + | ||
| 353 | + if (c.model_config.whisper.decoder) { | ||
| 354 | + delete[] c.model_config.whisper.decoder; | ||
| 355 | + } | ||
| 356 | + | ||
| 357 | + if (c.model_config.whisper.language) { | ||
| 358 | + delete[] c.model_config.whisper.language; | ||
| 359 | + } | ||
| 360 | + | ||
| 361 | + if (c.model_config.whisper.task) { | ||
| 362 | + delete[] c.model_config.whisper.task; | ||
| 363 | + } | ||
| 364 | + | ||
| 365 | + if (c.model_config.tdnn.model) { | ||
| 366 | + delete[] c.model_config.tdnn.model; | ||
| 367 | + } | ||
| 368 | + | ||
| 369 | + if (c.model_config.tokens) { | ||
| 370 | + delete[] c.model_config.tokens; | ||
| 371 | + } | ||
| 372 | + | ||
| 373 | + if (c.model_config.provider) { | ||
| 374 | + delete[] c.model_config.provider; | ||
| 375 | + } | ||
| 376 | + | ||
| 377 | + if (c.model_config.model_type) { | ||
| 378 | + delete[] c.model_config.model_type; | ||
| 379 | + } | ||
| 380 | + | ||
| 381 | + if (c.lm_config.model) { | ||
| 382 | + delete[] c.lm_config.model; | ||
| 383 | + } | ||
| 384 | + | ||
| 385 | + if (c.decoding_method) { | ||
| 386 | + delete[] c.decoding_method; | ||
| 387 | + } | ||
| 388 | + | ||
| 389 | + if (c.hotwords_file) { | ||
| 390 | + delete[] c.hotwords_file; | ||
| 391 | + } | ||
| 392 | + | ||
| 393 | + if (!recognizer) { | ||
| 394 | + Napi::TypeError::New(env, "Please check your config!") | ||
| 395 | + .ThrowAsJavaScriptException(); | ||
| 396 | + | ||
| 397 | + return {}; | ||
| 398 | + } | ||
| 399 | + | ||
| 400 | + return Napi::External<SherpaOnnxOfflineRecognizer>::New( | ||
| 401 | + env, recognizer, | ||
| 402 | + [](Napi::Env env, SherpaOnnxOfflineRecognizer *recognizer) { | ||
| 403 | + DestroyOfflineRecognizer(recognizer); | ||
| 404 | + }); | ||
| 405 | +} | ||
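Note: the wrapper above expects a single JavaScript object whose top-level keys mirror `SherpaOnnxOfflineRecognizerConfig`. Only `decodingMethod`, `maxActivePaths`, `hotwordsFile`, and `hotwordsScore` are read directly in this function; the nested shapes consumed by `GetFeatureConfig` and `GetOfflineModelConfig` are not shown in this hunk, so the sketch below is an assumption based on the strings that are freed after `CreateOfflineRecognizer` returns:

```js
// A hypothetical config for createOfflineRecognizer(). Only decodingMethod,
// maxActivePaths, hotwordsFile, and hotwordsScore are confirmed by this hunk;
// the featConfig/modelConfig field names and file paths are assumptions.
const config = {
  featConfig: {
    sampleRate: 16000,
    featureDim: 80,
  },
  modelConfig: {
    transducer: {
      encoder: './encoder.onnx',
      decoder: './decoder.onnx',
      joiner: './joiner.onnx',
    },
    tokens: './tokens.txt',
  },
  decodingMethod: 'greedy_search',
  maxActivePaths: 4, // relevant for beam-search decoding
};
```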
| 406 | + | ||
| 407 | +static Napi::External<SherpaOnnxOfflineStream> CreateOfflineStreamWrapper( | ||
| 408 | + const Napi::CallbackInfo &info) { | ||
| 409 | + Napi::Env env = info.Env(); | ||
| 410 | + if (info.Length() != 1) { | ||
| 411 | + std::ostringstream os; | ||
| 412 | + os << "Expect only 1 argument. Given: " << info.Length(); | ||
| 413 | + | ||
| 414 | + Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException(); | ||
| 415 | + | ||
| 416 | + return {}; | ||
| 417 | + } | ||
| 418 | + | ||
| 419 | + if (!info[0].IsExternal()) { | ||
| 420 | + Napi::TypeError::New( | ||
| 421 | + env, | ||
| 422 | + "You should pass an offline recognizer pointer as the only argument") | ||
| 423 | + .ThrowAsJavaScriptException(); | ||
| 424 | + | ||
| 425 | + return {}; | ||
| 426 | + } | ||
| 427 | + | ||
| 428 | + SherpaOnnxOfflineRecognizer *recognizer = | ||
| 429 | + info[0].As<Napi::External<SherpaOnnxOfflineRecognizer>>().Data(); | ||
| 430 | + | ||
| 431 | + SherpaOnnxOfflineStream *stream = CreateOfflineStream(recognizer); | ||
| 432 | + | ||
| 433 | + return Napi::External<SherpaOnnxOfflineStream>::New( | ||
| 434 | + env, stream, [](Napi::Env env, SherpaOnnxOfflineStream *stream) { | ||
| 435 | + DestroyOfflineStream(stream); | ||
| 436 | + }); | ||
| 437 | +} | ||
| 438 | + | ||
| 439 | +static void AcceptWaveformOfflineWrapper(const Napi::CallbackInfo &info) { | ||
| 440 | + Napi::Env env = info.Env(); | ||
| 441 | + | ||
| 442 | + if (info.Length() != 2) { | ||
| 443 | + std::ostringstream os; | ||
| 444 | + os << "Expect only 2 arguments. Given: " << info.Length(); | ||
| 445 | + | ||
| 446 | + Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException(); | ||
| 447 | + | ||
| 448 | + return; | ||
| 449 | + } | ||
| 450 | + | ||
| 451 | + if (!info[0].IsExternal()) { | ||
| 452 | + Napi::TypeError::New(env, "Argument 0 should be an offline stream pointer.") | ||
| 453 | + .ThrowAsJavaScriptException(); | ||
| 454 | + | ||
| 455 | + return; | ||
| 456 | + } | ||
| 457 | + | ||
| 458 | + SherpaOnnxOfflineStream *stream = | ||
| 459 | + info[0].As<Napi::External<SherpaOnnxOfflineStream>>().Data(); | ||
| 460 | + | ||
| 461 | + if (!info[1].IsObject()) { | ||
| 462 | + Napi::TypeError::New(env, "Argument 1 should be an object") | ||
| 463 | + .ThrowAsJavaScriptException(); | ||
| 464 | + | ||
| 465 | + return; | ||
| 466 | + } | ||
| 467 | + | ||
| 468 | + Napi::Object obj = info[1].As<Napi::Object>(); | ||
| 469 | + | ||
| 470 | + if (!obj.Has("samples")) { | ||
| 471 | + Napi::TypeError::New(env, "The argument object should have a field samples") | ||
| 472 | + .ThrowAsJavaScriptException(); | ||
| 473 | + | ||
| 474 | + return; | ||
| 475 | + } | ||
| 476 | + | ||
| 477 | + if (!obj.Get("samples").IsTypedArray()) { | ||
| 478 | + Napi::TypeError::New(env, "The object['samples'] should be a typed array") | ||
| 479 | + .ThrowAsJavaScriptException(); | ||
| 480 | + | ||
| 481 | + return; | ||
| 482 | + } | ||
| 483 | + | ||
| 484 | + if (!obj.Has("sampleRate")) { | ||
| 485 | + Napi::TypeError::New(env, | ||
| 486 | + "The argument object should have a field sampleRate") | ||
| 487 | + .ThrowAsJavaScriptException(); | ||
| 488 | + | ||
| 489 | + return; | ||
| 490 | + } | ||
| 491 | + | ||
| 492 | + if (!obj.Get("sampleRate").IsNumber()) { | ||
| 493 | + Napi::TypeError::New(env, "The object['sampleRate'] should be a number") | ||
| 494 | + .ThrowAsJavaScriptException(); | ||
| 495 | + | ||
| 496 | + return; | ||
| 497 | + } | ||
| 498 | + | ||
| 499 | + Napi::Float32Array samples = obj.Get("samples").As<Napi::Float32Array>(); | ||
| 500 | + int32_t sample_rate = obj.Get("sampleRate").As<Napi::Number>().Int32Value(); | ||
| 501 | + | ||
| 502 | + AcceptWaveformOffline(stream, sample_rate, samples.Data(), | ||
| 503 | + samples.ElementLength()); | ||
| 504 | +} | ||
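The checks above define the expected argument shape for `acceptWaveformOffline`: an offline stream pointer plus an object with a `Float32Array` field named `samples` and a numeric `sampleRate`. A minimal call from JavaScript might look like this (`readWave` is an assumed helper from the wave-reader exports registered elsewhere in this addon):

```js
// wave.samples must be a Float32Array (typed-array check above);
// wave.sampleRate must be a number.
const wave = sherpa_onnx.readWave('./test.wav'); // assumed wave-reader export
sherpa_onnx.acceptWaveformOffline(stream, {
  samples: wave.samples,
  sampleRate: wave.sampleRate,
});
```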
| 505 | + | ||
| 506 | +static void DecodeOfflineStreamWrapper(const Napi::CallbackInfo &info) { | ||
| 507 | + Napi::Env env = info.Env(); | ||
| 508 | + if (info.Length() != 2) { | ||
| 509 | + std::ostringstream os; | ||
| 510 | + os << "Expect only 2 arguments. Given: " << info.Length(); | ||
| 511 | + | ||
| 512 | + Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException(); | ||
| 513 | + | ||
| 514 | + return; | ||
| 515 | + } | ||
| 516 | + | ||
| 517 | + if (!info[0].IsExternal()) { | ||
| 518 | + Napi::TypeError::New(env, | ||
| 519 | + "Argument 0 should be an offline recognizer pointer.") | ||
| 520 | + .ThrowAsJavaScriptException(); | ||
| 521 | + | ||
| 522 | + return; | ||
| 523 | + } | ||
| 524 | + | ||
| 525 | + if (!info[1].IsExternal()) { | ||
| 526 | + Napi::TypeError::New(env, "Argument 1 should be an offline stream pointer.") | ||
| 527 | + .ThrowAsJavaScriptException(); | ||
| 528 | + | ||
| 529 | + return; | ||
| 530 | + } | ||
| 531 | + | ||
| 532 | + SherpaOnnxOfflineRecognizer *recognizer = | ||
| 533 | + info[0].As<Napi::External<SherpaOnnxOfflineRecognizer>>().Data(); | ||
| 534 | + | ||
| 535 | + SherpaOnnxOfflineStream *stream = | ||
| 536 | + info[1].As<Napi::External<SherpaOnnxOfflineStream>>().Data(); | ||
| 537 | + | ||
| 538 | + DecodeOfflineStream(recognizer, stream); | ||
| 539 | +} | ||
| 540 | + | ||
| 541 | +static Napi::String GetOfflineStreamResultAsJsonWrapper( | ||
| 542 | + const Napi::CallbackInfo &info) { | ||
| 543 | + Napi::Env env = info.Env(); | ||
| 544 | + if (info.Length() != 1) { | ||
| 545 | + std::ostringstream os; | ||
| 546 | + os << "Expect only 1 argument. Given: " << info.Length(); | ||
| 547 | + | ||
| 548 | + Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException(); | ||
| 549 | + | ||
| 550 | + return {}; | ||
| 551 | + } | ||
| 552 | + | ||
| 553 | + if (!info[0].IsExternal()) { | ||
| 554 | + Napi::TypeError::New(env, "Argument 0 should be an offline stream pointer.") | ||
| 555 | + .ThrowAsJavaScriptException(); | ||
| 556 | + | ||
| 557 | + return {}; | ||
| 558 | + } | ||
| 559 | + | ||
| 560 | + SherpaOnnxOfflineStream *stream = | ||
| 561 | + info[0].As<Napi::External<SherpaOnnxOfflineStream>>().Data(); | ||
| 562 | + | ||
| 563 | + const char *json = GetOfflineStreamResultAsJson(stream); | ||
| 564 | + Napi::String s = Napi::String::New(env, json); | ||
| 565 | + | ||
| 566 | + DestroyOfflineStreamResultJson(json); | ||
| 567 | + | ||
| 568 | + return s; | ||
| 569 | +} | ||
| 570 | + | ||
| 571 | +void InitNonStreamingAsr(Napi::Env env, Napi::Object exports) { | ||
| 572 | + exports.Set(Napi::String::New(env, "createOfflineRecognizer"), | ||
| 573 | + Napi::Function::New(env, CreateOfflineRecognizerWrapper)); | ||
| 574 | + | ||
| 575 | + exports.Set(Napi::String::New(env, "createOfflineStream"), | ||
| 576 | + Napi::Function::New(env, CreateOfflineStreamWrapper)); | ||
| 577 | + | ||
| 578 | + exports.Set(Napi::String::New(env, "acceptWaveformOffline"), | ||
| 579 | + Napi::Function::New(env, AcceptWaveformOfflineWrapper)); | ||
| 580 | + | ||
| 581 | + exports.Set(Napi::String::New(env, "decodeOfflineStream"), | ||
| 582 | + Napi::Function::New(env, DecodeOfflineStreamWrapper)); | ||
| 583 | + | ||
| 584 | + exports.Set(Napi::String::New(env, "getOfflineStreamResultAsJson"), | ||
| 585 | + Napi::Function::New(env, GetOfflineStreamResultAsJsonWrapper)); | ||
| 586 | +} |
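Taken together, the five exports registered in `InitNonStreamingAsr` give JavaScript callers a create → stream → feed → decode → read lifecycle. Below is a minimal end-to-end sketch, assuming the addon is loaded as `sherpa_onnx` and reusing the `config` and `wave` objects sketched above; see the non-streaming test scripts added in this commit for the actual usage:

```js
// Hypothetical end-to-end use of the non-streaming exports registered above.
const recognizer = sherpa_onnx.createOfflineRecognizer(config);
const stream = sherpa_onnx.createOfflineStream(recognizer);

sherpa_onnx.acceptWaveformOffline(stream, {
  samples: wave.samples,
  sampleRate: wave.sampleRate,
});

sherpa_onnx.decodeOfflineStream(recognizer, stream);

// The result arrives as a JSON string; the exact schema is defined by the
// C API, and the 'text' field is assumed here.
const result = JSON.parse(sherpa_onnx.getOfflineStreamResultAsJson(stream));
console.log(result.text);
```

Note that `GetOfflineStreamResultAsJsonWrapper` copies the JSON into a `Napi::String` before calling `DestroyOfflineStreamResultJson`, so the string returned to JavaScript remains valid after the C buffer is freed.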
| @@ -4,15 +4,21 @@ | @@ -4,15 +4,21 @@ | ||
| 4 | #include "napi.h" // NOLINT | 4 | #include "napi.h" // NOLINT |
| 5 | 5 | ||
| 6 | void InitStreamingAsr(Napi::Env env, Napi::Object exports); | 6 | void InitStreamingAsr(Napi::Env env, Napi::Object exports); |
| 7 | + | ||
| 8 | +void InitNonStreamingAsr(Napi::Env env, Napi::Object exports); | ||
| 9 | + | ||
| 10 | +void InitVad(Napi::Env env, Napi::Object exports); | ||
| 11 | + | ||
| 7 | void InitWaveReader(Napi::Env env, Napi::Object exports); | 12 | void InitWaveReader(Napi::Env env, Napi::Object exports); |
| 13 | + | ||
| 8 | void InitWaveWriter(Napi::Env env, Napi::Object exports); | 14 | void InitWaveWriter(Napi::Env env, Napi::Object exports); |
| 9 | -void InitVad(Napi::Env env, Napi::Object exports); | ||
| 10 | 15 | ||
| 11 | Napi::Object Init(Napi::Env env, Napi::Object exports) { | 16 | Napi::Object Init(Napi::Env env, Napi::Object exports) { |
| 12 | InitStreamingAsr(env, exports); | 17 | InitStreamingAsr(env, exports); |
| 18 | + InitNonStreamingAsr(env, exports); | ||
| 19 | + InitVad(env, exports); | ||
| 13 | InitWaveReader(env, exports); | 20 | InitWaveReader(env, exports); |
| 14 | InitWaveWriter(env, exports); | 21 | InitWaveWriter(env, exports); |
| 15 | - InitVad(env, exports); | ||
| 16 | 22 | ||
| 17 | return exports; | 23 | return exports; |
| 18 | } | 24 | } |
| @@ -13,7 +13,7 @@ | @@ -13,7 +13,7 @@ | ||
| 13 | } | 13 | } |
| 14 | }; | 14 | }; |
| 15 | */ | 15 | */ |
| 16 | -static SherpaOnnxFeatureConfig GetFeatureConfig(Napi::Object obj) { | 16 | +SherpaOnnxFeatureConfig GetFeatureConfig(Napi::Object obj) { |
| 17 | SherpaOnnxFeatureConfig config; | 17 | SherpaOnnxFeatureConfig config; |
| 18 | memset(&config, 0, sizeof(config)); | 18 | memset(&config, 0, sizeof(config)); |
| 19 | 19 | ||
| @@ -113,6 +113,39 @@ GetOnlineZipformer2CtcModelConfig(Napi::Object obj) { | @@ -113,6 +113,39 @@ GetOnlineZipformer2CtcModelConfig(Napi::Object obj) { | ||
| 113 | return config; | 113 | return config; |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | +static SherpaOnnxOnlineParaformerModelConfig GetOnlineParaformerModelConfig( | ||
| 117 | + Napi::Object obj) { | ||
| 118 | + SherpaOnnxOnlineParaformerModelConfig config; | ||
| 119 | + memset(&config, 0, sizeof(config)); | ||
| 120 | + | ||
| 121 | + if (!obj.Has("paraformer") || !obj.Get("paraformer").IsObject()) { | ||
| 122 | + return config; | ||
| 123 | + } | ||
| 124 | + | ||
| 125 | + Napi::Object o = obj.Get("paraformer").As<Napi::Object>(); | ||
| 126 | + | ||
| 127 | + if (o.Has("encoder") && o.Get("encoder").IsString()) { | ||
| 128 | + Napi::String encoder = o.Get("encoder").As<Napi::String>(); | ||
| 129 | + std::string s = encoder.Utf8Value(); | ||
| 130 | + char *p = new char[s.size() + 1]; | ||
| 131 | + std::copy(s.begin(), s.end(), p); | ||
| 132 | + p[s.size()] = 0; | ||
| 133 | + | ||
| 134 | + config.encoder = p; | ||
| 135 | + } | ||
| 136 | + | ||
| 137 | + if (o.Has("decoder") && o.Get("decoder").IsString()) { | ||
| 138 | + Napi::String decoder = o.Get("decoder").As<Napi::String>(); | ||
| 139 | + std::string s = decoder.Utf8Value(); | ||
| 140 | + char *p = new char[s.size() + 1]; | ||
| 141 | + std::copy(s.begin(), s.end(), p); | ||
| 142 | + p[s.size()] = 0; | ||
| 143 | + | ||
| 144 | + config.decoder = p; | ||
| 145 | + } | ||
| 146 | + return config; | ||
| 147 | +} | ||
| 148 | + | ||
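The getter above mirrors the existing transducer and zipformer2_ctc getters: it reads an optional `paraformer` object with `encoder` and `decoder` model paths from the `modelConfig` object and copies the strings into C-owned buffers that are freed after the recognizer is created. On the JavaScript side this corresponds to a fragment like the following sketch (file paths are placeholders):

```js
// Streaming paraformer fragment consumed by GetOnlineParaformerModelConfig();
// sibling modelConfig fields (tokens, etc.) are omitted here.
const modelConfig = {
  paraformer: {
    encoder: './paraformer-encoder.onnx',
    decoder: './paraformer-decoder.onnx',
  },
};
```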
| 116 | static SherpaOnnxOnlineModelConfig GetOnlineModelConfig(Napi::Object obj) { | 149 | static SherpaOnnxOnlineModelConfig GetOnlineModelConfig(Napi::Object obj) { |
| 117 | SherpaOnnxOnlineModelConfig config; | 150 | SherpaOnnxOnlineModelConfig config; |
| 118 | memset(&config, 0, sizeof(config)); | 151 | memset(&config, 0, sizeof(config)); |
| @@ -124,6 +157,7 @@ static SherpaOnnxOnlineModelConfig GetOnlineModelConfig(Napi::Object obj) { | @@ -124,6 +157,7 @@ static SherpaOnnxOnlineModelConfig GetOnlineModelConfig(Napi::Object obj) { | ||
| 124 | Napi::Object o = obj.Get("modelConfig").As<Napi::Object>(); | 157 | Napi::Object o = obj.Get("modelConfig").As<Napi::Object>(); |
| 125 | 158 | ||
| 126 | config.transducer = GetOnlineTransducerModelConfig(o); | 159 | config.transducer = GetOnlineTransducerModelConfig(o); |
| 160 | + config.paraformer = GetOnlineParaformerModelConfig(o); | ||
| 127 | config.zipformer2_ctc = GetOnlineZipformer2CtcModelConfig(o); | 161 | config.zipformer2_ctc = GetOnlineZipformer2CtcModelConfig(o); |
| 128 | 162 | ||
| 129 | if (o.Has("tokens") && o.Get("tokens").IsString()) { | 163 | if (o.Has("tokens") && o.Get("tokens").IsString()) { |
| @@ -290,35 +324,6 @@ static Napi::External<SherpaOnnxOnlineRecognizer> CreateOnlineRecognizerWrapper( | @@ -290,35 +324,6 @@ static Napi::External<SherpaOnnxOnlineRecognizer> CreateOnlineRecognizerWrapper( | ||
| 290 | 324 | ||
| 291 | c.ctc_fst_decoder_config = GetCtcFstDecoderConfig(config); | 325 | c.ctc_fst_decoder_config = GetCtcFstDecoderConfig(config); |
| 292 | 326 | ||
| 293 | -#if 0 | ||
| 294 | - printf("encoder: %s\n", c.model_config.transducer.encoder | ||
| 295 | - ? c.model_config.transducer.encoder | ||
| 296 | - : "no"); | ||
| 297 | - printf("decoder: %s\n", c.model_config.transducer.decoder | ||
| 298 | - ? c.model_config.transducer.decoder | ||
| 299 | - : "no"); | ||
| 300 | - printf("joiner: %s\n", c.model_config.transducer.joiner | ||
| 301 | - ? c.model_config.transducer.joiner | ||
| 302 | - : "no"); | ||
| 303 | - | ||
| 304 | - printf("tokens: %s\n", c.model_config.tokens ? c.model_config.tokens : "no"); | ||
| 305 | - printf("num_threads: %d\n", c.model_config.num_threads); | ||
| 306 | - printf("provider: %s\n", | ||
| 307 | - c.model_config.provider ? c.model_config.provider : "no"); | ||
| 308 | - printf("debug: %d\n", c.model_config.debug); | ||
| 309 | - printf("model_type: %s\n", | ||
| 310 | - c.model_config.model_type ? c.model_config.model_type : "no"); | ||
| 311 | - | ||
| 312 | - printf("decoding_method: %s\n", c.decoding_method ? c.decoding_method : "no"); | ||
| 313 | - printf("max_active_paths: %d\n", c.max_active_paths); | ||
| 314 | - printf("enable_endpoint: %d\n", c.enable_endpoint); | ||
| 315 | - printf("rule1_min_trailing_silence: %.3f\n", c.rule1_min_trailing_silence); | ||
| 316 | - printf("rule2_min_trailing_silence: %.3f\n", c.rule2_min_trailing_silence); | ||
| 317 | - printf("rule3_min_utterance_length: %.3f\n", c.rule3_min_utterance_length); | ||
| 318 | - printf("hotwords_file: %s\n", c.hotwords_file ? c.hotwords_file : "no"); | ||
| 319 | - printf("hotwords_score: %.3f\n", c.hotwords_score); | ||
| 320 | -#endif | ||
| 321 | - | ||
| 322 | SherpaOnnxOnlineRecognizer *recognizer = CreateOnlineRecognizer(&c); | 327 | SherpaOnnxOnlineRecognizer *recognizer = CreateOnlineRecognizer(&c); |
| 323 | 328 | ||
| 324 | if (c.model_config.transducer.encoder) { | 329 | if (c.model_config.transducer.encoder) { |
| @@ -333,6 +338,14 @@ static Napi::External<SherpaOnnxOnlineRecognizer> CreateOnlineRecognizerWrapper( | @@ -333,6 +338,14 @@ static Napi::External<SherpaOnnxOnlineRecognizer> CreateOnlineRecognizerWrapper( | ||
| 333 | delete[] c.model_config.transducer.joiner; | 338 | delete[] c.model_config.transducer.joiner; |
| 334 | } | 339 | } |
| 335 | 340 | ||
| 341 | + if (c.model_config.paraformer.encoder) { | ||
| 342 | + delete[] c.model_config.paraformer.encoder; | ||
| 343 | + } | ||
| 344 | + | ||
| 345 | + if (c.model_config.paraformer.decoder) { | ||
| 346 | + delete[] c.model_config.paraformer.decoder; | ||
| 347 | + } | ||
| 348 | + | ||
| 336 | if (c.model_config.zipformer2_ctc.model) { | 349 | if (c.model_config.zipformer2_ctc.model) { |
| 337 | delete[] c.model_config.zipformer2_ctc.model; | 350 | delete[] c.model_config.zipformer2_ctc.model; |
| 338 | } | 351 | } |
| @@ -389,7 +402,8 @@ static Napi::External<SherpaOnnxOnlineStream> CreateOnlineStreamWrapper( | @@ -389,7 +402,8 @@ static Napi::External<SherpaOnnxOnlineStream> CreateOnlineStreamWrapper( | ||
| 389 | 402 | ||
| 390 | if (!info[0].IsExternal()) { | 403 | if (!info[0].IsExternal()) { |
| 391 | Napi::TypeError::New( | 404 | Napi::TypeError::New( |
| 392 | - env, "You should pass a recognizer pointer as the only argument") | 405 | + env, |
| 406 | + "You should pass an online recognizer pointer as the only argument") | ||
| 393 | .ThrowAsJavaScriptException(); | 407 | .ThrowAsJavaScriptException(); |
| 394 | 408 | ||
| 395 | return {}; | 409 | return {}; |