Fangjun Kuang

Add spoken language identification for node-addon-api (#872)

@@ -18,6 +18,8 @@ fi
 SHERPA_ONNX_VERSION=$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
 echo "SHERPA_ONNX_VERSION $SHERPA_ONNX_VERSION"
 
+# SHERPA_ONNX_VERSION=1.0.20
+
 if [ -z $owner ]; then
   owner=k2-fsa
 fi
@@ -6,6 +6,20 @@ d=nodejs-addon-examples
 echo "dir: $d"
 cd $d
 
+echo "----------spoken language identification----------"
+
+curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.tar.bz2
+tar xvf sherpa-onnx-whisper-tiny.tar.bz2
+rm sherpa-onnx-whisper-tiny.tar.bz2
+
+curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/spoken-language-identification-test-wavs.tar.bz2
+tar xvf spoken-language-identification-test-wavs.tar.bz2
+rm spoken-language-identification-test-wavs.tar.bz2
+
+node ./test_spoken_language_identification.js
+rm -rf sherpa-onnx-whisper-tiny
+rm -rf spoken-language-identification-test-wavs
+
 echo "----------streaming asr----------"
 
 curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
@@ -52,6 +52,7 @@ jobs:
 
 SHERPA_ONNX_VERSION=$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
 echo "SHERPA_ONNX_VERSION $SHERPA_ONNX_VERSION"
+# SHERPA_ONNX_VERSION=1.0.20
 
 src_dir=.github/scripts/node-addon
 sed -i.bak s/SHERPA_ONNX_VERSION/$SHERPA_ONNX_VERSION/g $src_dir/package.json
@@ -183,3 +183,21 @@ rm vits-icefall-zh-aishell3.tar.bz2
 
 node ./test_tts_non_streaming_vits_zh_aishell3.js
 ```
+
+## Spoken language identification with Whisper multi-lingual models
+
+```bash
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.tar.bz2
+tar xvf sherpa-onnx-whisper-tiny.tar.bz2
+rm sherpa-onnx-whisper-tiny.tar.bz2
+
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/spoken-language-identification-test-wavs.tar.bz2
+tar xvf spoken-language-identification-test-wavs.tar.bz2
+rm spoken-language-identification-test-wavs.tar.bz2
+
+node ./test_spoken_language_identification.js
+
+# To run VAD + spoken language identification using a microphone
+npm install naudiodon2
+node ./test_vad_spoken_language_identification_microphone.js
+```
@@ -0,0 +1,40 @@
+// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
+
+const sherpa_onnx = require('sherpa-onnx-node');
+
+function createSpokenLanguageID() {
+  const config = {
+    whisper: {
+      encoder: './sherpa-onnx-whisper-tiny/tiny-encoder.int8.onnx',
+      decoder: './sherpa-onnx-whisper-tiny/tiny-decoder.int8.onnx',
+    },
+    debug: true,
+    numThreads: 1,
+    provider: 'cpu',
+  };
+  return new sherpa_onnx.SpokenLanguageIdentification(config);
+}
+
+const slid = createSpokenLanguageID();
+
+const testWaves = [
+  './spoken-language-identification-test-wavs/ar-arabic.wav',
+  './spoken-language-identification-test-wavs/de-german.wav',
+  './spoken-language-identification-test-wavs/en-english.wav',
+  './spoken-language-identification-test-wavs/fr-french.wav',
+  './spoken-language-identification-test-wavs/pt-portuguese.wav',
+  './spoken-language-identification-test-wavs/es-spanish.wav',
+  './spoken-language-identification-test-wavs/zh-chinese.wav',
+];
+
+const display = new Intl.DisplayNames(['en'], {type: 'language'})
+
+for (let f of testWaves) {
+  const stream = slid.createStream();
+
+  const wave = sherpa_onnx.readWave(f);
+  stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
+
+  const lang = slid.compute(stream);
+  console.log(f.split('/')[2], lang, display.of(lang));
+}
@@ -26,7 +26,7 @@ function createVad() {
   return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
 }
 
-vad = createVad();
+const vad = createVad();
 
 const bufferSizeInSeconds = 30;
 const buffer =
@@ -0,0 +1,114 @@
+// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
+
+const portAudio = require('naudiodon2');
+// console.log(portAudio.getDevices());
+
+const sherpa_onnx = require('sherpa-onnx-node');
+
+function createVad() {
+  // please download silero_vad.onnx from
+  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
+  const config = {
+    sileroVad: {
+      model: './silero_vad.onnx',
+      threshold: 0.5,
+      minSpeechDuration: 0.25,
+      minSilenceDuration: 0.5,
+      windowSize: 512,
+    },
+    sampleRate: 16000,
+    debug: true,
+    numThreads: 1,
+  };
+
+  const bufferSizeInSeconds = 60;
+
+  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
+}
+
+// Please download test files from
+// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
+function createSpokenLanguageID() {
+  const config = {
+    whisper: {
+      encoder: './sherpa-onnx-whisper-tiny/tiny-encoder.int8.onnx',
+      decoder: './sherpa-onnx-whisper-tiny/tiny-decoder.int8.onnx',
+    },
+    debug: true,
+    numThreads: 1,
+    provider: 'cpu',
+  };
+  return new sherpa_onnx.SpokenLanguageIdentification(config);
+}
+
+const slid = createSpokenLanguageID();
+const vad = createVad();
+
+const display = new Intl.DisplayNames(['en'], {type: 'language'})
+
+const bufferSizeInSeconds = 30;
+const buffer =
+    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);
+
+
+const ai = new portAudio.AudioIO({
+  inOptions: {
+    channelCount: 1,
+    closeOnError: true,  // Close the stream if an audio error is detected, if
+                         // set false then just log the error
+    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
+    sampleFormat: portAudio.SampleFormatFloat32,
+    sampleRate: vad.config.sampleRate,
+  }
+});
+
+let printed = false;
+let index = 0;
+ai.on('data', data => {
+  const windowSize = vad.config.sileroVad.windowSize;
+  buffer.push(new Float32Array(data.buffer));
+  while (buffer.size() > windowSize) {
+    const samples = buffer.get(buffer.head(), windowSize);
+    buffer.pop(windowSize);
+    vad.acceptWaveform(samples)
+    if (vad.isDetected() && !printed) {
+      console.log(`${index}: Detected speech`)
+      printed = true;
+    }
+
+    if (!vad.isDetected()) {
+      printed = false;
+    }
+
+    while (!vad.isEmpty()) {
+      const segment = vad.front();
+      vad.pop();
+
+      const stream = slid.createStream();
+      stream.acceptWaveform(
+          {samples: segment.samples, sampleRate: vad.config.sampleRate});
+      const lang = slid.compute(stream);
+      const fullLang = display.of(lang);
+
+      const filename = `${index}-${fullLang}-${
+          new Date()
+              .toLocaleTimeString('en-US', {hour12: false})
+              .split(' ')[0]}.wav`;
+      sherpa_onnx.writeWave(
+          filename,
+          {samples: segment.samples, sampleRate: vad.config.sampleRate});
+      const duration = segment.samples.length / vad.config.sampleRate;
+      console.log(`${index} End of speech. Duration: ${
+          duration} seconds.\n Detected language: ${fullLang}`);
+      console.log(`Saved to ${filename}`);
+      index += 1;
+    }
+  }
+});
+
+ai.on('close', () => {
+  console.log('Free resources');
+});
+
+ai.start();
+console.log('Started! Please speak')
@@ -21,6 +21,7 @@ set(srcs
   src/non-streaming-asr.cc
   src/non-streaming-tts.cc
   src/sherpa-onnx-node-addon-api.cc
+  src/spoken-language-identification.cc
   src/streaming-asr.cc
   src/vad.cc
   src/wave-reader.cc
@@ -37,4 +37,5 @@ class OfflineRecognizer {
 
 module.exports = {
   OfflineRecognizer,
+  OfflineStream,
 }
@@ -3,6 +3,7 @@ const streaming_asr = require('./streaming-asr.js');
 const non_streaming_asr = require('./non-streaming-asr.js');
 const non_streaming_tts = require('./non-streaming-tts.js');
 const vad = require('./vad.js');
+const slid = require('./spoken-language-identification.js');
 
 module.exports = {
   OnlineRecognizer: streaming_asr.OnlineRecognizer,
@@ -13,4 +14,5 @@ module.exports = {
   Display: streaming_asr.Display,
   Vad: vad.Vad,
   CircularBuffer: vad.CircularBuffer,
+  SpokenLanguageIdentification: slid.SpokenLanguageIdentification,
 }
@@ -0,0 +1,30 @@
+const addon = require('./addon.js');
+const non_streaming_asr = require('./non-streaming-asr.js');
+
+class SpokenLanguageIdentification {
+  constructor(config) {
+    this.handle = addon.createSpokenLanguageIdentification(config);
+    this.config = config;
+  }
+
+  createStream() {
+    return new non_streaming_asr.OfflineStream(
+        addon.createSpokenLanguageIdentificationOfflineStream(this.handle));
+  }
+
+  // return a string containing the language code (2 characters),
+  // e.g., en, de, fr, es, zh
+  // en -> English
+  // de -> German
+  // fr -> French
+  // es -> Spanish
+  // zh -> Chinese
+  compute(stream) {
+    return addon.spokenLanguageIdentificationCompute(
+        this.handle, stream.handle);
+  }
+}
+
+module.exports = {
+  SpokenLanguageIdentification,
+}
@@ -15,6 +15,8 @@ void InitWaveReader(Napi::Env env, Napi::Object exports);
 
 void InitWaveWriter(Napi::Env env, Napi::Object exports);
 
+void InitSpokenLanguageID(Napi::Env env, Napi::Object exports);
+
 Napi::Object Init(Napi::Env env, Napi::Object exports) {
   InitStreamingAsr(env, exports);
   InitNonStreamingAsr(env, exports);
@@ -22,6 +24,7 @@ Napi::Object Init(Napi::Env env, Napi::Object exports) {
   InitVad(env, exports);
   InitWaveReader(env, exports);
   InitWaveWriter(env, exports);
+  InitSpokenLanguageID(env, exports);
 
   return exports;
 }
@@ -0,0 +1,218 @@
+// scripts/node-addon-api/src/spoken-language-identification.cc
+//
+// Copyright (c) 2024 Xiaomi Corporation
+
+#include <sstream>
+
+#include "napi.h"  // NOLINT
+#include "sherpa-onnx/c-api/c-api.h"
+
+static SherpaOnnxSpokenLanguageIdentificationWhisperConfig
+GetSpokenLanguageIdentificationWhisperConfig(Napi::Object obj) {
+  SherpaOnnxSpokenLanguageIdentificationWhisperConfig c;
+  memset(&c, 0, sizeof(c));
+
+  if (!obj.Has("whisper") || !obj.Get("whisper").IsObject()) {
+    return c;
+  }
+
+  Napi::Object o = obj.Get("whisper").As<Napi::Object>();
+
+  if (o.Has("encoder") && o.Get("encoder").IsString()) {
+    Napi::String encoder = o.Get("encoder").As<Napi::String>();
+    std::string s = encoder.Utf8Value();
+    char *p = new char[s.size() + 1];
+    std::copy(s.begin(), s.end(), p);
+    p[s.size()] = 0;
+
+    c.encoder = p;
+  }
+
+  if (o.Has("decoder") && o.Get("decoder").IsString()) {
+    Napi::String decoder = o.Get("decoder").As<Napi::String>();
+    std::string s = decoder.Utf8Value();
+    char *p = new char[s.size() + 1];
+    std::copy(s.begin(), s.end(), p);
+    p[s.size()] = 0;
+
+    c.decoder = p;
+  }
+
+  if (o.Has("tailPaddings") && o.Get("tailPaddings").IsNumber()) {
+    c.tail_paddings = o.Get("tailPaddings").As<Napi::Number>().Int32Value();
+  }
+
+  return c;
+}
+
+static Napi::External<SherpaOnnxSpokenLanguageIdentification>
+CreateSpokenLanguageIdentificationWrapper(const Napi::CallbackInfo &info) {
+  Napi::Env env = info.Env();
+  if (info.Length() != 1) {
+    std::ostringstream os;
+    os << "Expect only 1 argument. Given: " << info.Length();
+
+    Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  if (!info[0].IsObject()) {
+    Napi::TypeError::New(env, "You should pass an object as the only argument.")
+        .ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  Napi::Object o = info[0].As<Napi::Object>();
+
+  SherpaOnnxSpokenLanguageIdentificationConfig c;
+  memset(&c, 0, sizeof(c));
+  c.whisper = GetSpokenLanguageIdentificationWhisperConfig(o);
+
+  if (o.Has("numThreads") && o.Get("numThreads").IsNumber()) {
+    c.num_threads = o.Get("numThreads").As<Napi::Number>().Int32Value();
+  }
+
+  if (o.Has("debug") &&
+      (o.Get("debug").IsNumber() || o.Get("debug").IsBoolean())) {
+    if (o.Get("debug").IsBoolean()) {
+      c.debug = o.Get("debug").As<Napi::Boolean>().Value();
+    } else {
+      c.debug = o.Get("debug").As<Napi::Number>().Int32Value();
+    }
+  }
+
+  if (o.Has("provider") && o.Get("provider").IsString()) {
+    Napi::String provider = o.Get("provider").As<Napi::String>();
+    std::string s = provider.Utf8Value();
+    char *p = new char[s.size() + 1];
+    std::copy(s.begin(), s.end(), p);
+    p[s.size()] = 0;
+
+    c.provider = p;
+  }
+
+  const SherpaOnnxSpokenLanguageIdentification *slid =
+      SherpaOnnxCreateSpokenLanguageIdentification(&c);
+
+  if (c.whisper.encoder) {
+    delete[] c.whisper.encoder;
+  }
+
+  if (c.whisper.decoder) {
+    delete[] c.whisper.decoder;
+  }
+
+  if (c.provider) {
+    delete[] c.provider;
+  }
+
+  if (!slid) {
+    Napi::TypeError::New(env, "Please check your config!")
+        .ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  return Napi::External<SherpaOnnxSpokenLanguageIdentification>::New(
+      env, const_cast<SherpaOnnxSpokenLanguageIdentification *>(slid),
+      [](Napi::Env env, SherpaOnnxSpokenLanguageIdentification *slid) {
+        SherpaOnnxDestroySpokenLanguageIdentification(slid);
+      });
+}
+
+static Napi::External<SherpaOnnxOfflineStream>
+SpokenLanguageIdentificationCreateOfflineStreamWrapper(
+    const Napi::CallbackInfo &info) {
+  Napi::Env env = info.Env();
+  if (info.Length() != 1) {
+    std::ostringstream os;
+    os << "Expect only 1 argument. Given: " << info.Length();
+
+    Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  if (!info[0].IsExternal()) {
+    Napi::TypeError::New(
+        env,
+        "You should pass an offline language ID pointer as the only argument")
+        .ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  SherpaOnnxSpokenLanguageIdentification *slid =
+      info[0]
+          .As<Napi::External<SherpaOnnxSpokenLanguageIdentification>>()
+          .Data();
+
+  SherpaOnnxOfflineStream *stream =
+      SherpaOnnxSpokenLanguageIdentificationCreateOfflineStream(slid);
+
+  return Napi::External<SherpaOnnxOfflineStream>::New(
+      env, stream, [](Napi::Env env, SherpaOnnxOfflineStream *stream) {
+        DestroyOfflineStream(stream);
+      });
+}
+
+static Napi::String SpokenLanguageIdentificationComputeWrapper(
+    const Napi::CallbackInfo &info) {
+  Napi::Env env = info.Env();
+  if (info.Length() != 2) {
+    std::ostringstream os;
+    os << "Expect only 2 arguments. Given: " << info.Length();
+
+    Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  if (!info[0].IsExternal()) {
+    Napi::TypeError::New(
+        env, "Argument 0 should be an offline spoken language ID pointer.")
+        .ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  if (!info[1].IsExternal()) {
+    Napi::TypeError::New(env, "Argument 1 should be an offline stream pointer.")
+        .ThrowAsJavaScriptException();
+
+    return {};
+  }
+
+  SherpaOnnxSpokenLanguageIdentification *slid =
+      info[0]
+          .As<Napi::External<SherpaOnnxSpokenLanguageIdentification>>()
+          .Data();
+
+  SherpaOnnxOfflineStream *stream =
+      info[1].As<Napi::External<SherpaOnnxOfflineStream>>().Data();
+
+  const SherpaOnnxSpokenLanguageIdentificationResult *r =
+      SherpaOnnxSpokenLanguageIdentificationCompute(slid, stream);
+
+  std::string lang = r->lang;
+  SherpaOnnxDestroySpokenLanguageIdentificationResult(r);
+
+  return Napi::String::New(env, lang);
+}
+
+void InitSpokenLanguageID(Napi::Env env, Napi::Object exports) {
+  exports.Set(
+      Napi::String::New(env, "createSpokenLanguageIdentification"),
+      Napi::Function::New(env, CreateSpokenLanguageIdentificationWrapper));
+
+  exports.Set(
+      Napi::String::New(env, "createSpokenLanguageIdentificationOfflineStream"),
+      Napi::Function::New(
+          env, SpokenLanguageIdentificationCreateOfflineStreamWrapper));
+
+  exports.Set(
+      Napi::String::New(env, "spokenLanguageIdentificationCompute"),
+      Napi::Function::New(env, SpokenLanguageIdentificationComputeWrapper));
+}
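
Taken together, the changes above expose the C API's spoken language identification to Node.js: the C++ wrapper registers `createSpokenLanguageIdentification`, `createSpokenLanguageIdentificationOfflineStream`, and `spokenLanguageIdentificationCompute`, and the JavaScript layer wraps them in the `SpokenLanguageIdentification` class. A minimal end-to-end sketch of the intended usage, mirroring `test_spoken_language_identification.js` above (the model and wave paths are placeholders taken from the README download steps):

```js
const sherpa_onnx = require('sherpa-onnx-node');

// Create the recognizer from a Whisper multi-lingual model
// (download sherpa-onnx-whisper-tiny as shown in the README).
const slid = new sherpa_onnx.SpokenLanguageIdentification({
  whisper: {
    encoder: './sherpa-onnx-whisper-tiny/tiny-encoder.int8.onnx',
    decoder: './sherpa-onnx-whisper-tiny/tiny-decoder.int8.onnx',
  },
  numThreads: 1,
  provider: 'cpu',
});

// Feed one utterance into an offline stream and compute its language.
const wave = sherpa_onnx.readWave(
    './spoken-language-identification-test-wavs/en-english.wav');
const stream = slid.createStream();
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

// compute() returns a 2-letter code, e.g. "en"; Intl.DisplayNames can map it
// to a full name as the test script does.
console.log(slid.compute(stream));
```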