Fangjun Kuang
Committed by GitHub

Add VAD + Non-streaming ASR example for JavaScript API. (#1170)

@@ -10,6 +10,19 @@ arch=$(node -p "require('os').arch()") @@ -10,6 +10,19 @@ arch=$(node -p "require('os').arch()")
10 platform=$(node -p "require('os').platform()") 10 platform=$(node -p "require('os').platform()")
11 node_version=$(node -p "process.versions.node.split('.')[0]") 11 node_version=$(node -p "process.versions.node.split('.')[0]")
12 12
  13 +echo "----------non-streaming asr + vad----------"
  14 +curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
  15 +tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
  16 +rm sherpa-onnx-whisper-tiny.en.tar.bz2
  17 +
  18 +curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
  19 +curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  20 +
  21 +node ./test_vad_with_non_streaming_asr_whisper.js
  22 +rm -rf sherpa-onnx-whisper*
  23 +rm *.wav
  24 +rm *.onnx
  25 +
13 echo "----------asr----------" 26 echo "----------asr----------"
14 27
15 if [[ $arch != "ia32" && $platform != "win32" ]]; then 28 if [[ $arch != "ia32" && $platform != "win32" ]]; then
@@ -112,3 +112,4 @@ sherpa-onnx-telespeech-ctc-* @@ -112,3 +112,4 @@ sherpa-onnx-telespeech-ctc-*
112 .ccache 112 .ccache
113 lib*.a 113 lib*.a
114 sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17 114 sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17
  115 +*.bak
  1 +## 1.10.18
  2 +
  3 +* Fix the case when recognition results contain the symbol `"`. It caused
  4 + issues when converting results to a json string.
  5 +
1 ## 1.10.17 6 ## 1.10.17
2 7
3 * Support SenseVoice CTC models. 8 * Support SenseVoice CTC models.
@@ -11,7 +11,7 @@ project(sherpa-onnx) @@ -11,7 +11,7 @@ project(sherpa-onnx)
11 # ./nodejs-addon-examples 11 # ./nodejs-addon-examples
12 # ./dart-api-examples/ 12 # ./dart-api-examples/
13 # ./CHANGELOG.md 13 # ./CHANGELOG.md
14 -set(SHERPA_ONNX_VERSION "1.10.17") 14 +set(SHERPA_ONNX_VERSION "1.10.18")
15 15
16 # Disable warning about 16 # Disable warning about
17 # 17 #
@@ -9,7 +9,7 @@ environment: @@ -9,7 +9,7 @@ environment:
9 sdk: ^3.4.0 9 sdk: ^3.4.0
10 10
11 dependencies: 11 dependencies:
12 - sherpa_onnx: ^1.10.17 12 + sherpa_onnx: ^1.10.18
13 # sherpa_onnx: 13 # sherpa_onnx:
14 # path: ../../flutter/sherpa_onnx 14 # path: ../../flutter/sherpa_onnx
15 path: ^1.9.0 15 path: ^1.9.0
@@ -10,7 +10,7 @@ environment: @@ -10,7 +10,7 @@ environment:
10 10
11 # Add regular dependencies here. 11 # Add regular dependencies here.
12 dependencies: 12 dependencies:
13 - sherpa_onnx: ^1.10.17 13 + sherpa_onnx: ^1.10.18
14 path: ^1.9.0 14 path: ^1.9.0
15 args: ^2.5.0 15 args: ^2.5.0
16 16
@@ -11,7 +11,7 @@ environment: @@ -11,7 +11,7 @@ environment:
11 11
12 # Add regular dependencies here. 12 # Add regular dependencies here.
13 dependencies: 13 dependencies:
14 - sherpa_onnx: ^1.10.17 14 + sherpa_onnx: ^1.10.18
15 path: ^1.9.0 15 path: ^1.9.0
16 args: ^2.5.0 16 args: ^2.5.0
17 17
@@ -8,7 +8,7 @@ environment: @@ -8,7 +8,7 @@ environment:
8 8
9 # Add regular dependencies here. 9 # Add regular dependencies here.
10 dependencies: 10 dependencies:
11 - sherpa_onnx: ^1.10.17 11 + sherpa_onnx: ^1.10.18
12 path: ^1.9.0 12 path: ^1.9.0
13 args: ^2.5.0 13 args: ^2.5.0
14 14
@@ -9,7 +9,7 @@ environment: @@ -9,7 +9,7 @@ environment:
9 sdk: ^3.4.0 9 sdk: ^3.4.0
10 10
11 dependencies: 11 dependencies:
12 - sherpa_onnx: ^1.10.17 12 + sherpa_onnx: ^1.10.18
13 path: ^1.9.0 13 path: ^1.9.0
14 args: ^2.5.0 14 args: ^2.5.0
15 15
@@ -5,7 +5,7 @@ description: > @@ -5,7 +5,7 @@ description: >
5 5
6 publish_to: 'none' 6 publish_to: 'none'
7 7
8 -version: 1.10.17 8 +version: 1.10.18
9 9
10 topics: 10 topics:
11 - speech-recognition 11 - speech-recognition
@@ -30,7 +30,7 @@ dependencies: @@ -30,7 +30,7 @@ dependencies:
30 record: ^5.1.0 30 record: ^5.1.0
31 url_launcher: ^6.2.6 31 url_launcher: ^6.2.6
32 32
33 - sherpa_onnx: ^1.10.17 33 + sherpa_onnx: ^1.10.18
34 # sherpa_onnx: 34 # sherpa_onnx:
35 # path: ../../flutter/sherpa_onnx 35 # path: ../../flutter/sherpa_onnx
36 36
@@ -5,7 +5,7 @@ description: > @@ -5,7 +5,7 @@ description: >
5 5
6 publish_to: 'none' # Remove this line if you wish to publish to pub.dev 6 publish_to: 'none' # Remove this line if you wish to publish to pub.dev
7 7
8 -version: 1.10.17 8 +version: 1.10.18
9 9
10 environment: 10 environment:
11 sdk: '>=3.4.0 <4.0.0' 11 sdk: '>=3.4.0 <4.0.0'
@@ -17,7 +17,7 @@ dependencies: @@ -17,7 +17,7 @@ dependencies:
17 cupertino_icons: ^1.0.6 17 cupertino_icons: ^1.0.6
18 path_provider: ^2.1.3 18 path_provider: ^2.1.3
19 path: ^1.9.0 19 path: ^1.9.0
20 - sherpa_onnx: ^1.10.17 20 + sherpa_onnx: ^1.10.18
21 url_launcher: ^6.2.6 21 url_launcher: ^6.2.6
22 audioplayers: ^5.0.0 22 audioplayers: ^5.0.0
23 23
@@ -17,7 +17,7 @@ topics: @@ -17,7 +17,7 @@ topics:
17 - voice-activity-detection 17 - voice-activity-detection
18 18
19 # remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx_macos.podspec 19 # remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx_macos.podspec
20 -version: 1.10.17 20 +version: 1.10.18
21 21
22 homepage: https://github.com/k2-fsa/sherpa-onnx 22 homepage: https://github.com/k2-fsa/sherpa-onnx
23 23
@@ -30,23 +30,23 @@ dependencies: @@ -30,23 +30,23 @@ dependencies:
30 flutter: 30 flutter:
31 sdk: flutter 31 sdk: flutter
32 32
33 - sherpa_onnx_android: ^1.10.17 33 + sherpa_onnx_android: ^1.10.18
34 # sherpa_onnx_android: 34 # sherpa_onnx_android:
35 # path: ../sherpa_onnx_android 35 # path: ../sherpa_onnx_android
36 36
37 - sherpa_onnx_macos: ^1.10.17 37 + sherpa_onnx_macos: ^1.10.18
38 # sherpa_onnx_macos: 38 # sherpa_onnx_macos:
39 # path: ../sherpa_onnx_macos 39 # path: ../sherpa_onnx_macos
40 40
41 - sherpa_onnx_linux: ^1.10.17 41 + sherpa_onnx_linux: ^1.10.18
42 # sherpa_onnx_linux: 42 # sherpa_onnx_linux:
43 # path: ../sherpa_onnx_linux 43 # path: ../sherpa_onnx_linux
44 # 44 #
45 - sherpa_onnx_windows: ^1.10.17 45 + sherpa_onnx_windows: ^1.10.18
46 # sherpa_onnx_windows: 46 # sherpa_onnx_windows:
47 # path: ../sherpa_onnx_windows 47 # path: ../sherpa_onnx_windows
48 48
49 - sherpa_onnx_ios: ^1.10.17 49 + sherpa_onnx_ios: ^1.10.18
50 # sherpa_onnx_ios: 50 # sherpa_onnx_ios:
51 # path: ../sherpa_onnx_ios 51 # path: ../sherpa_onnx_ios
52 52
@@ -7,7 +7,7 @@ @@ -7,7 +7,7 @@
7 # https://groups.google.com/g/dart-ffi/c/nUATMBy7r0c 7 # https://groups.google.com/g/dart-ffi/c/nUATMBy7r0c
8 Pod::Spec.new do |s| 8 Pod::Spec.new do |s|
9 s.name = 'sherpa_onnx_ios' 9 s.name = 'sherpa_onnx_ios'
10 - s.version = '1.10.17' 10 + s.version = '1.10.18'
11 s.summary = 'A new Flutter FFI plugin project.' 11 s.summary = 'A new Flutter FFI plugin project.'
12 s.description = <<-DESC 12 s.description = <<-DESC
13 A new Flutter FFI plugin project. 13 A new Flutter FFI plugin project.
@@ -4,7 +4,7 @@ @@ -4,7 +4,7 @@
4 # 4 #
5 Pod::Spec.new do |s| 5 Pod::Spec.new do |s|
6 s.name = 'sherpa_onnx_macos' 6 s.name = 'sherpa_onnx_macos'
7 - s.version = '1.10.17' 7 + s.version = '1.10.18'
8 s.summary = 'sherpa-onnx Flutter FFI plugin project.' 8 s.summary = 'sherpa-onnx Flutter FFI plugin project.'
9 s.description = <<-DESC 9 s.description = <<-DESC
10 sherpa-onnx Flutter FFI plugin project. 10 sherpa-onnx Flutter FFI plugin project.
@@ -93,6 +93,7 @@ The following tables list the examples in this folder. @@ -93,6 +93,7 @@ The following tables list the examples in this folder.
93 |---|---| 93 |---|---|
94 |[./test_asr_non_streaming_transducer.js](./test_asr_non_streaming_transducer.js)|Non-streaming speech recognition from a file with a Zipformer transducer model| 94 |[./test_asr_non_streaming_transducer.js](./test_asr_non_streaming_transducer.js)|Non-streaming speech recognition from a file with a Zipformer transducer model|
95 |[./test_asr_non_streaming_whisper.js](./test_asr_non_streaming_whisper.js)| Non-streaming speech recognition from a file using [Whisper](https://github.com/openai/whisper)| 95 |[./test_asr_non_streaming_whisper.js](./test_asr_non_streaming_whisper.js)| Non-streaming speech recognition from a file using [Whisper](https://github.com/openai/whisper)|
  96 +|[./test_vad_with_non_streaming_asr_whisper.js](./test_vad_with_non_streaming_asr_whisper.js)| Non-streaming speech recognition from a file using [Whisper](https://github.com/openai/whisper) + [Silero VAD](https://github.com/snakers4/silero-vad)|
96 |[./test_asr_non_streaming_nemo_ctc.js](./test_asr_non_streaming_nemo_ctc.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) CTC model with greedy search| 97 |[./test_asr_non_streaming_nemo_ctc.js](./test_asr_non_streaming_nemo_ctc.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) CTC model with greedy search|
97 |[./test_asr_non_streaming_paraformer.js](./test_asr_non_streaming_paraformer.js)|Non-streaming speech recognition from a file using [Paraformer](https://github.com/alibaba-damo-academy/FunASR)| 98 |[./test_asr_non_streaming_paraformer.js](./test_asr_non_streaming_paraformer.js)|Non-streaming speech recognition from a file using [Paraformer](https://github.com/alibaba-damo-academy/FunASR)|
98 |[./test_asr_non_streaming_sense_voice.js](./test_asr_non_streaming_sense_voice.js)|Non-streaming speech recognition from a file using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)| 99 |[./test_asr_non_streaming_sense_voice.js](./test_asr_non_streaming_sense_voice.js)|Non-streaming speech recognition from a file using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)|
@@ -221,11 +222,24 @@ rm sherpa-onnx-whisper-tiny.en.tar.bz2 @@ -221,11 +222,24 @@ rm sherpa-onnx-whisper-tiny.en.tar.bz2
221 222
222 node ./test_asr_non_streaming_whisper.js 223 node ./test_asr_non_streaming_whisper.js
223 224
224 -# To run VAD + non-streaming ASR with Paraformer using a microphone 225 +# To run VAD + non-streaming ASR with Whisper using a microphone
225 npm install naudiodon2 226 npm install naudiodon2
226 node ./test_vad_asr_non_streaming_whisper_microphone.js 227 node ./test_vad_asr_non_streaming_whisper_microphone.js
227 ``` 228 ```
228 229
  230 +### Non-streaming speech recognition with Whisper + VAD
  231 +
  232 +```bash
  233 +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
  234 +tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
  235 +rm sherpa-onnx-whisper-tiny.en.tar.bz2
  236 +
  237 +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
  238 +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  239 +
  240 +node ./test_vad_with_non_streaming_asr_whisper.js
  241 +```
  242 +
229 ### Non-streaming speech recognition with NeMo CTC models 243 ### Non-streaming speech recognition with NeMo CTC models
230 244
231 ```bash 245 ```bash
1 { 1 {
2 "dependencies": { 2 "dependencies": {
3 - "sherpa-onnx-node": "^1.10.17" 3 + "sherpa-onnx-node": "^1.10.18"
4 } 4 }
5 } 5 }
  1 +// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
  2 +
  3 +const sherpa_onnx = require('sherpa-onnx-node');
  4 +
  5 +function createRecognizer() {
  6 + // Please download test files from
  7 + // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  8 + const config = {
  9 + 'featConfig': {
  10 + 'sampleRate': 16000,
  11 + 'featureDim': 80,
  12 + },
  13 + 'modelConfig': {
  14 + 'whisper': {
  15 + 'encoder': './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
  16 + 'decoder': './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
  17 + },
  18 + 'tokens': './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
  19 + 'numThreads': 2,
  20 + 'provider': 'cpu',
  21 + 'debug': 1,
  22 + }
  23 + };
  24 +
  25 + return new sherpa_onnx.OfflineRecognizer(config);
  26 +}
  27 +
  28 +function createVad() {
  29 + // please download silero_vad.onnx from
  30 + // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  31 + const config = {
  32 + sileroVad: {
  33 + model: './silero_vad.onnx',
  34 + threshold: 0.5,
  35 + minSpeechDuration: 0.25,
  36 + minSilenceDuration: 0.5,
  37 + windowSize: 512,
  38 + },
  39 + sampleRate: 16000,
  40 + debug: true,
  41 + numThreads: 1,
  42 + };
  43 +
  44 + const bufferSizeInSeconds = 60;
  45 +
  46 + return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
  47 +}
  48 +
  49 +const recognizer = createRecognizer();
  50 +const vad = createVad();
  51 +
  52 +// please download ./Obama.wav from
  53 +// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  54 +const waveFilename = './Obama.wav';
  55 +const wave = sherpa_onnx.readWave(waveFilename);
  56 +
  57 +if (wave.sampleRate != recognizer.config.featConfig.sampleRate) {
  58 + throw new Error(
  59 + `Expected sample rate: ${recognizer.config.featConfig.sampleRate}. Given: ${wave.sampleRate}`);
  60 +}
  61 +
  62 +console.log('Started')
  63 +let start = Date.now();
  64 +
  65 +const windowSize = vad.config.sileroVad.windowSize;
  66 +for (let i = 0; i < wave.samples.length; i += windowSize) {
  67 + const thisWindow = wave.samples.subarray(i, i + windowSize);
  68 + vad.acceptWaveform(thisWindow);
  69 +
  70 + while (!vad.isEmpty()) {
  71 + const segment = vad.front();
  72 + vad.pop();
  73 +
  74 + let start_time = segment.start / wave.sampleRate;
  75 + let end_time = start_time + segment.samples.length / wave.sampleRate;
  76 +
  77 + start_time = start_time.toFixed(2);
  78 + end_time = end_time.toFixed(2);
  79 +
  80 + const stream = recognizer.createStream();
  81 + stream.acceptWaveform(
  82 + {samples: segment.samples, sampleRate: wave.sampleRate});
  83 +
  84 + recognizer.decode(stream);
  85 + const r = recognizer.getResult(stream);
  86 + if (r.text.length > 0) {
  87 + const text = r.text.toLowerCase().trim();
  88 + console.log(`${start_time} -- ${end_time}: ${text}`);
  89 + }
  90 + }
  91 +}
  92 +
  93 +vad.flush();
  94 +
  95 +while (!vad.isEmpty()) {
  96 + const segment = vad.front();
  97 + vad.pop();
  98 +
  99 + let start_time = segment.start / wave.sampleRate;
  100 + let end_time = start_time + segment.samples.length / wave.sampleRate;
  101 +
  102 + start_time = start_time.toFixed(2);
  103 + end_time = end_time.toFixed(2);
  104 +
  105 + const stream = recognizer.createStream();
  106 + stream.acceptWaveform(
  107 + {samples: segment.samples, sampleRate: wave.sampleRate});
  108 +
  109 + recognizer.decode(stream);
  110 + const r = recognizer.getResult(stream);
  111 + if (r.text.length > 0) {
  112 + const text = r.text.toLowerCase().trim();
  113 + console.log(`${start_time} -- ${end_time}: ${text}`);
  114 + }
  115 +}
  116 +
  117 +let stop = Date.now();
  118 +console.log('Done')
  119 +
  120 +const elapsed_seconds = (stop - start) / 1000;
  121 +const duration = wave.samples.length / wave.sampleRate;
  122 +const real_time_factor = elapsed_seconds / duration;
  123 +console.log('Wave duration', duration.toFixed(3), 'seconds')
  124 +console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds')
  125 +console.log(
  126 + `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
  127 + real_time_factor.toFixed(3))
@@ -9,7 +9,7 @@ environment: @@ -9,7 +9,7 @@ environment:
9 sdk: ^3.4.0 9 sdk: ^3.4.0
10 10
11 dependencies: 11 dependencies:
12 - # sherpa_onnx: ^1.10.17 12 + # sherpa_onnx: ^1.10.18
13 sherpa_onnx: 13 sherpa_onnx:
14 path: ../../flutter/sherpa_onnx 14 path: ../../flutter/sherpa_onnx
15 path: ^1.9.0 15 path: ^1.9.0
@@ -17,7 +17,7 @@ topics: @@ -17,7 +17,7 @@ topics:
17 - voice-activity-detection 17 - voice-activity-detection
18 18
19 # remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx.podspec 19 # remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx.podspec
20 -version: 1.10.17 20 +version: 1.10.18
21 21
22 homepage: https://github.com/k2-fsa/sherpa-onnx 22 homepage: https://github.com/k2-fsa/sherpa-onnx
23 23
@@ -65,7 +65,7 @@ config = { @@ -65,7 +65,7 @@ config = {
65 } 65 }
66 66
67 clear() { 67 clear() {
68 - addon.VoiceActivityDetectorClearWrapper(this.handle); 68 + addon.voiceActivityDetectorClear(this.handle);
69 } 69 }
70 70
71 /* 71 /*
@@ -79,11 +79,11 @@ config = { @@ -79,11 +79,11 @@ config = {
79 } 79 }
80 80
81 reset() { 81 reset() {
82 - addon.VoiceActivityDetectorResetWrapper(this.handle); 82 + addon.voiceActivityDetectorReset(this.handle);
83 } 83 }
84 84
85 flush() { 85 flush() {
86 - addon.VoiceActivityDetectorFlushWrapper(this.handle); 86 + addon.voiceActivityDetectorFlush(this.handle);
87 } 87 }
88 } 88 }
89 89
@@ -306,8 +306,7 @@ std::string OfflineRecognitionResult::AsJsonString() const { @@ -306,8 +306,7 @@ std::string OfflineRecognitionResult::AsJsonString() const {
306 os << "{"; 306 os << "{";
307 os << "\"text\"" 307 os << "\"text\""
308 << ": "; 308 << ": ";
309 - os << "\"" << text << "\""  
310 - << ", "; 309 + os << std::quoted(text) << ", ";
311 310
312 os << "\"" 311 os << "\""
313 << "timestamps" 312 << "timestamps"
@@ -339,7 +338,7 @@ std::string OfflineRecognitionResult::AsJsonString() const { @@ -339,7 +338,7 @@ std::string OfflineRecognitionResult::AsJsonString() const {
339 << "\""; 338 << "\"";
340 os.flags(oldFlags); 339 os.flags(oldFlags);
341 } else { 340 } else {
342 - os << sep << "\"" << t << "\""; 341 + os << sep << std::quoted(t);
343 } 342 }
344 sep = ", "; 343 sep = ", ";
345 } 344 }
@@ -44,7 +44,7 @@ std::string VecToString<std::string>(const std::vector<std::string> &vec, @@ -44,7 +44,7 @@ std::string VecToString<std::string>(const std::vector<std::string> &vec,
44 oss << "["; 44 oss << "[";
45 std::string sep = ""; 45 std::string sep = "";
46 for (const auto &item : vec) { 46 for (const auto &item : vec) {
47 - oss << sep << "\"" << item << "\""; 47 + oss << sep << std::quoted(item);
48 sep = ", "; 48 sep = ", ";
49 } 49 }
50 oss << "]"; 50 oss << "]";
@@ -54,9 +54,7 @@ std::string VecToString<std::string>(const std::vector<std::string> &vec, @@ -54,9 +54,7 @@ std::string VecToString<std::string>(const std::vector<std::string> &vec,
54 std::string OnlineRecognizerResult::AsJsonString() const { 54 std::string OnlineRecognizerResult::AsJsonString() const {
55 std::ostringstream os; 55 std::ostringstream os;
56 os << "{ "; 56 os << "{ ";
57 - os << "\"text\": "  
58 - << "\"" << text << "\""  
59 - << ", "; 57 + os << "\"text\": " << std::quoted(text) << ", ";
60 os << "\"tokens\": " << VecToString(tokens) << ", "; 58 os << "\"tokens\": " << VecToString(tokens) << ", ";
61 os << "\"timestamps\": " << VecToString(timestamps, 2) << ", "; 59 os << "\"timestamps\": " << VecToString(timestamps, 2) << ", ";
62 os << "\"ys_probs\": " << VecToString(ys_probs, 6) << ", "; 60 os << "\"ys_probs\": " << VecToString(ys_probs, 6) << ", ";