Fangjun Kuang
Committed by GitHub

Add vad with non-streaming ASR examples for Dart API (#1180)

... ... @@ -4,6 +4,33 @@ set -ex
cd dart-api-examples
pushd vad-with-non-streaming-asr
echo '----------TeleSpeech CTC----------'
./run-telespeech-ctc.sh
rm -rf sherpa-onnx-*
echo "----zipformer transducer----"
./run-zipformer-transducer.sh
rm -rf sherpa-onnx-*
echo "----whisper----"
./run-whisper.sh
rm -rf sherpa-onnx-*
echo "----paraformer----"
./run-paraformer.sh
rm -rf sherpa-onnx-*
echo "----SenseVoice zh----"
./run-sense-voice-zh.sh
rm -rf sherpa-onnx-*
echo "----SenseVoice en----"
./run-sense-voice-en.sh
rm -rf sherpa-onnx-*
popd
pushd keyword-spotter
./run-zh.sh
popd
... ...
... ... @@ -109,6 +109,8 @@ jobs:
cp scripts/dart/streaming-asr-pubspec.yaml dart-api-examples/streaming-asr/pubspec.yaml
cp scripts/dart/tts-pubspec.yaml dart-api-examples/tts/pubspec.yaml
cp scripts/dart/kws-pubspec.yaml dart-api-examples/keyword-spotter/pubspec.yaml
cp scripts/dart/vad-non-streaming-asr-pubspec.yaml dart-api-examples/vad-with-non-streaming-asr/pubspec.yaml
cp scripts/dart/sherpa-onnx-pubspec.yaml flutter/sherpa_onnx/pubspec.yaml
.github/scripts/test-dart.sh
... ...
... ... @@ -5,6 +5,17 @@ This directory contains examples for Dart API.
You can find the package at
https://pub.dev/packages/sherpa_onnx
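To use it in your own app, declare the dependency in `pubspec.yaml` (a minimal sketch; the version below is the one pinned by these examples):
```yaml
dependencies:
  sherpa_onnx: ^1.10.19
```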
## Description
| Directory | Description |
|-----------|-------------|
| [./keyword-spotter](./keyword-spotter)| Example for keyword spotting|
| [./non-streaming-asr](./non-streaming-asr)| Example for non-streaming speech recognition|
| [./streaming-asr](./streaming-asr)| Example for streaming speech recognition|
| [./tts](./tts)| Example for text to speech|
| [./vad](./vad)| Example for voice activity detection|
| [./vad-with-non-streaming-asr](./vad-with-non-streaming-asr)| Example for voice activity detection with non-streaming speech recognition. You can use it to generate subtitles.|
## How to create an example in this folder
```bash
... ...
... ... @@ -11,7 +11,7 @@ void main(List<String> arguments) async {
await initSherpaOnnx();
final parser = ArgParser()
-    ..addOption('model', help: 'Path to the paraformer model')
+    ..addOption('model', help: 'Path to the SenseVoice model')
..addOption('tokens', help: 'Path to tokens.txt')
..addOption('language',
help: 'auto, zh, en, ja, ko, yue, or leave it empty to use auto',
... ...
# https://dart.dev/guides/libraries/private-files
# Created by `dart pub`
.dart_tool/
... ...
# Introduction
This folder contains examples that combine voice activity detection (VAD) with
non-streaming ASR using the Dart API. All of them follow the same pattern; see
the condensed sketch after the table below.
| File | Description|
|------|------------|
|[./bin/paraformer.dart](./bin/paraformer.dart)| Use a Paraformer model for speech recognition. See [./run-paraformer.sh](./run-paraformer.sh)|
|[./bin/sense-voice.dart](./bin/sense-voice.dart)| Use a SenseVoice CTC model for speech recognition. See [./run-sense-voice-zh.sh](./run-sense-voice-zh.sh) and [./run-sense-voice-en.sh](./run-sense-voice-en.sh)|
|[./bin/telespeech-ctc.dart](./bin/telespeech-ctc.dart)| Use a TeleSpeech CTC model for speech recognition. See [./run-telespeech-ctc.sh](./run-telespeech-ctc.sh)|
|[./bin/whisper.dart](./bin/whisper.dart)| Use a Whisper model for speech recognition. See [./run-whisper.sh](./run-whisper.sh)|
|[./bin/zipformer-transducer.dart](./bin/zipformer-transducer.dart)| Use a Zipformer transducer model for speech recognition. See [./run-zipformer-transducer.sh](./run-zipformer-transducer.sh)|
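All of the examples share the same structure: feed the input wave to the VAD in fixed-size windows, decode every detected speech segment with the offline recognizer, and flush the VAD at the end. A condensed sketch of that loop, with `vad`, `recognizer`, and `waveData` assumed to be created as in the full programs:

```dart
final windowSize = vadConfig.sileroVad.windowSize;
final numIter = waveData.samples.length ~/ windowSize;

for (int i = 0; i != numIter; ++i) {
  final start = i * windowSize;
  vad.acceptWaveform(Float32List.sublistView(
      waveData.samples, start, start + windowSize));

  if (vad.isDetected()) {
    while (!vad.isEmpty()) {
      // vad.front() is the oldest pending speech segment.
      final stream = recognizer.createStream();
      stream.acceptWaveform(
          samples: vad.front().samples, sampleRate: waveData.sampleRate);
      recognizer.decode(stream);
      print(recognizer.getResult(stream).text);
      stream.free();
      vad.pop();
    }
  }
}

// Signal end of input, then drain the queue once more (same loop as above).
vad.flush();
```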
... ...
# This file configures the static analysis results for your project (errors,
# warnings, and lints).
#
# This enables the 'recommended' set of lints from `package:lints`.
# This set helps identify many issues that may lead to problems when running
# or consuming Dart code, and enforces writing Dart using a single, idiomatic
# style and format.
#
# If you want a smaller set of lints you can change this to specify
# 'package:lints/core.yaml'. These are just the most critical lints
# (the recommended set includes the core lints).
# The core lints are also what is used by pub.dev for scoring packages.
include: package:lints/recommended.yaml
# Uncomment the following section to specify additional rules.
# linter:
#   rules:
#     - camel_case_types
# analyzer:
#   exclude:
#     - path/to/excluded/files/**
# For more information about the core and recommended set of lints, see
# https://dart.dev/go/core-lints
# For additional information about configuring this file, see
# https://dart.dev/guides/language/analysis-options
... ...
../../vad/bin/init.dart
\ No newline at end of file
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
void main(List<String> arguments) async {
await initSherpaOnnx();
final parser = ArgParser()
..addOption('silero-vad', help: 'Path to silero_vad.onnx')
..addOption('model', help: 'Path to the paraformer model')
..addOption('tokens', help: 'Path to tokens.txt')
..addOption('input-wav', help: 'Path to input.wav to transcribe');
final res = parser.parse(arguments);
if (res['silero-vad'] == null ||
res['model'] == null ||
res['tokens'] == null ||
res['input-wav'] == null) {
print(parser.usage);
exit(1);
}
// create VAD
final sileroVad = res['silero-vad'] as String;
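  // minSilenceDuration: seconds of trailing silence that close a segment;
  // minSpeechDuration: segments shorter than this (in seconds) are dropped.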
final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
model: sileroVad,
minSilenceDuration: 0.25,
minSpeechDuration: 0.5,
);
final vadConfig = sherpa_onnx.VadModelConfig(
sileroVad: sileroVadConfig,
numThreads: 1,
debug: true,
);
final vad = sherpa_onnx.VoiceActivityDetector(
config: vadConfig, bufferSizeInSeconds: 10);
// create paraformer recognizer
final model = res['model'] as String;
final tokens = res['tokens'] as String;
final inputWav = res['input-wav'] as String;
final paraformer = sherpa_onnx.OfflineParaformerModelConfig(
model: model,
);
final modelConfig = sherpa_onnx.OfflineModelConfig(
paraformer: paraformer,
tokens: tokens,
debug: true,
numThreads: 1,
modelType: 'paraformer',
);
final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
final recognizer = sherpa_onnx.OfflineRecognizer(config);
final waveData = sherpa_onnx.readWave(inputWav);
if (waveData.sampleRate != 16000) {
print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
exit(1);
}
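  // The silero VAD consumes fixed-size windows, so walk the wave file one
  // window at a time. Samples after the last complete window are never fed
  // to the VAD; vad.flush() below only drains audio it has already buffered.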
int numSamples = waveData.samples.length;
int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
for (int i = 0; i != numIter; ++i) {
int start = i * vadConfig.sileroVad.windowSize;
vad.acceptWaveform(Float32List.sublistView(
waveData.samples, start, start + vadConfig.sileroVad.windowSize));
if (vad.isDetected()) {
while (!vad.isEmpty()) {
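        // vad.front() holds the oldest pending segment; `start` is its first
        // sample index in the original wave, hence the division by sampleRate.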
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime =
startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(
samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
}
}
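  // Signal end of input so the VAD emits the final, possibly still open,
  // speech segment, then decode whatever remains in its queue.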
vad.flush();
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
vad.free();
recognizer.free();
}
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
void main(List<String> arguments) async {
await initSherpaOnnx();
final parser = ArgParser()
..addOption('silero-vad', help: 'Path to silero_vad.onnx')
..addOption('model', help: 'Path to the SenseVoice model')
..addOption('tokens', help: 'Path to tokens.txt')
..addOption('language',
help: 'auto, zh, en, ja, ko, yue, or leave it empty to use auto',
defaultsTo: '')
..addOption('use-itn',
help: 'true to use inverse text normalization', defaultsTo: 'false')
..addOption('input-wav', help: 'Path to input.wav to transcribe');
final res = parser.parse(arguments);
if (res['silero-vad'] == null ||
res['model'] == null ||
res['tokens'] == null ||
res['input-wav'] == null) {
print(parser.usage);
exit(1);
}
// create VAD
final sileroVad = res['silero-vad'] as String;
final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
model: sileroVad,
minSilenceDuration: 0.25,
minSpeechDuration: 0.5,
);
final vadConfig = sherpa_onnx.VadModelConfig(
sileroVad: sileroVadConfig,
numThreads: 1,
debug: true,
);
final vad = sherpa_onnx.VoiceActivityDetector(
config: vadConfig, bufferSizeInSeconds: 10);
// create SenseVoice
final model = res['model'] as String;
final tokens = res['tokens'] as String;
final inputWav = res['input-wav'] as String;
final language = res['language'] as String;
final useItn = (res['use-itn'] as String).toLowerCase() == 'true';
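  // SenseVoice is a single CTC model covering zh/en/ja/ko/yue. An empty
  // language means auto-detect; useItn enables inverse text normalization
  // (spoken forms such as "twenty" become written forms such as "20").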
final senseVoice = sherpa_onnx.OfflineSenseVoiceModelConfig(
model: model, language: language, useInverseTextNormalization: useItn);
final modelConfig = sherpa_onnx.OfflineModelConfig(
senseVoice: senseVoice,
tokens: tokens,
debug: true,
numThreads: 1,
);
final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
final recognizer = sherpa_onnx.OfflineRecognizer(config);
final waveData = sherpa_onnx.readWave(inputWav);
if (waveData.sampleRate != 16000) {
print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
exit(1);
}
int numSamples = waveData.samples.length;
int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
for (int i = 0; i != numIter; ++i) {
int start = i * vadConfig.sileroVad.windowSize;
vad.acceptWaveform(Float32List.sublistView(
waveData.samples, start, start + vadConfig.sileroVad.windowSize));
if (vad.isDetected()) {
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime =
startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(
samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
}
}
vad.flush();
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
vad.free();
recognizer.free();
}
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
void main(List<String> arguments) async {
await initSherpaOnnx();
final parser = ArgParser()
..addOption('silero-vad', help: 'Path to silero_vad.onnx')
..addOption('model', help: 'Path to the TeleSpeech CTC model')
..addOption('tokens', help: 'Path to tokens.txt')
..addOption('input-wav', help: 'Path to input.wav to transcribe');
final res = parser.parse(arguments);
if (res['silero-vad'] == null ||
res['model'] == null ||
res['tokens'] == null ||
res['input-wav'] == null) {
print(parser.usage);
exit(1);
}
// create VAD
final sileroVad = res['silero-vad'] as String;
final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
model: sileroVad,
minSilenceDuration: 0.25,
minSpeechDuration: 0.5,
);
final vadConfig = sherpa_onnx.VadModelConfig(
sileroVad: sileroVadConfig,
numThreads: 1,
debug: true,
);
final vad = sherpa_onnx.VoiceActivityDetector(
config: vadConfig, bufferSizeInSeconds: 10);
// create TeleSpeech CTC recognizer
final model = res['model'] as String;
final tokens = res['tokens'] as String;
final inputWav = res['input-wav'] as String;
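  // TeleSpeech CTC is a single-file model; modelType tells the runtime how
  // to interpret it.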
final modelConfig = sherpa_onnx.OfflineModelConfig(
telespeechCtc: model,
tokens: tokens,
debug: true,
numThreads: 1,
modelType: 'telespeech_ctc',
);
final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
final recognizer = sherpa_onnx.OfflineRecognizer(config);
final waveData = sherpa_onnx.readWave(inputWav);
if (waveData.sampleRate != 16000) {
print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
exit(1);
}
int numSamples = waveData.samples.length;
int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
for (int i = 0; i != numIter; ++i) {
int start = i * vadConfig.sileroVad.windowSize;
vad.acceptWaveform(Float32List.sublistView(
waveData.samples, start, start + vadConfig.sileroVad.windowSize));
if (vad.isDetected()) {
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime =
startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(
samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
}
}
vad.flush();
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
vad.free();
recognizer.free();
}
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
void main(List<String> arguments) async {
await initSherpaOnnx();
final parser = ArgParser()
..addOption('silero-vad', help: 'Path to silero_vad.onnx')
..addOption('encoder', help: 'Path to the whisper encoder model')
..addOption('decoder', help: 'Path to the whisper decoder model')
..addOption('tokens', help: 'Path to tokens.txt')
..addOption('input-wav', help: 'Path to input.wav to transcribe');
final res = parser.parse(arguments);
if (res['silero-vad'] == null ||
res['encoder'] == null ||
res['decoder'] == null ||
res['tokens'] == null ||
res['input-wav'] == null) {
print(parser.usage);
exit(1);
}
// create VAD
final sileroVad = res['silero-vad'] as String;
final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
model: sileroVad,
minSilenceDuration: 0.25,
minSpeechDuration: 0.5,
);
final vadConfig = sherpa_onnx.VadModelConfig(
sileroVad: sileroVadConfig,
numThreads: 1,
debug: true,
);
final vad = sherpa_onnx.VoiceActivityDetector(
config: vadConfig, bufferSizeInSeconds: 10);
// create whisper recognizer
final encoder = res['encoder'] as String;
final decoder = res['decoder'] as String;
final tokens = res['tokens'] as String;
final inputWav = res['input-wav'] as String;
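  // Whisper is split into separate encoder and decoder ONNX files.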
final whisper = sherpa_onnx.OfflineWhisperModelConfig(
encoder: encoder,
decoder: decoder,
);
final modelConfig = sherpa_onnx.OfflineModelConfig(
whisper: whisper,
tokens: tokens,
modelType: 'whisper',
debug: false,
numThreads: 1,
);
final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
final recognizer = sherpa_onnx.OfflineRecognizer(config);
final waveData = sherpa_onnx.readWave(inputWav);
if (waveData.sampleRate != 16000) {
print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
exit(1);
}
int numSamples = waveData.samples.length;
int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
for (int i = 0; i != numIter; ++i) {
int start = i * vadConfig.sileroVad.windowSize;
vad.acceptWaveform(Float32List.sublistView(
waveData.samples, start, start + vadConfig.sileroVad.windowSize));
if (vad.isDetected()) {
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime =
startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(
samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
}
}
vad.flush();
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
vad.free();
recognizer.free();
}
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
void main(List<String> arguments) async {
await initSherpaOnnx();
final parser = ArgParser()
..addOption('silero-vad', help: 'Path to silero_vad.onnx')
..addOption('encoder', help: 'Path to the encoder model')
..addOption('decoder', help: 'Path to the decoder model')
..addOption('joiner', help: 'Path to the joiner model')
..addOption('tokens', help: 'Path to tokens.txt')
..addOption('input-wav', help: 'Path to input.wav to transcribe');
final res = parser.parse(arguments);
if (res['silero-vad'] == null ||
res['encoder'] == null ||
res['decoder'] == null ||
res['joiner'] == null ||
res['tokens'] == null ||
res['input-wav'] == null) {
print(parser.usage);
exit(1);
}
// create VAD
final sileroVad = res['silero-vad'] as String;
final sileroVadConfig = sherpa_onnx.SileroVadModelConfig(
model: sileroVad,
minSilenceDuration: 0.25,
minSpeechDuration: 0.5,
);
final vadConfig = sherpa_onnx.VadModelConfig(
sileroVad: sileroVadConfig,
numThreads: 1,
debug: true,
);
final vad = sherpa_onnx.VoiceActivityDetector(
config: vadConfig, bufferSizeInSeconds: 10);
// create zipformer transducer recognizer
final encoder = res['encoder'] as String;
final decoder = res['decoder'] as String;
final joiner = res['joiner'] as String;
final tokens = res['tokens'] as String;
final inputWav = res['input-wav'] as String;
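  // A transducer model has three parts: encoder, decoder (a.k.a. predictor),
  // and joiner.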
final transducer = sherpa_onnx.OfflineTransducerModelConfig(
encoder: encoder,
decoder: decoder,
joiner: joiner,
);
final modelConfig = sherpa_onnx.OfflineModelConfig(
transducer: transducer,
tokens: tokens,
debug: true,
numThreads: 1,
);
final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
final recognizer = sherpa_onnx.OfflineRecognizer(config);
final waveData = sherpa_onnx.readWave(inputWav);
if (waveData.sampleRate != 16000) {
print('Only 16000 Hz is supported. Given: ${waveData.sampleRate}');
exit(1);
}
int numSamples = waveData.samples.length;
int numIter = numSamples ~/ vadConfig.sileroVad.windowSize;
for (int i = 0; i != numIter; ++i) {
int start = i * vadConfig.sileroVad.windowSize;
vad.acceptWaveform(Float32List.sublistView(
waveData.samples, start, start + vadConfig.sileroVad.windowSize));
if (vad.isDetected()) {
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime =
startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(
samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
}
}
vad.flush();
while (!vad.isEmpty()) {
final samples = vad.front().samples;
final startTime = vad.front().start.toDouble() / waveData.sampleRate;
final endTime = startTime + samples.length.toDouble() / waveData.sampleRate;
final stream = recognizer.createStream();
stream.acceptWaveform(samples: samples, sampleRate: waveData.sampleRate);
recognizer.decode(stream);
final result = recognizer.getResult(stream);
stream.free();
print(
'${startTime.toStringAsPrecision(5)} -- ${endTime.toStringAsPrecision(5)} : ${result.text}');
vad.pop();
}
vad.free();
recognizer.free();
}
... ...
name: vad_with_non_streaming_asr
description: >
  This example demonstrates how to use the Dart API for VAD (voice activity detection)
  with non-streaming speech recognition.
version: 1.0.0
environment:
  sdk: ^3.4.0
dependencies:
  sherpa_onnx: ^1.10.19
  path: ^1.9.0
  args: ^2.5.0
dev_dependencies:
  lints: ^3.0.0
#!/usr/bin/env bash
set -ex
dart pub get
if [ ! -f ./sherpa-onnx-paraformer-zh-2023-09-14/tokens.txt ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
rm sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2
fi
if [ ! -f ./lei-jun-test.wav ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/lei-jun-test.wav
fi
if [[ ! -f ./silero_vad.onnx ]]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi
dart run \
./bin/paraformer.dart \
--silero-vad ./silero_vad.onnx \
--model ./sherpa-onnx-paraformer-zh-2023-09-14/model.int8.onnx \
--tokens ./sherpa-onnx-paraformer-zh-2023-09-14/tokens.txt \
--input-wav ./lei-jun-test.wav
... ...
#!/usr/bin/env bash
set -ex
dart pub get
if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
fi
if [ ! -f ./Obama.wav ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
fi
if [[ ! -f ./silero_vad.onnx ]]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi
dart run \
./bin/sense-voice.dart \
--silero-vad ./silero_vad.onnx \
--model ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.onnx \
--tokens ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt \
--use-itn true \
--input-wav ./Obama.wav
... ...
#!/usr/bin/env bash
set -ex
dart pub get
if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
fi
if [ ! -f ./lei-jun-test.wav ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/lei-jun-test.wav
fi
if [[ ! -f ./silero_vad.onnx ]]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi
dart run \
./bin/sense-voice.dart \
--silero-vad ./silero_vad.onnx \
--model ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.onnx \
--tokens ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt \
--use-itn true \
--input-wav ./lei-jun-test.wav
... ...
#!/usr/bin/env bash
set -ex
dart pub get
if [ ! -f ./sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04/tokens.txt ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2
tar xvf sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2
rm sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2
fi
if [ ! -f ./lei-jun-test.wav ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/lei-jun-test.wav
fi
if [[ ! -f ./silero_vad.onnx ]]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi
dart run \
./bin/telespeech-ctc.dart \
--silero-vad ./silero_vad.onnx \
--model ./sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04/model.int8.onnx \
--tokens ./sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04/tokens.txt \
--input-wav ./lei-jun-test.wav
... ...
#!/usr/bin/env bash
set -ex
dart pub get
if [ ! -f ./sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
rm sherpa-onnx-whisper-tiny.en.tar.bz2
fi
if [ ! -f ./Obama.wav ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
fi
if [[ ! -f ./silero_vad.onnx ]]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi
dart run \
./bin/whisper.dart \
--silero-vad ./silero_vad.onnx \
--encoder ./sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx \
--decoder ./sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx \
--tokens ./sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt \
--input-wav ./Obama.wav
... ...
#!/usr/bin/env bash
set -ex
dart pub get
if [ ! -f ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/tokens.txt ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-gigaspeech-2023-12-12.tar.bz2
tar xvf sherpa-onnx-zipformer-gigaspeech-2023-12-12.tar.bz2
rm sherpa-onnx-zipformer-gigaspeech-2023-12-12.tar.bz2
fi
if [ ! -f ./Obama.wav ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
fi
if [[ ! -f ./silero_vad.onnx ]]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi
dart run \
./bin/zipformer-transducer.dart \
--silero-vad ./silero_vad.onnx \
--encoder ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/encoder-epoch-30-avg-1.int8.onnx \
--decoder ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/decoder-epoch-30-avg-1.onnx \
--joiner ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/joiner-epoch-30-avg-1.int8.onnx \
--tokens ./sherpa-onnx-zipformer-gigaspeech-2023-12-12/tokens.txt \
--input-wav ./Obama.wav
... ...
name: vad_with_non_streaming_asr
description: >
  This example demonstrates how to use the Dart API for VAD (voice activity detection)
  with non-streaming speech recognition.
version: 1.0.0
environment:
  sdk: ^3.4.0
dependencies:
  sherpa_onnx:
    path: ../../flutter/sherpa_onnx
  path: ^1.9.0
  args: ^2.5.0
dev_dependencies:
  lints: ^3.0.0