Fangjun Kuang
Committed by GitHub

Add dart API for SenseVoice (#1159)

... ... @@ -6,6 +6,10 @@ cd dart-api-examples
pushd non-streaming-asr
echo '----------SenseVoice----------'
./run-sense-voice.sh
rm -rf sherpa-onnx-*
echo '----------NeMo transducer----------'
./run-nemo-transducer.sh
rm -rf sherpa-onnx-*
... ...
... ... @@ -11,4 +11,5 @@ This folder contains examples for non-streaming ASR with Dart API.
|[./bin/whisper.dart](./bin/whisper.dart)| Use whisper for speech recognition. See [./run-whisper.sh](./run-whisper.sh)|
|[./bin/zipformer-transducer.dart](./bin/zipformer-transducer.dart)| Use a zipformer transducer for speech recognition. See [./run-zipformer-transducer.sh](./run-zipformer-transducer.sh)|
|[./bin/vad-with-paraformer.dart](./bin/vad-with-paraformer.dart)| Use a [silero-vad](https://github.com/snakers4/silero-vad) with paraformer for speech recognition. See [./run-vad-with-paraformer.sh](./run-vad-with-paraformer.sh)|
|[./bin/sense-voice.dart](./bin/sense-voice.dart)| Use a SenseVoice CTC model for speech recognition. See [./run-sense-voice.sh](./run-sense-voice.sh)|
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
/// Command-line example: transcribe a wave file with a SenseVoice CTC model.
///
/// Required flags: --model, --tokens, --input-wav.
/// Optional flags: --language (auto, zh, en, ja, ko, yue; empty means auto)
/// and --use-itn (true/false, default false).
void main(List<String> arguments) async {
  await initSherpaOnnx();

  final parser = ArgParser()
    // Fixed: this example uses a SenseVoice model, not a paraformer model.
    ..addOption('model', help: 'Path to the SenseVoice model')
    ..addOption('tokens', help: 'Path to tokens.txt')
    ..addOption('language',
        help: 'auto, zh, en, ja, ko, yue, or leave it empty to use auto',
        defaultsTo: '')
    ..addOption('use-itn',
        help: 'true to use inverse text normalization', defaultsTo: 'false')
    ..addOption('input-wav', help: 'Path to input.wav to transcribe');

  final res = parser.parse(arguments);
  if (res['model'] == null ||
      res['tokens'] == null ||
      res['input-wav'] == null) {
    // A required flag is missing: print usage and exit with failure.
    print(parser.usage);
    exit(1);
  }

  final model = res['model'] as String;
  final tokens = res['tokens'] as String;
  final inputWav = res['input-wav'] as String;
  final language = res['language'] as String;
  // ArgParser option values are strings; accept any casing of 'true'.
  final useItn = (res['use-itn'] as String).toLowerCase() == 'true';

  final senseVoice = sherpa_onnx.OfflineSenseVoiceModelConfig(
      model: model, language: language, useInverseTextNormalization: useItn);

  final modelConfig = sherpa_onnx.OfflineModelConfig(
    senseVoice: senseVoice,
    tokens: tokens,
    debug: true,
    numThreads: 1,
  );
  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
  final recognizer = sherpa_onnx.OfflineRecognizer(config);

  final waveData = sherpa_onnx.readWave(inputWav);
  final stream = recognizer.createStream();

  stream.acceptWaveform(
      samples: waveData.samples, sampleRate: waveData.sampleRate);
  recognizer.decode(stream);

  final result = recognizer.getResult(stream);
  print(result.text);

  // Release native resources explicitly; they are not garbage-collected.
  stream.free();
  recognizer.free();
}
... ...
... ... @@ -10,7 +10,7 @@ environment:
# Add regular dependencies here.
dependencies:
sherpa_onnx: ^1.10.16
sherpa_onnx: ^1.10.17
path: ^1.9.0
args: ^2.5.0
... ...
#!/usr/bin/env bash
set -ex
dart pub get
if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
fi
dart run \
./bin/sense-voice.dart \
--model ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx \
--tokens ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt \
--use-itn true \
--input-wav ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/zh.wav
... ...
... ... @@ -11,7 +11,7 @@ environment:
# Add regular dependencies here.
dependencies:
sherpa_onnx: ^1.10.16
sherpa_onnx: ^1.10.17
path: ^1.9.0
args: ^2.5.0
... ...
... ... @@ -8,7 +8,7 @@ environment:
# Add regular dependencies here.
dependencies:
sherpa_onnx: ^1.10.16
sherpa_onnx: ^1.10.17
path: ^1.9.0
args: ^2.5.0
... ...
... ... @@ -9,7 +9,7 @@ environment:
sdk: ^3.4.0
dependencies:
sherpa_onnx: ^1.10.16
sherpa_onnx: ^1.10.17
path: ^1.9.0
args: ^2.5.0
... ...
... ... @@ -5,7 +5,7 @@ description: >
publish_to: 'none'
version: 1.10.16
version: 1.10.17
topics:
- speech-recognition
... ... @@ -30,7 +30,7 @@ dependencies:
record: ^5.1.0
url_launcher: ^6.2.6
sherpa_onnx: ^1.10.16
sherpa_onnx: ^1.10.17
# sherpa_onnx:
# path: ../../flutter/sherpa_onnx
... ...
... ... @@ -5,7 +5,7 @@ description: >
publish_to: 'none' # Remove this line if you wish to publish to pub.dev
version: 1.10.16
version: 1.10.17
environment:
sdk: '>=3.4.0 <4.0.0'
... ... @@ -17,7 +17,7 @@ dependencies:
cupertino_icons: ^1.0.6
path_provider: ^2.1.3
path: ^1.9.0
sherpa_onnx: ^1.10.16
sherpa_onnx: ^1.10.17
url_launcher: ^6.2.6
audioplayers: ^5.0.0
... ...
... ... @@ -79,6 +79,23 @@ class OfflineTdnnModelConfig {
final String model;
}
/// Configuration for an offline SenseVoice CTC model.
class OfflineSenseVoiceModelConfig {
  /// Path to the SenseVoice onnx model file.
  final String model;

  /// Language hint; empty string lets the model auto-detect.
  final String language;

  /// Whether inverse text normalization is applied to the output.
  final bool useInverseTextNormalization;

  const OfflineSenseVoiceModelConfig({
    this.model = '',
    this.language = '',
    this.useInverseTextNormalization = false,
  });

  @override
  String toString() =>
      'OfflineSenseVoiceModelConfig(model: $model, language: $language, useInverseTextNormalization: $useInverseTextNormalization)';
}
class OfflineLMConfig {
const OfflineLMConfig({this.model = '', this.scale = 1.0});
... ... @@ -98,6 +115,7 @@ class OfflineModelConfig {
this.nemoCtc = const OfflineNemoEncDecCtcModelConfig(),
this.whisper = const OfflineWhisperModelConfig(),
this.tdnn = const OfflineTdnnModelConfig(),
this.senseVoice = const OfflineSenseVoiceModelConfig(),
required this.tokens,
this.numThreads = 1,
this.debug = true,
... ... @@ -110,7 +128,7 @@ class OfflineModelConfig {
@override
String toString() {
return 'OfflineModelConfig(transducer: $transducer, paraformer: $paraformer, nemoCtc: $nemoCtc, whisper: $whisper, tdnn: $tdnn, tokens: $tokens, numThreads: $numThreads, debug: $debug, provider: $provider, modelType: $modelType, modelingUnit: $modelingUnit, bpeVocab: $bpeVocab, telespeechCtc: $telespeechCtc)';
return 'OfflineModelConfig(transducer: $transducer, paraformer: $paraformer, nemoCtc: $nemoCtc, whisper: $whisper, tdnn: $tdnn, senseVoice: $senseVoice, tokens: $tokens, numThreads: $numThreads, debug: $debug, provider: $provider, modelType: $modelType, modelingUnit: $modelingUnit, bpeVocab: $bpeVocab, telespeechCtc: $telespeechCtc)';
}
final OfflineTransducerModelConfig transducer;
... ... @@ -118,6 +136,7 @@ class OfflineModelConfig {
final OfflineNemoEncDecCtcModelConfig nemoCtc;
final OfflineWhisperModelConfig whisper;
final OfflineTdnnModelConfig tdnn;
final OfflineSenseVoiceModelConfig senseVoice;
final String tokens;
final int numThreads;
... ... @@ -219,6 +238,14 @@ class OfflineRecognizer {
c.ref.model.tdnn.model = config.model.tdnn.model.toNativeUtf8();
c.ref.model.senseVoice.model = config.model.senseVoice.model.toNativeUtf8();
c.ref.model.senseVoice.language =
config.model.senseVoice.language.toNativeUtf8();
c.ref.model.senseVoice.useInverseTextNormalization =
config.model.senseVoice.useInverseTextNormalization ? 1 : 0;
c.ref.model.tokens = config.model.tokens.toNativeUtf8();
c.ref.model.numThreads = config.model.numThreads;
... ... @@ -254,6 +281,8 @@ class OfflineRecognizer {
calloc.free(c.ref.model.modelType);
calloc.free(c.ref.model.provider);
calloc.free(c.ref.model.tokens);
calloc.free(c.ref.model.senseVoice.language);
calloc.free(c.ref.model.senseVoice.model);
calloc.free(c.ref.model.tdnn.model);
calloc.free(c.ref.model.whisper.task);
calloc.free(c.ref.model.whisper.language);
... ...
... ... @@ -87,6 +87,14 @@ final class SherpaOnnxOfflineTdnnModelConfig extends Struct {
external Pointer<Utf8> model;
}
/// FFI mirror of the native `SherpaOnnxOfflineSenseVoiceModelConfig` struct.
///
/// Field order and types must match the C declaration exactly; do not
/// reorder or retype fields without updating the native side.
final class SherpaOnnxOfflineSenseVoiceModelConfig extends Struct {
/// Path to the SenseVoice onnx model, as a native UTF-8 string.
external Pointer<Utf8> model;
/// Language hint as a native UTF-8 string; the Dart wrapper allocates it
/// with toNativeUtf8 and frees it after use.
external Pointer<Utf8> language;
/// C-style boolean: 1 enables inverse text normalization, 0 disables it.
@Int32()
external int useInverseTextNormalization;
}
final class SherpaOnnxOfflineLMConfig extends Struct {
external Pointer<Utf8> model;
... ... @@ -115,6 +123,8 @@ final class SherpaOnnxOfflineModelConfig extends Struct {
external Pointer<Utf8> modelingUnit;
external Pointer<Utf8> bpeVocab;
external Pointer<Utf8> telespeechCtc;
external SherpaOnnxOfflineSenseVoiceModelConfig senseVoice;
}
final class SherpaOnnxOfflineRecognizerConfig extends Struct {
... ...
... ... @@ -17,7 +17,7 @@ topics:
- voice-activity-detection
# remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx_macos.podspec
version: 1.10.16
version: 1.10.17
homepage: https://github.com/k2-fsa/sherpa-onnx
... ... @@ -30,19 +30,19 @@ dependencies:
flutter:
sdk: flutter
sherpa_onnx_android: ^1.10.16
sherpa_onnx_android: ^1.10.17
# path: ../sherpa_onnx_android
sherpa_onnx_macos: ^1.10.16
sherpa_onnx_macos: ^1.10.17
# path: ../sherpa_onnx_macos
sherpa_onnx_linux: ^1.10.16
sherpa_onnx_linux: ^1.10.17
# path: ../sherpa_onnx_linux
#
sherpa_onnx_windows: ^1.10.16
sherpa_onnx_windows: ^1.10.17
# path: ../sherpa_onnx_windows
sherpa_onnx_ios: ^1.10.16
sherpa_onnx_ios: ^1.10.17
# sherpa_onnx_ios:
# path: ../sherpa_onnx_ios
... ...
... ... @@ -7,7 +7,7 @@
# https://groups.google.com/g/dart-ffi/c/nUATMBy7r0c
Pod::Spec.new do |s|
s.name = 'sherpa_onnx_ios'
s.version = '1.10.16'
s.version = '1.10.17'
s.summary = 'A new Flutter FFI plugin project.'
s.description = <<-DESC
A new Flutter FFI plugin project.
... ...
... ... @@ -4,7 +4,7 @@
#
Pod::Spec.new do |s|
s.name = 'sherpa_onnx_macos'
s.version = '1.10.16'
s.version = '1.10.17'
s.summary = 'sherpa-onnx Flutter FFI plugin project.'
s.description = <<-DESC
sherpa-onnx Flutter FFI plugin project.
... ...
... ... @@ -17,7 +17,7 @@ topics:
- voice-activity-detection
# remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx.podspec
version: 1.10.16
version: 1.10.17
homepage: https://github.com/k2-fsa/sherpa-onnx
... ...
... ... @@ -18,7 +18,8 @@ void CudaConfig::Register(ParseOptions *po) {
bool CudaConfig::Validate() const {
if (cudnn_conv_algo_search < 1 || cudnn_conv_algo_search > 3) {
SHERPA_ONNX_LOGE("cudnn_conv_algo_search: '%d' is not a valid option."
SHERPA_ONNX_LOGE(
"cudnn_conv_algo_search: '%d' is not a valid option."
"Options : [1,3]. Check OnnxRT docs",
cudnn_conv_algo_search);
return false;
... ... @@ -60,7 +61,7 @@ void TensorrtConfig::Register(ParseOptions *po) {
bool TensorrtConfig::Validate() const {
if (trt_max_workspace_size < 0) {
SHERPA_ONNX_LOGE("trt_max_workspace_size: %lld is not valid.",
SHERPA_ONNX_LOGE("trt_max_workspace_size: %ld is not valid.",
trt_max_workspace_size);
return false;
}
... ... @@ -83,23 +84,19 @@ std::string TensorrtConfig::ToString() const {
os << "TensorrtConfig(";
os << "trt_max_workspace_size=" << trt_max_workspace_size << ", ";
os << "trt_max_partition_iterations="
<< trt_max_partition_iterations << ", ";
os << "trt_max_partition_iterations=" << trt_max_partition_iterations << ", ";
os << "trt_min_subgraph_size=" << trt_min_subgraph_size << ", ";
os << "trt_fp16_enable=\""
<< (trt_fp16_enable? "True" : "False") << "\", ";
os << "trt_fp16_enable=\"" << (trt_fp16_enable ? "True" : "False") << "\", ";
os << "trt_detailed_build_log=\""
<< (trt_detailed_build_log? "True" : "False") << "\", ";
<< (trt_detailed_build_log ? "True" : "False") << "\", ";
os << "trt_engine_cache_enable=\""
<< (trt_engine_cache_enable? "True" : "False") << "\", ";
os << "trt_engine_cache_path=\""
<< trt_engine_cache_path.c_str() << "\", ";
<< (trt_engine_cache_enable ? "True" : "False") << "\", ";
os << "trt_engine_cache_path=\"" << trt_engine_cache_path.c_str() << "\", ";
os << "trt_timing_cache_enable=\""
<< (trt_timing_cache_enable? "True" : "False") << "\", ";
os << "trt_timing_cache_path=\""
<< trt_timing_cache_path.c_str() << "\",";
os << "trt_dump_subgraphs=\""
<< (trt_dump_subgraphs? "True" : "False") << "\" )";
<< (trt_timing_cache_enable ? "True" : "False") << "\", ";
os << "trt_timing_cache_path=\"" << trt_timing_cache_path.c_str() << "\",";
os << "trt_dump_subgraphs=\"" << (trt_dump_subgraphs ? "True" : "False")
<< "\" )";
return os.str();
}
... ...