sense-voice.dart
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';

import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

import './init.dart';
void main(List<String> arguments) async {
  await initSherpaOnnx();

  // Define the command-line options accepted by this example.
  final parser = ArgParser()
    ..addOption('model', help: 'Path to the SenseVoice model')
    ..addOption('tokens', help: 'Path to tokens.txt')
    ..addOption('language',
        help: 'auto, zh, en, ja, ko, yue, or leave it empty to use auto',
        defaultsTo: '')
    ..addOption('use-itn',
        help: 'true to use inverse text normalization', defaultsTo: 'false')
    ..addOption('input-wav', help: 'Path to input.wav to transcribe');

  final res = parser.parse(arguments);

  // model, tokens, and input-wav are required.
  if (res['model'] == null ||
      res['tokens'] == null ||
      res['input-wav'] == null) {
    print(parser.usage);
    exit(1);
  }
  final model = res['model'] as String;
  final tokens = res['tokens'] as String;
  final inputWav = res['input-wav'] as String;
  final language = res['language'] as String;
  final useItn = (res['use-itn'] as String).toLowerCase() == 'true';

  // Configure the SenseVoice model and wrap it in an offline model config.
  final senseVoice = sherpa_onnx.OfflineSenseVoiceModelConfig(
      model: model, language: language, useInverseTextNormalization: useItn);

  final modelConfig = sherpa_onnx.OfflineModelConfig(
    senseVoice: senseVoice,
    tokens: tokens,
    debug: true,
    numThreads: 1,
  );
  // Create a non-streaming (offline) recognizer from the config.
  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
  final recognizer = sherpa_onnx.OfflineRecognizer(config);

  // Read the wave file, feed its samples to a stream, and decode it.
  final waveData = sherpa_onnx.readWave(inputWav);
  final stream = recognizer.createStream();
  stream.acceptWaveform(
      samples: waveData.samples, sampleRate: waveData.sampleRate);

  recognizer.decode(stream);

  final result = recognizer.getResult(stream);
  print(result.text);

  // Release the native resources owned by the stream and the recognizer.
  stream.free();
  recognizer.free();
}
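
A typical invocation is sketched below; the model and file paths are placeholders, and the exact script path depends on where the file sits in your package.

  dart run sense-voice.dart \
    --model ./model.onnx \
    --tokens ./tokens.txt \
    --input-wav ./input.wav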