spoken_language_identification.dart
1.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
/// Detects the spoken language of a wave file with a whisper model.
///
/// Required options:
///   --encoder  Path to the whisper encoder model.
///   --decoder  Path to the whisper decoder model.
///   --wav      Path to the wave file to analyze.
/// Optional:
///   --tail-paddings  Tail paddings for the whisper model (default: 0).
///
/// Exits with code 1 on missing/invalid arguments or missing files.
Future<void> main(List<String> arguments) async {
  // Load the sherpa-onnx native library before any API call.
  await initSherpaOnnx();

  final parser = ArgParser()
    ..addOption('encoder', help: 'Path to the whisper encoder model')
    ..addOption('decoder', help: 'Path to the whisper decoder model')
    ..addOption('tail-paddings',
        help: 'Tail paddings for the whisper model', defaultsTo: '0')
    ..addOption('wav', help: 'Path to test.wav for language identification')
    ..addFlag('help',
        abbr: 'h', help: 'Show this help message', negatable: false);

  final res = parser.parse(arguments);
  if (res['help'] as bool) {
    print(parser.usage);
    exit(0);
  }
  if (res['encoder'] == null || res['decoder'] == null || res['wav'] == null) {
    print(parser.usage);
    exit(1);
  }

  final encoder = res['encoder'] as String;
  final decoder = res['decoder'] as String;
  final wav = res['wav'] as String;

  // Fail fast with a clear message instead of crashing inside native code
  // when a model or wave file path is wrong.
  for (final path in [encoder, decoder, wav]) {
    if (!File(path).existsSync()) {
      print('File not found: $path');
      exit(1);
    }
  }

  // Reject a malformed --tail-paddings value instead of silently using 0;
  // the absent case is already covered by defaultsTo: '0'.
  final tailPaddings = int.tryParse(res['tail-paddings'] as String);
  if (tailPaddings == null) {
    print('Invalid --tail-paddings value: ${res['tail-paddings']}');
    exit(1);
  }

  final whisperConfig = sherpa_onnx.SpokenLanguageIdentificationWhisperConfig(
    encoder: encoder,
    decoder: decoder,
    tailPaddings: tailPaddings,
  );
  final config = sherpa_onnx.SpokenLanguageIdentificationConfig(
    whisper: whisperConfig,
    numThreads: 1,
    debug: true,
    provider: 'cpu',
  );

  final slid = sherpa_onnx.SpokenLanguageIdentification(config);
  try {
    final waveData = sherpa_onnx.readWave(wav);
    final stream = slid.createStream();
    try {
      stream.acceptWaveform(
          samples: waveData.samples, sampleRate: waveData.sampleRate);
      final result = slid.compute(stream);
      print('File: $wav');
      print('Detected language: ${result.lang}');
    } finally {
      // Native stream must be released even if compute throws.
      stream.free();
    }
  } finally {
    // Release the native recognizer in all cases.
    slid.free();
  }
}