Fangjun Kuang
Committed by GitHub

Add Dart API for audio tagging (#1181)

... ... @@ -4,6 +4,14 @@ set -ex
cd dart-api-examples
pushd audio-tagging
echo '----------zipformer----------'
./run-zipformer.sh
echo '----------ced----------'
./run-ced.sh
popd
pushd vad-with-non-streaming-asr
echo '----------TeleSpeech CTC----------'
./run-telespeech-ctc.sh
... ...
... ... @@ -110,6 +110,7 @@ jobs:
cp scripts/dart/tts-pubspec.yaml dart-api-examples/tts/pubspec.yaml
cp scripts/dart/kws-pubspec.yaml dart-api-examples/keyword-spotter/pubspec.yaml
cp scripts/dart/vad-non-streaming-asr-pubspec.yaml dart-api-examples/vad-with-non-streaming-asr/pubspec.yaml
cp scripts/dart/audio-tagging-pubspec.yaml dart-api-examples/audio-tagging/pubspec.yaml
cp scripts/dart/sherpa-onnx-pubspec.yaml flutter/sherpa_onnx/pubspec.yaml
... ...
## 1.10.20
* Add Dart API for audio tagging
## 1.10.19
* Prefix all C API functions with SherpaOnnx
... ...
... ... @@ -4,9 +4,13 @@
|------------------|------------------|----------------------|------------------------|
| ✔️ | ✔️ | ✔️ | ✔️ |
| Spoken Language identification | Audio tagging | Voice activity detection | Keyword spotting |
|--------------------------------|---------------|--------------------------|------------------|
| ✔️ | ✔️ | ✔️ | ✔️ |
| Spoken Language identification | Audio tagging | Voice activity detection |
|--------------------------------|---------------|--------------------------|
| ✔️ | ✔️ | ✔️ |
| Keyword spotting | Add punctuation |
|------------------|-----------------|
| ✔️ | ✔️ |
### Supported platforms
... ...
... ... @@ -5,7 +5,7 @@ This directory contains examples for Dart API.
You can find the package at
https://pub.dev/packages/sherpa_onnx
## Descirption
## Description
| Directory | Description |
|-----------|-------------|
... ... @@ -15,6 +15,7 @@ https://pub.dev/packages/sherpa_onnx
| [./tts](./tts)| Example for text to speech|
| [./vad](./vad)| Example for voice activity detection|
| [./vad-with-non-streaming-asr](./vad-with-non-streaming-asr)| Example for voice activity detection with non-streaming speech recognition. You can use it to generate subtitles.|
| [./audio-tagging](./audio-tagging)| Example for audio tagging.|
## How to create an example in this folder
... ...
# https://dart.dev/guides/libraries/private-files
# Created by `dart pub`
.dart_tool/
... ...
# Introduction
This example shows how to use the Dart API from sherpa-onnx for audio tagging.
| File | Description|
|------|------------|
|[./bin/zipformer.dart](./bin/zipformer.dart)| Use a Zipformer model for audio tagging. See [./run-zipformer.sh](./run-zipformer.sh)|
|[./bin/ced.dart](./bin/ced.dart)| Use a [CED](https://github.com/RicherMans/CED) model for audio tagging. See [./run-ced.sh](./run-ced.sh)|
... ...
# This file configures the static analysis results for your project (errors,
# warnings, and lints).
#
# This enables the 'recommended' set of lints from `package:lints`.
# This set helps identify many issues that may lead to problems when running
# or consuming Dart code, and enforces writing Dart using a single, idiomatic
# style and format.
#
# If you want a smaller set of lints you can change this to specify
# 'package:lints/core.yaml'. These are just the most critical lints
# (the recommended set includes the core lints).
# The core lints are also what is used by pub.dev for scoring packages.
include: package:lints/recommended.yaml
# Uncomment the following section to specify additional rules.
# linter:
# rules:
# - camel_case_types
# analyzer:
# exclude:
# - path/to/excluded/files/**
# For more information about the core and recommended set of lints, see
# https://dart.dev/go/core-lints
# For additional information about configuring this file, see
# https://dart.dev/guides/language/analysis-options
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
/// Runs audio tagging on a wave file using a CED model.
///
/// Required command-line options: --model, --labels, --wav.
/// Optional: --top-k (defaults to 5).
void main(List<String> arguments) async {
  // Load the native sherpa-onnx library before any binding is used.
  await initSherpaOnnx();

  final parser = ArgParser()
    // Fixed: this example uses a CED model, not a Zipformer model.
    ..addOption('model', help: 'Path to the CED model')
    ..addOption('labels', help: 'Path to class_labels_indices.csv')
    ..addOption('top-k', help: 'topK events to be returned', defaultsTo: '5')
    ..addOption('wav', help: 'Path to test.wav to be tagged');

  final res = parser.parse(arguments);
  if (res['model'] == null || res['labels'] == null || res['wav'] == null) {
    print(parser.usage);
    exit(1);
  }

  final model = res['model'] as String;
  final labels = res['labels'] as String;
  // Fall back to 5 events if --top-k is not a valid integer.
  final topK = int.tryParse(res['top-k'] as String) ?? 5;
  final wav = res['wav'] as String;

  final modelConfig = sherpa_onnx.AudioTaggingModelConfig(
    ced: model,
    numThreads: 1,
    debug: true,
    provider: 'cpu',
  );
  final config = sherpa_onnx.AudioTaggingConfig(
    model: modelConfig,
    labels: labels,
  );

  final at = sherpa_onnx.AudioTagging(config: config);

  final waveData = sherpa_onnx.readWave(wav);
  final stream = at.createStream();
  stream.acceptWaveform(
      samples: waveData.samples, sampleRate: waveData.sampleRate);

  final events = at.compute(stream: stream, topK: topK);
  print(events);

  // Release native resources; they are not garbage collected.
  stream.free();
  at.free();
}
... ...
../../vad/bin/init.dart
\ No newline at end of file
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
/// Runs audio tagging on a wave file using a Zipformer model.
///
/// Required command-line options: --model, --labels, --wav.
/// Optional: --top-k (defaults to 5).
void main(List<String> arguments) async {
  // The native sherpa-onnx library must be loaded first.
  await initSherpaOnnx();

  final parser = ArgParser()
    ..addOption('model', help: 'Path to the zipformer model')
    ..addOption('labels', help: 'Path to class_labels_indices.csv')
    ..addOption('top-k', help: 'topK events to be returned', defaultsTo: '5')
    ..addOption('wav', help: 'Path to test.wav to be tagged');

  final options = parser.parse(arguments);
  final hasRequired = options['model'] != null &&
      options['labels'] != null &&
      options['wav'] != null;
  if (!hasRequired) {
    print(parser.usage);
    exit(1);
  }

  // Build the full configuration inline; the zipformer model path is the
  // only model-specific field.
  final config = sherpa_onnx.AudioTaggingConfig(
    model: sherpa_onnx.AudioTaggingModelConfig(
      zipformer: sherpa_onnx.OfflineZipformerAudioTaggingModelConfig(
        model: options['model'] as String,
      ),
      numThreads: 1,
      debug: true,
      provider: 'cpu',
    ),
    labels: options['labels'] as String,
  );

  final tagger = sherpa_onnx.AudioTagging(config: config);

  final wave = sherpa_onnx.readWave(options['wav'] as String);
  final stream = tagger.createStream();
  stream.acceptWaveform(samples: wave.samples, sampleRate: wave.sampleRate);

  // An invalid --top-k value falls back to 5.
  final topK = int.tryParse(options['top-k'] as String) ?? 5;
  final events = tagger.compute(stream: stream, topK: topK);
  print(events);

  // Free native resources explicitly.
  stream.free();
  tagger.free();
}
... ...
name: audio_tagging
description: >
This example demonstrates how to use the Dart API for audio tagging.
version: 1.0.0
environment:
sdk: ^3.4.0
dependencies:
sherpa_onnx: ^1.10.19
path: ^1.9.0
args: ^2.5.0
dev_dependencies:
lints: ^3.0.0
... ...
#!/usr/bin/env bash
# Downloads (if needed) the CED mini audio-tagging model and runs the
# Dart CED example on each bundled test wave file.

set -ex

dart pub get

# Fetch and unpack the pre-trained model only when it is not already present.
# NOTE(review): the guard checks model.onnx while the run below uses
# model.int8.onnx — presumably both ship in the same archive; verify.
if [[ ! -f ./sherpa-onnx-ced-mini-audio-tagging-2024-04-19/model.onnx ]]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/audio-tagging-models/sherpa-onnx-ced-mini-audio-tagging-2024-04-19.tar.bz2
  tar xvf sherpa-onnx-ced-mini-audio-tagging-2024-04-19.tar.bz2
  rm sherpa-onnx-ced-mini-audio-tagging-2024-04-19.tar.bz2
fi

# Tag each of the six bundled test recordings.
for w in 1 2 3 4 5 6; do
  dart run \
    ./bin/ced.dart \
    --model ./sherpa-onnx-ced-mini-audio-tagging-2024-04-19/model.int8.onnx \
    --labels ./sherpa-onnx-ced-mini-audio-tagging-2024-04-19/class_labels_indices.csv \
    --wav ./sherpa-onnx-ced-mini-audio-tagging-2024-04-19/test_wavs/$w.wav
done
... ...
#!/usr/bin/env bash
# Downloads (if needed) the Zipformer audio-tagging model and runs the
# Dart Zipformer example on each bundled test wave file.

set -ex

dart pub get

# Fetch and unpack the pre-trained model only when it is not already present.
# NOTE(review): the guard checks model.onnx while the run below uses
# model.int8.onnx — presumably both ship in the same archive; verify.
if [[ ! -f ./sherpa-onnx-zipformer-audio-tagging-2024-04-09/model.onnx ]]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/audio-tagging-models/sherpa-onnx-zipformer-audio-tagging-2024-04-09.tar.bz2
  tar xvf sherpa-onnx-zipformer-audio-tagging-2024-04-09.tar.bz2
  rm sherpa-onnx-zipformer-audio-tagging-2024-04-09.tar.bz2
fi

# Tag each of the six bundled test recordings.
for w in 1 2 3 4 5 6; do
  dart run \
    ./bin/zipformer.dart \
    --model ./sherpa-onnx-zipformer-audio-tagging-2024-04-09/model.int8.onnx \
    --labels ./sherpa-onnx-zipformer-audio-tagging-2024-04-09/class_labels_indices.csv \
    --wav ./sherpa-onnx-zipformer-audio-tagging-2024-04-09/test_wavs/$w.wav
done
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ... @@ -65,5 +64,5 @@ void main(List<String> arguments) async {
samples: audio.samples,
sampleRate: audio.sampleRate,
);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}
... ...
... ... @@ -80,5 +80,5 @@ void main(List<String> arguments) async {
samples: audio.samples,
sampleRate: audio.sampleRate,
);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
... ... @@ -82,5 +81,5 @@ void main(List<String> arguments) async {
samples: audio.samples,
sampleRate: audio.sampleRate,
);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}
... ...
... ... @@ -77,5 +77,5 @@ void main(List<String> arguments) async {
sherpa_onnx.writeWave(
filename: outputWav, samples: s, sampleRate: waveData.sampleRate);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}
... ...
... ... @@ -2,6 +2,7 @@
import 'dart:io';
import 'dart:ffi';
export 'src/audio_tagging.dart';
export 'src/feature_config.dart';
export 'src/keyword_spotter.dart';
export 'src/offline_recognizer.dart';
... ...
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:ffi';
import 'package:ffi/ffi.dart';
import './offline_stream.dart';
import './sherpa_onnx_bindings.dart';
/// Configuration for an offline Zipformer audio tagging model.
class OfflineZipformerAudioTaggingModelConfig {
  const OfflineZipformerAudioTaggingModelConfig({this.model = ''});

  @override
  String toString() =>
      'OfflineZipformerAudioTaggingModelConfig(model: $model)';

  /// Path to the Zipformer ONNX model file.
  final String model;
}
/// Model configuration for audio tagging.
///
/// Provide either [zipformer] (a Zipformer model) or [ced] (path to a CED
/// model); leave the unused one at its default.
class AudioTaggingModelConfig {
  // const added for consistency with OfflineZipformerAudioTaggingModelConfig;
  // all fields are final and all defaults are compile-time constants.
  const AudioTaggingModelConfig(
      {this.zipformer = const OfflineZipformerAudioTaggingModelConfig(),
      this.ced = '',
      this.numThreads = 1,
      this.provider = 'cpu',
      this.debug = true});

  @override
  String toString() {
    return 'AudioTaggingModelConfig(zipformer: $zipformer, ced: $ced, numThreads: $numThreads, provider: $provider, debug: $debug)';
  }

  /// Configuration for a Zipformer audio tagging model.
  final OfflineZipformerAudioTaggingModelConfig zipformer;

  /// Path to a CED model. Empty when [zipformer] is used.
  final String ced;

  /// Number of threads used by the native runtime.
  final int numThreads;

  /// Execution provider, e.g. 'cpu'.
  final String provider;

  /// Whether the native library prints debug information.
  final bool debug;
}
/// Top-level configuration for [AudioTagging].
class AudioTaggingConfig {
  // const added for consistency with the other config classes; both fields
  // are final.
  const AudioTaggingConfig({required this.model, this.labels = ''});

  @override
  String toString() {
    return 'AudioTaggingConfig(model: $model, labels: $labels)';
  }

  /// The model configuration.
  final AudioTaggingModelConfig model;

  /// Path to class_labels_indices.csv mapping event indices to names.
  final String labels;
}
/// A single audio event detected by [AudioTagging.compute].
class AudioEvent {
  // const added: this is an immutable value class with only final fields.
  const AudioEvent(
      {required this.name, required this.index, required this.prob});

  @override
  String toString() {
    return 'AudioEvent(name: $name, index: $index, prob: $prob)';
  }

  /// Human-readable event name from the labels file.
  final String name;

  /// Index of the event in the labels file.
  final int index;

  /// Probability (confidence) of the event.
  final double prob;
}
/// Dart wrapper around the native sherpa-onnx audio tagging API.
///
/// Lifecycle: construct with [AudioTagging.new], feed audio via a stream
/// from [createStream], classify with [compute], and release native memory
/// with [free].
class AudioTagging {
  AudioTagging._({required this.ptr, required this.config});

  // The user has to invoke AudioTagging.free() to avoid memory leak.
  factory AudioTagging({required AudioTaggingConfig config}) {
    // calloc zero-initializes the native struct, so fields not assigned
    // below (e.g. topK in the config struct) stay 0.
    final c = calloc<SherpaOnnxAudioTaggingConfig>();

    // Copy each Dart string into native UTF-8 memory. The pointers are
    // kept so they can be freed after the native object is created.
    final zipformerPtr = config.model.zipformer.model.toNativeUtf8();
    c.ref.model.zipformer.model = zipformerPtr;

    final cedPtr = config.model.ced.toNativeUtf8();
    c.ref.model.ced = cedPtr;

    c.ref.model.numThreads = config.model.numThreads;

    final providerPtr = config.model.provider.toNativeUtf8();
    c.ref.model.provider = providerPtr;

    // The native API expects an int flag, not a bool.
    c.ref.model.debug = config.model.debug ? 1 : 0;

    final labelsPtr = config.labels.toNativeUtf8();
    c.ref.labels = labelsPtr;

    // nullptr if the binding has not been initialized or creation failed.
    final ptr =
        SherpaOnnxBindings.sherpaOnnxCreateAudioTagging?.call(c) ?? nullptr;

    // The native side copies what it needs during creation, so the
    // temporary strings and the config struct can be freed here.
    calloc.free(labelsPtr);
    calloc.free(providerPtr);
    calloc.free(cedPtr);
    calloc.free(zipformerPtr);
    calloc.free(c);

    return AudioTagging._(ptr: ptr, config: config);
  }

  /// Releases the native object. Safe to call once; [ptr] becomes nullptr.
  void free() {
    SherpaOnnxBindings.sherpaOnnxDestroyAudioTagging?.call(ptr);
    ptr = nullptr;
  }

  /// The user has to invoke stream.free() on the returned instance
  /// to avoid memory leak
  OfflineStream createStream() {
    final p = SherpaOnnxBindings.sherpaOnnxAudioTaggingCreateOfflineStream
            ?.call(ptr) ??
        nullptr;
    return OfflineStream(ptr: p);
  }

  /// Classifies the audio in [stream] and returns up to [topK] events.
  ///
  /// Returns an empty list if the native call fails.
  List<AudioEvent> compute({required OfflineStream stream, required int topK}) {
    // The native call returns a null-terminated array of event pointers.
    final pp = SherpaOnnxBindings.sherpaOnnxAudioTaggingCompute
            ?.call(ptr, stream.ptr, topK) ??
        nullptr;

    final ans = <AudioEvent>[];

    if (pp == nullptr) {
      return ans;
    }

    // Walk the array until the null terminator, copying each event into
    // a Dart object so the native array can be freed afterwards.
    var i = 0;
    while (pp[i] != nullptr) {
      final p = pp[i];
      final name = p.ref.name.toDartString();
      final index = p.ref.index;
      final prob = p.ref.prob;
      final e = AudioEvent(name: name, index: index, prob: prob);
      ans.add(e);
      i += 1;
    }

    // Free the native result array; the Dart copies remain valid.
    SherpaOnnxBindings.sherpaOnnxAudioTaggingFreeResults?.call(pp);

    return ans;
  }

  // Native handle; nullptr after free().
  Pointer<SherpaOnnxAudioTagging> ptr;

  final AudioTaggingConfig config;
}
... ...
... ... @@ -2,6 +2,41 @@
import 'dart:ffi';
import 'package:ffi/ffi.dart';
/// Mirrors the native SherpaOnnxOfflineZipformerAudioTaggingModelConfig
/// struct. Field order must match the C declaration — do not reorder.
final class SherpaOnnxOfflineZipformerAudioTaggingModelConfig extends Struct {
  external Pointer<Utf8> model;
}
/// Mirrors the native SherpaOnnxAudioTaggingModelConfig struct.
/// Field order must match the C declaration — do not reorder.
/// NOTE(review): debug is declared before provider here; confirm this
/// matches the C header's field order.
final class SherpaOnnxAudioTaggingModelConfig extends Struct {
  external SherpaOnnxOfflineZipformerAudioTaggingModelConfig zipformer;

  external Pointer<Utf8> ced;

  @Int32()
  external int numThreads;

  @Int32()
  external int debug;

  external Pointer<Utf8> provider;
}
/// Mirrors the native SherpaOnnxAudioTaggingConfig struct.
/// Field order must match the C declaration — do not reorder.
final class SherpaOnnxAudioTaggingConfig extends Struct {
  external SherpaOnnxAudioTaggingModelConfig model;

  external Pointer<Utf8> labels;

  @Int32()
  external int topK;
}
/// Mirrors the native SherpaOnnxAudioEvent struct returned by
/// SherpaOnnxAudioTaggingCompute. Field order must match the C declaration.
final class SherpaOnnxAudioEvent extends Struct {
  external Pointer<Utf8> name;

  @Int32()
  external int index;

  @Float()
  external double prob;
}
final class SherpaOnnxOfflineTtsVitsModelConfig extends Struct {
external Pointer<Utf8> model;
external Pointer<Utf8> lexicon;
... ... @@ -303,6 +338,8 @@ final class SherpaOnnxKeywordSpotterConfig extends Struct {
external Pointer<Utf8> keywordsFile;
}
/// Opaque handle to the native audio tagging object; only ever used
/// behind a Pointer.
final class SherpaOnnxAudioTagging extends Opaque {}
final class SherpaOnnxKeywordSpotter extends Opaque {}
final class SherpaOnnxOfflineTts extends Opaque {}
... ... @@ -323,6 +360,40 @@ final class SherpaOnnxSpeakerEmbeddingExtractor extends Opaque {}
final class SherpaOnnxSpeakerEmbeddingManager extends Opaque {}
// Each native function gets a pair of typedefs: the *Native form describes
// the C signature (FFI types), the plain form the Dart-side signature used
// after asFunction(). They are identical when no type needs marshalling.

// Creates a native audio tagging object from a config struct.
typedef SherpaOnnxCreateAudioTaggingNative = Pointer<SherpaOnnxAudioTagging>
    Function(Pointer<SherpaOnnxAudioTaggingConfig>);

typedef SherpaOnnxCreateAudioTagging = SherpaOnnxCreateAudioTaggingNative;

// Destroys a native audio tagging object.
typedef SherpaOnnxDestroyAudioTaggingNative = Void Function(
    Pointer<SherpaOnnxAudioTagging>);

typedef SherpaOnnxDestroyAudioTagging = void Function(
    Pointer<SherpaOnnxAudioTagging>);

// Creates an offline stream owned by the audio tagger.
typedef SherpaOnnxAudioTaggingCreateOfflineStreamNative
    = Pointer<SherpaOnnxOfflineStream> Function(
        Pointer<SherpaOnnxAudioTagging>);

typedef SherpaOnnxAudioTaggingCreateOfflineStream
    = SherpaOnnxAudioTaggingCreateOfflineStreamNative;

// Runs tagging; returns a null-terminated array of event pointers.
typedef SherpaOnnxAudioTaggingComputeNative
    = Pointer<Pointer<SherpaOnnxAudioEvent>> Function(
        Pointer<SherpaOnnxAudioTagging>,
        Pointer<SherpaOnnxOfflineStream>,
        Int32);

typedef SherpaOnnxAudioTaggingCompute
    = Pointer<Pointer<SherpaOnnxAudioEvent>> Function(
        Pointer<SherpaOnnxAudioTagging>, Pointer<SherpaOnnxOfflineStream>, int);

// Frees the result array returned by compute.
typedef SherpaOnnxAudioTaggingFreeResultsNative = Void Function(
    Pointer<Pointer<SherpaOnnxAudioEvent>>);

typedef SherpaOnnxAudioTaggingFreeResults = void Function(
    Pointer<Pointer<SherpaOnnxAudioEvent>>);
typedef CreateKeywordSpotterNative = Pointer<SherpaOnnxKeywordSpotter> Function(
Pointer<SherpaOnnxKeywordSpotterConfig>);
... ... @@ -804,6 +875,13 @@ typedef SherpaOnnxFreeWaveNative = Void Function(Pointer<SherpaOnnxWave>);
typedef SherpaOnnxFreeWave = void Function(Pointer<SherpaOnnxWave>);
class SherpaOnnxBindings {
static SherpaOnnxCreateAudioTagging? sherpaOnnxCreateAudioTagging;
static SherpaOnnxDestroyAudioTagging? sherpaOnnxDestroyAudioTagging;
static SherpaOnnxAudioTaggingCreateOfflineStream?
sherpaOnnxAudioTaggingCreateOfflineStream;
static SherpaOnnxAudioTaggingCompute? sherpaOnnxAudioTaggingCompute;
static SherpaOnnxAudioTaggingFreeResults? sherpaOnnxAudioTaggingFreeResults;
static CreateKeywordSpotter? createKeywordSpotter;
static DestroyKeywordSpotter? destroyKeywordSpotter;
static CreateKeywordStream? createKeywordStream;
... ... @@ -958,6 +1036,33 @@ class SherpaOnnxBindings {
static SherpaOnnxFreeWave? freeWave;
static void init(DynamicLibrary dynamicLibrary) {
sherpaOnnxCreateAudioTagging ??= dynamicLibrary
.lookup<NativeFunction<SherpaOnnxCreateAudioTaggingNative>>(
'SherpaOnnxCreateAudioTagging')
.asFunction();
sherpaOnnxDestroyAudioTagging ??= dynamicLibrary
.lookup<NativeFunction<SherpaOnnxDestroyAudioTaggingNative>>(
'SherpaOnnxDestroyAudioTagging')
.asFunction();
sherpaOnnxAudioTaggingCreateOfflineStream ??= dynamicLibrary
.lookup<
NativeFunction<
SherpaOnnxAudioTaggingCreateOfflineStreamNative>>(
'SherpaOnnxAudioTaggingCreateOfflineStream')
.asFunction();
sherpaOnnxAudioTaggingCompute ??= dynamicLibrary
.lookup<NativeFunction<SherpaOnnxAudioTaggingComputeNative>>(
'SherpaOnnxAudioTaggingCompute')
.asFunction();
sherpaOnnxAudioTaggingFreeResults ??= dynamicLibrary
.lookup<NativeFunction<SherpaOnnxAudioTaggingFreeResultsNative>>(
'SherpaOnnxAudioTaggingFreeResults')
.asFunction();
createKeywordSpotter ??= dynamicLibrary
.lookup<NativeFunction<CreateKeywordSpotterNative>>(
'SherpaOnnxCreateKeywordSpotter')
... ...
name: audio_tagging
description: >
This example demonstrates how to use the Dart API for audio tagging.
version: 1.0.0
environment:
sdk: ^3.4.0
dependencies:
sherpa_onnx:
path: ../../flutter/sherpa_onnx
path: ^1.9.0
args: ^2.5.0
dev_dependencies:
lints: ^3.0.0
... ...