Fangjun Kuang
Committed by GitHub

Add Golang API for VAD (#708)

... ... @@ -86,3 +86,4 @@ vits-piper-*
vits-coqui-*
vits-mms-*
*.tar.bz2
sherpa-onnx-paraformer-trilingual-zh-cantonese-en
... ...
... ... @@ -6,4 +6,21 @@ Please refer to the documentation
https://k2-fsa.github.io/sherpa/onnx/go-api/index.html
for details.
- [./non-streaming-decode-files](./non-streaming-decode-files) It shows how to use
  a non-streaming ASR model to decode files.
- [./non-streaming-tts](./non-streaming-tts) It shows how to use a non-streaming TTS
  model to convert text to speech.
- [./real-time-speech-recognition-from-microphone](./real-time-speech-recognition-from-microphone)
  It shows how to use a streaming ASR model to recognize speech from a microphone in real time.
- [./vad](./vad) It shows how to use silero VAD with Golang (a minimal sketch follows below).
- [./vad-asr-whisper](./vad-asr-whisper) It shows how to use silero VAD + Whisper
  for speech recognition.
- [./vad-asr-paraformer](./vad-asr-paraformer) It shows how to use silero VAD + Paraformer
  for speech recognition.
[sherpa-onnx]: https://github.com/k2-fsa/sherpa-onnx
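For a quick impression of the Go VAD API used by the last three examples, here is a minimal sketch that runs the detector over an in-memory buffer of 16 kHz samples instead of a microphone. It uses only the API added in `sherpa_onnx.go` by this PR; the input buffer is placeholder silence, so replace it with real audio to see segments reported:

```go
package main

import (
	"log"

	sherpa "github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx"
)

func main() {
	config := sherpa.VadModelConfig{}
	config.SileroVad.Model = "./silero_vad.onnx"
	config.SileroVad.Threshold = 0.5
	config.SileroVad.MinSilenceDuration = 0.5
	config.SileroVad.MinSpeechDuration = 0.25
	config.SileroVad.WindowSize = 512
	config.SampleRate = 16000
	config.NumThreads = 1
	config.Provider = "cpu"

	vad := sherpa.NewVoiceActivityDetector(&config, 30)
	defer sherpa.DeleteVoiceActivityDetector(vad)

	// Placeholder input: 10 seconds of silence. Use real 16 kHz mono samples here.
	samples := make([]float32, 10*config.SampleRate)

	windowSize := config.SileroVad.WindowSize
	for i := 0; i+windowSize <= len(samples); i += windowSize {
		vad.AcceptWaveform(samples[i : i+windowSize])

		// Drain all speech segments the detector has completed so far.
		for !vad.IsEmpty() {
			segment := vad.Front()
			vad.Pop()

			start := float32(segment.Start) / float32(config.SampleRate)
			duration := float32(len(segment.Samples)) / float32(config.SampleRate)
			log.Printf("speech segment: start=%.2fs duration=%.2fs", start, duration)
		}
	}
}
```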
... ...
... ... @@ -57,8 +57,7 @@ func main() {
log.Println("Done!")
ok := audio.Save(filename)
if ok != 1 {
if !ok {
log.Fatalf("Failed to write", filename)
}
}
... ...
module vad-asr-paraformer
go 1.12
... ...
package main
import (
"fmt"
"github.com/gordonklaus/portaudio"
sherpa "github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx"
"log"
"strings"
)
func main() {
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
// 1. Create VAD
config := sherpa.VadModelConfig{}
// Please download silero_vad.onnx from
// https://github.com/snakers4/silero-vad/blob/master/files/silero_vad.onnx
config.SileroVad.Model = "./silero_vad.onnx"
config.SileroVad.Threshold = 0.5
config.SileroVad.MinSilenceDuration = 0.5
config.SileroVad.MinSpeechDuration = 0.25
config.SileroVad.WindowSize = 512
config.SampleRate = 16000
config.NumThreads = 1
config.Provider = "cpu"
config.Debug = 1
var bufferSizeInSeconds float32 = 20
vad := sherpa.NewVoiceActivityDetector(&config, bufferSizeInSeconds)
defer sherpa.DeleteVoiceActivityDetector(vad)
// 2. Create ASR recognizer
c := sherpa.OfflineRecognizerConfig{}
c.FeatConfig.SampleRate = 16000
c.FeatConfig.FeatureDim = 80
// Please download the model from
// https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-trilingual-zh-cantonese-en.tar.bz2
c.ModelConfig.Paraformer.Model = "./sherpa-onnx-paraformer-trilingual-zh-cantonese-en/model.int8.onnx"
c.ModelConfig.Tokens = "./sherpa-onnx-paraformer-trilingual-zh-cantonese-en/tokens.txt"
c.ModelConfig.NumThreads = 2
c.ModelConfig.Debug = 1
c.ModelConfig.Provider = "cpu"
recognizer := sherpa.NewOfflineRecognizer(&c)
defer sherpa.DeleteOfflineRecognizer(recognizer)
err := portaudio.Initialize()
if err != nil {
log.Fatalf("Unable to initialize portaudio: %v\n", err)
}
defer portaudio.Terminate()
default_device, err := portaudio.DefaultInputDevice()
if err != nil {
log.Fatal("Failed to get default input device: %v\n", err)
}
log.Printf("Selected default input device: %s\n", default_device.Name)
param := portaudio.StreamParameters{}
param.Input.Device = default_device
param.Input.Channels = 1
param.Input.Latency = default_device.DefaultHighInputLatency
param.SampleRate = float64(config.SampleRate)
param.FramesPerBuffer = 0
param.Flags = portaudio.ClipOff
// You can use a value other than 0.1 second if you want
samplesPerCall := int32(param.SampleRate * 0.1) // 0.1 second
samples := make([]float32, samplesPerCall)
s, err := portaudio.OpenStream(param, samples)
if err != nil {
log.Fatalf("Failed to open the stream")
}
defer s.Close()
chk(s.Start())
log.Print("Started! Please speak")
printed := false
k := 0
for {
chk(s.Read())
vad.AcceptWaveform(samples)
if vad.IsSpeech() && !printed {
printed = true
log.Print("Detected speech\n")
}
if !vad.IsSpeech() {
printed = false
}
for !vad.IsEmpty() {
speechSegment := vad.Front()
vad.Pop()
duration := float32(len(speechSegment.Samples)) / float32(config.SampleRate)
audio := &sherpa.GeneratedAudio{}
audio.Samples = speechSegment.Samples
audio.SampleRate = config.SampleRate
// Now decode it
go decode(recognizer, audio, k)
k += 1
log.Printf("Duration: %.2f seconds\n", duration)
}
}
chk(s.Stop())
}
func decode(recognizer *sherpa.OfflineRecognizer, audio *sherpa.GeneratedAudio, id int) {
stream := sherpa.NewOfflineStream(recognizer)
defer sherpa.DeleteOfflineStream(stream)
stream.AcceptWaveform(audio.SampleRate, audio.Samples)
recognizer.Decode(stream)
result := stream.GetResult()
text := strings.ToLower(result.Text)
text = strings.Trim(text, " ")
log.Println(text)
duration := float32(len(audio.Samples)) / float32(audio.SampleRate)
filename := fmt.Sprintf("seg-%d-%.2f-seconds-%s.wav", id, duration, text)
ok := audio.Save(filename)
if ok {
log.Printf("Saved to %s", filename)
}
log.Print("----------\n")
}
func chk(err error) {
if err != nil {
panic(err)
}
}
... ...
#!/usr/bin/env bash
if [ ! -f ./silero_vad.onnx ]; then
curl -SL -O https://github.com/snakers4/silero-vad/raw/master/files/silero_vad.onnx
fi
if [ ! -f ./sherpa-onnx-paraformer-trilingual-zh-cantonese-en/model.int8.onnx ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-trilingual-zh-cantonese-en.tar.bz2
tar xvf sherpa-onnx-paraformer-trilingual-zh-cantonese-en.tar.bz2
rm sherpa-onnx-paraformer-trilingual-zh-cantonese-en.tar.bz2
fi
go mod tidy
go build
./vad-asr-paraformer
... ...
module vad-asr-whisper
go 1.12
... ...
package main
import (
"fmt"
"github.com/gordonklaus/portaudio"
sherpa "github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx"
"log"
"strings"
)
func main() {
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
// 1. Create VAD
config := sherpa.VadModelConfig{}
// Please download silero_vad.onnx from
// https://github.com/snakers4/silero-vad/blob/master/files/silero_vad.onnx
config.SileroVad.Model = "./silero_vad.onnx"
config.SileroVad.Threshold = 0.5
config.SileroVad.MinSilenceDuration = 0.5
config.SileroVad.MinSpeechDuration = 0.25
config.SileroVad.WindowSize = 512
config.SampleRate = 16000
config.NumThreads = 1
config.Provider = "cpu"
config.Debug = 1
var bufferSizeInSeconds float32 = 20
vad := sherpa.NewVoiceActivityDetector(&config, bufferSizeInSeconds)
defer sherpa.DeleteVoiceActivityDetector(vad)
// 2. Create ASR recognizer
c := sherpa.OfflineRecognizerConfig{}
c.FeatConfig.SampleRate = 16000
c.FeatConfig.FeatureDim = 80
c.ModelConfig.Whisper.Encoder = "./sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx"
c.ModelConfig.Whisper.Decoder = "./sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx"
c.ModelConfig.Tokens = "./sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt"
c.ModelConfig.NumThreads = 2
c.ModelConfig.Debug = 1
c.ModelConfig.Provider = "cpu"
recognizer := sherpa.NewOfflineRecognizer(&c)
defer sherpa.DeleteOfflineRecognizer(recognizer)
err := portaudio.Initialize()
if err != nil {
log.Fatalf("Unable to initialize portaudio: %v\n", err)
}
defer portaudio.Terminate()
default_device, err := portaudio.DefaultInputDevice()
if err != nil {
log.Fatal("Failed to get default input device: %v\n", err)
}
log.Printf("Selected default input device: %s\n", default_device.Name)
param := portaudio.StreamParameters{}
param.Input.Device = default_device
param.Input.Channels = 1
param.Input.Latency = default_device.DefaultHighInputLatency
param.SampleRate = float64(config.SampleRate)
param.FramesPerBuffer = 0
param.Flags = portaudio.ClipOff
// You can use a value other than 0.1 second if you want
samplesPerCall := int32(param.SampleRate * 0.1) // 0.1 second
samples := make([]float32, samplesPerCall)
s, err := portaudio.OpenStream(param, samples)
if err != nil {
log.Fatalf("Failed to open the stream")
}
defer s.Close()
chk(s.Start())
log.Print("Started! Please speak")
printed := false
k := 0
for {
chk(s.Read())
vad.AcceptWaveform(samples)
if vad.IsSpeech() && !printed {
printed = true
log.Print("Detected speech\n")
}
if !vad.IsSpeech() {
printed = false
}
for !vad.IsEmpty() {
speechSegment := vad.Front()
vad.Pop()
duration := float32(len(speechSegment.Samples)) / float32(config.SampleRate)
audio := &sherpa.GeneratedAudio{}
audio.Samples = speechSegment.Samples
audio.SampleRate = config.SampleRate
// Now decode it
go decode(recognizer, audio, k)
k += 1
log.Printf("Duration: %.2f seconds\n", duration)
}
}
chk(s.Stop())
}
func decode(recognizer *sherpa.OfflineRecognizer, audio *sherpa.GeneratedAudio, id int) {
stream := sherpa.NewOfflineStream(recognizer)
defer sherpa.DeleteOfflineStream(stream)
stream.AcceptWaveform(audio.SampleRate, audio.Samples)
recognizer.Decode(stream)
result := stream.GetResult()
text := strings.ToLower(result.Text)
text = strings.Trim(text, " ")
log.Println(text)
duration := float32(len(audio.Samples)) / float32(audio.SampleRate)
filename := fmt.Sprintf("seg-%d-%.2f-seconds-%s.wav", id, duration, text)
ok := audio.Save(filename)
if ok {
log.Printf("Saved to %s", filename)
}
log.Print("----------\n")
}
func chk(err error) {
if err != nil {
panic(err)
}
}
... ...
#!/usr/bin/env bash
if [ ! -f ./silero_vad.onnx ]; then
curl -SL -O https://github.com/snakers4/silero-vad/raw/master/files/silero_vad.onnx
fi
if [ ! -f ./sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx ]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
rm sherpa-onnx-whisper-tiny.en.tar.bz2
fi
go mod tidy
go build
./vad-asr-whisper
... ...
module vad
go 1.12
... ...
package main
import (
"fmt"
"github.com/gordonklaus/portaudio"
sherpa "github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx"
"log"
)
func main() {
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
config := sherpa.VadModelConfig{}
// Please download silero_vad.onnx from
// https://github.com/snakers4/silero-vad/blob/master/files/silero_vad.onnx
config.SileroVad.Model = "./silero_vad.onnx"
config.SileroVad.Threshold = 0.5
config.SileroVad.MinSilenceDuration = 0.5
config.SileroVad.MinSpeechDuration = 0.25
config.SileroVad.WindowSize = 512
config.SampleRate = 16000
config.NumThreads = 1
config.Provider = "cpu"
config.Debug = 1
var bufferSizeInSeconds float32 = 5
vad := sherpa.NewVoiceActivityDetector(&config, bufferSizeInSeconds)
defer sherpa.DeleteVoiceActivityDetector(vad)
err := portaudio.Initialize()
if err != nil {
log.Fatalf("Unable to initialize portaudio: %v\n", err)
}
defer portaudio.Terminate()
default_device, err := portaudio.DefaultInputDevice()
if err != nil {
log.Fatal("Failed to get default input device: %v\n", err)
}
log.Printf("Selected default input device: %s\n", default_device.Name)
param := portaudio.StreamParameters{}
param.Input.Device = default_device
param.Input.Channels = 1
param.Input.Latency = default_device.DefaultLowInputLatency
param.SampleRate = float64(config.SampleRate)
param.FramesPerBuffer = 0
param.Flags = portaudio.ClipOff
// You can use a value other than 0.1 second if you want
samplesPerCall := int32(param.SampleRate * 0.1) // 0.1 second
samples := make([]float32, samplesPerCall)
s, err := portaudio.OpenStream(param, samples)
if err != nil {
log.Fatalf("Failed to open the stream")
}
defer s.Close()
chk(s.Start())
log.Print("Started! Please speak")
printed := false
k := 0
for {
chk(s.Read())
vad.AcceptWaveform(samples)
if vad.IsSpeech() && !printed {
printed = true
log.Print("Detected speech\n")
}
if !vad.IsSpeech() {
printed = false
}
for !vad.IsEmpty() {
speechSegment := vad.Front()
vad.Pop()
duration := float32(len(speechSegment.Samples)) / float32(config.SampleRate)
audio := sherpa.GeneratedAudio{}
audio.Samples = speechSegment.Samples
audio.SampleRate = config.SampleRate
filename := fmt.Sprintf("seg-%d-%.2f-seconds.wav", k, duration)
ok := audio.Save(filename)
if ok {
log.Printf("Saved to %s", filename)
}
k += 1
log.Printf("Duration: %.2f seconds\n", duration)
log.Print("----------\n")
}
}
chk(s.Stop())
}
func chk(err error) {
if err != nil {
panic(err)
}
}
... ...
#!/usr/bin/env bash
if [ ! -f ./silero_vad.onnx ]; then
curl -SL -O https://github.com/snakers4/silero-vad/raw/master/files/silero_vad.onnx
fi
go mod tidy
go build
./vad
... ...
... ... @@ -235,6 +235,12 @@ def get_vits_models() -> List[TtsModel]:
return [
# Chinese
TtsModel(
model_dir="vits-icefall-zh-aishell3",
model_name="model.onnx",
lang="zh",
rule_fsts="vits-icefall-zh-aishell3/phone.fst,vits-icefall-zh-aishell3/date.fst,vits-icefall-zh-aishell3/rule.fst",
),
TtsModel(
model_dir="vits-zh-aishell3",
model_name="vits-aishell3.onnx",
lang="zh",
... ...
module vad-asr-paraformer
go 1.12
replace github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx => ../
require (
github.com/gordonklaus/portaudio v0.0.0-20230709114228-aafa478834f5
github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx v0.0.0-00010101000000-000000000000
)
... ...
../../../../go-api-examples/vad-asr-paraformer/main.go
\ No newline at end of file
... ...
../../../../go-api-examples/vad-asr-paraformer/run.sh
\ No newline at end of file
... ...
module vad-asr-whisper
go 1.12
replace github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx => ../
... ...
../../../../go-api-examples/vad-asr-whisper/main.go
\ No newline at end of file
... ...
../../../../go-api-examples/vad-asr-whisper/run.sh
\ No newline at end of file
... ...
module vad
go 1.12
replace github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx => ../
... ...
../../../../go-api-examples/vad/main.go
\ No newline at end of file
... ...
../../../../go-api-examples/vad/run.sh
\ No newline at end of file
... ...
... ... @@ -614,6 +614,9 @@ func (tts *OfflineTts) Generate(text string, sid int, speed float32) *GeneratedA
ans.SampleRate = int(audio.sample_rate)
n := int(audio.n)
ans.Samples = make([]float32, n)
// See https://stackoverflow.com/questions/48756732/what-does-1-30c-yourtype-do-exactly-in-cgo
// [:n:n] is shorthand for [0:n:n], i.e., low:high:max, so the resulting slice has both
// length and capacity n and shares memory with the C array (no copy is made).
samples := (*[1 << 28]C.float)(unsafe.Pointer(audio.samples))[:n:n]
// copy(ans.Samples, samples)
for i := 0; i < n; i++ {
... ... @@ -623,11 +626,160 @@ func (tts *OfflineTts) Generate(text string, sid int, speed float32) *GeneratedA
return ans
}
func (audio *GeneratedAudio) Save(filename string) int {
func (audio *GeneratedAudio) Save(filename string) bool {
s := C.CString(filename)
defer C.free(unsafe.Pointer(s))
ok := int(C.SherpaOnnxWriteWave((*C.float)(&audio.Samples[0]), C.int(len(audio.Samples)), C.int(audio.SampleRate), s))
return ok
return ok == 1
}
// ============================================================
// For VAD
// ============================================================
// SileroVadModelConfig contains the configuration for the silero VAD model.
type SileroVadModelConfig struct {
Model string // Path to silero_vad.onnx
Threshold float32 // Speech probability threshold in [0, 1]; 0.5 is a reasonable default
MinSilenceDuration float32 // In seconds. Silence shorter than this does not end a speech segment
MinSpeechDuration float32 // In seconds. Detected speech shorter than this is discarded
WindowSize int // Number of samples fed to the model per call, e.g., 512 for 16 kHz audio
}
// VadModelConfig contains the configuration for the voice activity detector.
type VadModelConfig struct {
SileroVad SileroVadModelConfig
SampleRate int // Sample rate of the input audio, e.g., 16000
NumThreads int // Number of threads used by onnxruntime
Provider string // Execution provider, e.g., "cpu"
Debug int // 1 to print debug information; 0 otherwise
}
type CircularBuffer struct {
impl *C.struct_SherpaOnnxCircularBuffer
}
func DeleteCircularBuffer(buffer *CircularBuffer) {
C.SherpaOnnxDestroyCircularBuffer(buffer.impl)
buffer.impl = nil
}
func NewCircularBuffer(capacity int) *CircularBuffer {
circularBuffer := &CircularBuffer{}
circularBuffer.impl = C.SherpaOnnxCreateCircularBuffer(C.int(capacity))
return circularBuffer
}
func (buffer *CircularBuffer) Push(samples []float32) {
C.SherpaOnnxCircularBufferPush(buffer.impl, (*C.float)(&samples[0]), C.int(len(samples)))
}
func (buffer *CircularBuffer) Get(start int, n int) []float32 {
samples := C.SherpaOnnxCircularBufferGet(buffer.impl, C.int(start), C.int(n))
defer C.SherpaOnnxCircularBufferFree(samples)
result := make([]float32, n)
p := (*[1 << 28]C.float)(unsafe.Pointer(samples))[:n:n]
for i := 0; i < n; i++ {
result[i] = float32(p[i])
}
return result
}
func (buffer *CircularBuffer) Pop(n int) {
C.SherpaOnnxCircularBufferPop(buffer.impl, C.int(n))
}
func (buffer *CircularBuffer) Size() int {
return int(C.SherpaOnnxCircularBufferSize(buffer.impl))
}
func (buffer *CircularBuffer) Head() int {
return int(C.SherpaOnnxCircularBufferHead(buffer.impl))
}
func (buffer *CircularBuffer) Reset() {
C.SherpaOnnxCircularBufferReset(buffer.impl)
}
// SpeechSegment is one segment of detected speech.
type SpeechSegment struct {
Start int // Start of the segment, in samples
Samples []float32 // Samples of the segment
}
type VoiceActivityDetector struct {
impl *C.struct_SherpaOnnxVoiceActivityDetector
}
func NewVoiceActivityDetector(config *VadModelConfig, bufferSizeInSeconds float32) *VoiceActivityDetector {
c := C.struct_SherpaOnnxVadModelConfig{}
c.silero_vad.model = C.CString(config.SileroVad.Model)
defer C.free(unsafe.Pointer(c.silero_vad.model))
c.silero_vad.threshold = C.float(config.SileroVad.Threshold)
c.silero_vad.min_silence_duration = C.float(config.SileroVad.MinSilenceDuration)
c.silero_vad.min_speech_duration = C.float(config.SileroVad.MinSpeechDuration)
c.silero_vad.window_size = C.int(config.SileroVad.WindowSize)
c.sample_rate = C.int(config.SampleRate)
c.num_threads = C.int(config.NumThreads)
c.provider = C.CString(config.Provider)
defer C.free(unsafe.Pointer(c.provider))
c.debug = C.int(config.Debug)
vad := &VoiceActivityDetector{}
vad.impl = C.SherpaOnnxCreateVoiceActivityDetector(&c, C.float(bufferSizeInSeconds))
return vad
}
func DeleteVoiceActivityDetector(vad *VoiceActivityDetector) {
C.SherpaOnnxDestroyVoiceActivityDetector(vad.impl)
vad.impl = nil
}
func (vad *VoiceActivityDetector) AcceptWaveform(samples []float32) {
C.SherpaOnnxVoiceActivityDetectorAcceptWaveform(vad.impl, (*C.float)(&samples[0]), C.int(len(samples)))
}
// IsEmpty returns true if no completed speech segment is waiting to be read.
func (vad *VoiceActivityDetector) IsEmpty() bool {
return 1 == int(C.SherpaOnnxVoiceActivityDetectorEmpty(vad.impl))
}
// IsSpeech returns true if the detector is currently detecting speech.
func (vad *VoiceActivityDetector) IsSpeech() bool {
return 1 == int(C.SherpaOnnxVoiceActivityDetectorDetected(vad.impl))
}
func (vad *VoiceActivityDetector) Pop() {
C.SherpaOnnxVoiceActivityDetectorPop(vad.impl)
}
func (vad *VoiceActivityDetector) Clear() {
C.SherpaOnnxVoiceActivityDetectorClear(vad.impl)
}
func (vad *VoiceActivityDetector) Front() *SpeechSegment {
f := C.SherpaOnnxVoiceActivityDetectorFront(vad.impl)
defer C.SherpaOnnxDestroySpeechSegment(f)
ans := &SpeechSegment{}
ans.Start = int(f.start)
n := int(f.n)
ans.Samples = make([]float32, n)
samples := (*[1 << 28]C.float)(unsafe.Pointer(f.samples))[:n:n]
for i := 0; i < n; i++ {
ans.Samples[i] = float32(samples[i])
}
return ans
}
func (vad *VoiceActivityDetector) Reset() {
C.SherpaOnnxVoiceActivityDetectorReset(vad.impl)
}
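The CircularBuffer wrapper added above is exported on its own and is not used directly by the examples in this PR. Here is a minimal sketch of its push/read/pop cycle; the capacity and sample values are arbitrary:

```go
package main

import (
	"fmt"

	sherpa "github.com/k2-fsa/sherpa-onnx-go/sherpa_onnx"
)

func main() {
	// Capacity is given in samples.
	buffer := sherpa.NewCircularBuffer(16000)
	defer sherpa.DeleteCircularBuffer(buffer)

	buffer.Push([]float32{0.1, 0.2, 0.3, 0.4})
	fmt.Println("size:", buffer.Size(), "head:", buffer.Head())

	// Read two samples starting at the current head without removing them ...
	first := buffer.Get(buffer.Head(), 2)
	fmt.Println(first)

	// ... then discard them. Head advances by 2 and Size decreases by 2.
	buffer.Pop(2)
	fmt.Println("size:", buffer.Size(), "head:", buffer.Head())

	// Reset clears the buffer entirely.
	buffer.Reset()
}
```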
... ...
... ... @@ -309,6 +309,9 @@ SherpaOnnxOfflineRecognizer *CreateOfflineRecognizer(
recognizer_config.model_config.whisper.task =
SHERPA_ONNX_OR(config->model_config.whisper.task, "transcribe");
if (recognizer_config.model_config.whisper.task.empty()) {
recognizer_config.model_config.whisper.task = "transcribe";
}
recognizer_config.model_config.tdnn.model =
SHERPA_ONNX_OR(config->model_config.tdnn.model, "");
... ... @@ -331,6 +334,11 @@ SherpaOnnxOfflineRecognizer *CreateOfflineRecognizer(
recognizer_config.decoding_method =
SHERPA_ONNX_OR(config->decoding_method, "greedy_search");
if (recognizer_config.decoding_method.empty()) {
recognizer_config.decoding_method = "greedy_search";
}
recognizer_config.max_active_paths =
SHERPA_ONNX_OR(config->max_active_paths, 4);
... ...