Fangjun Kuang
Committed by GitHub

Play generated audio as it is being generated. (#457)

... ... @@ -143,6 +143,7 @@ class BuildExtension(build_ext):
binaries += ["sherpa-onnx-vad-microphone"]
binaries += ["sherpa-onnx-vad-microphone-offline-asr"]
binaries += ["sherpa-onnx-offline-tts"]
binaries += ["sherpa-onnx-offline-tts-play"]
if is_windows():
binaries += ["kaldi-native-fbank-core.dll"]
... ...
... ... @@ -5,6 +5,7 @@ function(download_espeak_ng_for_piper)
set(espeak_ng_URL2 "")
set(espeak_ng_HASH "SHA256=8a48251e6926133dd91fcf6cb210c7c2e290a9b578d269446e2d32d710b0dfa0")
set(BUILD_ESPEAK_NG_TESTS OFF CACHE BOOL "" FORCE)
set(USE_ASYNC OFF CACHE BOOL "" FORCE)
set(USE_MBROLA OFF CACHE BOOL "" FORCE)
set(USE_LIBSONIC OFF CACHE BOOL "" FORCE)
... ... @@ -106,10 +107,12 @@ function(download_espeak_ng_for_piper)
if(SHERPA_ONNX_ENABLE_PYTHON AND WIN32)
install(TARGETS
espeak-ng
ucd
DESTINATION ..)
else()
install(TARGETS
espeak-ng
ucd
DESTINATION lib)
endif()
... ... @@ -120,6 +123,7 @@ function(download_espeak_ng_for_piper)
if(WIN32 AND BUILD_SHARED_LIBS)
install(TARGETS
espeak-ng
ucd
DESTINATION bin)
endif()
endfunction()
... ...
... ... @@ -14,6 +14,9 @@
sherpa-onnx-fst.lib;
kaldi-native-fbank-core.lib;
onnxruntime.lib;
piper_phonemize.lib;
espeak-ng.lib;
ucd.lib;
</SherpaOnnxLibraries>
</PropertyGroup>
<ItemDefinitionGroup>
... ...
... ... @@ -14,6 +14,9 @@
sherpa-onnx-fst.lib;
kaldi-native-fbank-core.lib;
onnxruntime.lib;
piper_phonemize.lib;
espeak-ng.lib;
ucd.lib;
</SherpaOnnxLibraries>
</PropertyGroup>
<ItemDefinitionGroup>
... ...
... ... @@ -14,6 +14,9 @@
sherpa-onnx-fst.lib;
kaldi-native-fbank-core.lib;
onnxruntime.lib;
piper_phonemize.lib;
espeak-ng.lib;
ucd.lib;
</SherpaOnnxLibraries>
</PropertyGroup>
<ItemDefinitionGroup>
... ...
#!/usr/bin/env python3
#
# Copyright (c) 2023 Xiaomi Corporation

"""
This file demonstrates how to use sherpa-onnx Python API to generate audio
from text, i.e., text-to-speech.

Different from ./offline-tts.py, this file plays back the generated audio
while the model is still generating.

Usage:

Example (1/2)

wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xf vits-piper-en_US-amy-low.tar.bz2

python3 ./python-api-examples/offline-tts-play.py \
    --vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \
    --vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \
    --vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \
    --output-filename=./generated.wav \
    "Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar."

Example (2/2)

wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-zh-aishell3.tar.bz2
tar xvf vits-zh-aishell3.tar.bz2

python3 ./python-api-examples/offline-tts-play.py \
    --vits-model=./vits-aishell3.onnx \
    --vits-lexicon=./lexicon.txt \
    --vits-tokens=./tokens.txt \
    --tts-rule-fsts=./rule.fst \
    --sid=21 \
    --output-filename=./liubei-21.wav \
    "勿以恶小而为之,勿以善小而不为。惟贤惟德,能服于人。122334"

You can find more models at
https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models

Please see
https://k2-fsa.github.io/sherpa/onnx/tts/index.html
for details.
"""

import argparse
import logging
import queue
import sys
import threading
import time

import numpy as np
import sherpa_onnx
import soundfile as sf

try:
    import sounddevice as sd
except ImportError:
    print("Please install sounddevice first. You can use")
    print()
    print("  pip install sounddevice")
    print()
    print("to install it")
    sys.exit(-1)


def get_args():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--vits-model",
        type=str,
        help="Path to vits model.onnx",
    )

    parser.add_argument(
        "--vits-lexicon",
        type=str,
        default="",
        help="Path to lexicon.txt",
    )

    parser.add_argument(
        "--vits-tokens",
        type=str,
        default="",
        help="Path to tokens.txt",
    )

    parser.add_argument(
        "--vits-data-dir",
        type=str,
        default="",
        help="""Path to the dict directory of espeak-ng. If it is specified,
        --vits-lexicon and --vits-tokens are ignored""",
    )

    parser.add_argument(
        "--tts-rule-fsts",
        type=str,
        default="",
        help="Path to rule.fst",
    )

    parser.add_argument(
        "--output-filename",
        type=str,
        default="./generated.wav",
        help="Path to save generated wave",
    )

    parser.add_argument(
        "--sid",
        type=int,
        default=0,
        help="""Speaker ID. Used only for multi-speaker models, e.g.
        models trained using the VCTK dataset. Not used for single-speaker
        models, e.g., models trained using the LJ speech dataset.
        """,
    )

    parser.add_argument(
        "--debug",
        type=bool,
        default=False,
        help="True to show debug messages",
    )

    parser.add_argument(
        "--provider",
        type=str,
        default="cpu",
        help="valid values: cpu, cuda, coreml",
    )

    parser.add_argument(
        "--num-threads",
        type=int,
        default=1,
        help="Number of threads for neural network computation",
    )

    parser.add_argument(
        "--speed",
        type=float,
        default=1.0,
        help="Speech speed. Larger->faster; smaller->slower",
    )

    parser.add_argument(
        "text",
        type=str,
        help="The input text to generate audio for",
    )

    return parser.parse_args()


# buffer saves audio samples to be played
buffer = queue.Queue()

# started is set to True once generated_audio_callback is called.
started = False

# stopped is set to True once all the text has been processed
stopped = False

# killed is set to True once ctrl + C is pressed
killed = False

# Note: When started is True, stopped is True, and buffer is empty,
# we will exit the program since all audio samples have been played.

sample_rate = None

event = threading.Event()


def generated_audio_callback(samples: np.ndarray):
    """This function is called whenever max_num_sentences sentences
    have been processed.

    Note that it is passed to C++ and is invoked in C++.

    Args:
      samples:
        A 1-D np.float32 array containing audio samples.
    """
    buffer.put(samples)
    global started

    if started is False:
        logging.info("Start playing ...")
    started = True


# see https://python-sounddevice.readthedocs.io/en/0.4.6/api/streams.html#sounddevice.OutputStream
def play_audio_callback(
    outdata: np.ndarray, frames: int, time, status: sd.CallbackFlags
):
    if killed or (started and buffer.empty() and stopped):
        event.set()

    # outdata is of shape (frames, num_channels)
    if buffer.empty():
        outdata.fill(0)
        return

    n = 0
    while n < frames and not buffer.empty():
        remaining = frames - n
        k = buffer.queue[0].shape[0]

        if remaining <= k:
            outdata[n:, 0] = buffer.queue[0][:remaining]
            buffer.queue[0] = buffer.queue[0][remaining:]
            n = frames
            if buffer.queue[0].shape[0] == 0:
                buffer.get()
            break

        outdata[n : n + k, 0] = buffer.get()
        n += k

    if n < frames:
        outdata[n:, 0] = 0


# Please see
# https://python-sounddevice.readthedocs.io/en/0.4.6/usage.html#device-selection
# for how to select a device
def play_audio():
    if False:
        # This if branch can be safely removed. It is here to show you how to
        # change the default output device in case you need that.
        devices = sd.query_devices()
        print(devices)

        # sd.default.device[1] is the output device. If you want to
        # select a different device, say, 3, as the output device, please
        # use sd.default.device[1] = 3
        default_output_device_idx = sd.default.device[1]
        print(
            f'Use default output device: {devices[default_output_device_idx]["name"]}'
        )

    with sd.OutputStream(
        channels=1,
        callback=play_audio_callback,
        dtype="float32",
        samplerate=sample_rate,
        blocksize=1024,
    ):
        event.wait()

    logging.info("Exiting ...")


def main():
    args = get_args()
    print(args)

    tts_config = sherpa_onnx.OfflineTtsConfig(
        model=sherpa_onnx.OfflineTtsModelConfig(
            vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                model=args.vits_model,
                lexicon=args.vits_lexicon,
                data_dir=args.vits_data_dir,
                tokens=args.vits_tokens,
            ),
            provider=args.provider,
            debug=args.debug,
            num_threads=args.num_threads,
        ),
        rule_fsts=args.tts_rule_fsts,
        max_num_sentences=1,
    )
    if not tts_config.validate():
        raise ValueError("Please check your config")

    logging.info("Loading model ...")
    tts = sherpa_onnx.OfflineTts(tts_config)
    logging.info("Loading model done.")

    global sample_rate
    sample_rate = tts.sample_rate

    play_back_thread = threading.Thread(target=play_audio)
    play_back_thread.start()

    logging.info("Start generating ...")
    start = time.time()
    audio = tts.generate(
        args.text,
        sid=args.sid,
        speed=args.speed,
        callback=generated_audio_callback,
    )
    end = time.time()
    logging.info("Finished generating!")
    global stopped
    stopped = True

    if len(audio.samples) == 0:
        print("Error in generating audio. Please read previous error messages.")
        return

    elapsed_seconds = end - start
    audio_duration = len(audio.samples) / audio.sample_rate
    real_time_factor = elapsed_seconds / audio_duration

    sf.write(
        args.output_filename,
        audio.samples,
        samplerate=audio.sample_rate,
        subtype="PCM_16",
    )
    logging.info(f"The text is '{args.text}'")
    logging.info(f"Elapsed seconds: {elapsed_seconds:.3f}")
    logging.info(f"Audio duration in seconds: {audio_duration:.3f}")
    logging.info(
        f"RTF: {elapsed_seconds:.3f}/{audio_duration:.3f} = {real_time_factor:.3f}"
    )
    logging.info(f"*** Saved to {args.output_filename} ***")

    print("\n >>>>>>>>> You can safely press ctrl + C to stop the playback <<<<<<<<<<\n")

    play_back_thread.join()


if __name__ == "__main__":
    formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(format=formatter, level=logging.INFO)

    try:
        main()
    except KeyboardInterrupt:
        print("\nCaught Ctrl + C. Exiting")
        killed = True
        sys.exit(0)
... ...
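A note on play_audio_callback above: it slices variable-length generated chunks into fixed-size output frames, keeping any unconsumed tail at the front of the queue and zero-padding when the producer falls behind. Below is a minimal standalone sketch of that chunking logic (not part of this PR; the chunk and frame sizes are made up for illustration):

import queue

import numpy as np


def pull_frame(buffer: queue.Queue, frames: int) -> np.ndarray:
    """Fill one fixed-size frame from a queue of variable-length chunks.

    Mirrors play_audio_callback: consume whole chunks until the frame is
    full, keep the unconsumed tail of a chunk at the front of the queue,
    and zero-pad if the queue runs dry.
    """
    out = np.zeros(frames, dtype=np.float32)
    n = 0
    while n < frames and not buffer.empty():
        head = buffer.queue[0]
        remaining = frames - n
        if remaining <= head.shape[0]:
            out[n:] = head[:remaining]
            buffer.queue[0] = head[remaining:]
            if buffer.queue[0].shape[0] == 0:
                buffer.get()
            return out
        out[n : n + head.shape[0]] = buffer.get()
        n += head.shape[0]
    return out  # tail stays zero if the producer is behind


if __name__ == "__main__":
    q = queue.Queue()
    for size in (300, 1500, 700):  # variable-length "generated" chunks
        q.put(np.full(size, fill_value=size, dtype=np.float32))
    while not q.empty():
        frame = pull_frame(q, 1024)
        print(frame[:3], frame[-3:])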
... ... @@ -6,29 +6,30 @@
This file demonstrates how to use sherpa-onnx Python API to generate audio
from text, i.e., text-to-speech.
Different from ./offline-tts-play.py, this file does not play back the
generated audio.
Usage:
1. Download a model
Example (1/2)
wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/vits-ljs.onnx
wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/lexicon.txt
wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/tokens.txt
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xf vits-piper-en_US-amy-low.tar.bz2
python3 ./python-api-examples/offline-tts.py \
--vits-model=./vits-ljs.onnx \
--vits-lexicon=./lexicon.txt \
--vits-tokens=./tokens.txt \
--output-filename=./generated.wav \
'liliana, the most beautiful and lovely assistant of our team!'
--vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \
--vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \
--vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \
--output-filename=./generated.wav \
"Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar."
2. Download a model
Example (2/2)
wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/vits-aishell3.onnx
wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/lexicon.txt
wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/tokens.txt
wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/rule.fst
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-zh-aishell3.tar.bz2
tar xvf vits-zh-aishell3.tar.bz2
python3 ./python-api-examples/offline-tts.py
python3 ./python-api-examples/offline-tts.py \
--vits-model=./vits-aishell3.onnx \
--vits-lexicon=./lexicon.txt \
--vits-tokens=./tokens.txt \
... ... @@ -37,9 +38,13 @@ python3 ./python-api-examples/offline-tts.py
--output-filename=./liubei-21.wav \
"勿以恶小而为之,勿以善小而不为。惟贤惟德,能服于人。122334"
You can find more models at
https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
Please see
https://k2-fsa.github.io/sherpa/onnx/tts/index.html
for details.
"""
import argparse
... ...
... ... @@ -59,6 +59,7 @@ def get_binaries_to_install():
binaries += ["sherpa-onnx-vad-microphone"]
binaries += ["sherpa-onnx-vad-microphone-offline-asr"]
binaries += ["sherpa-onnx-offline-tts"]
binaries += ["sherpa-onnx-offline-tts-play"]
if is_windows():
binaries += ["kaldi-native-fbank-core.dll"]
binaries += ["sherpa-onnx-c-api.dll"]
... ...
... ... @@ -575,10 +575,22 @@ SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts(
void SherpaOnnxDestroyOfflineTts(SherpaOnnxOfflineTts *tts) { delete tts; }
int32_t SherpaOnnxOfflineTtsSampleRate(const SherpaOnnxOfflineTts *tts) {
return tts->impl->SampleRate();
}
const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate(
const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid,
float speed) {
sherpa_onnx::GeneratedAudio audio = tts->impl->Generate(text, sid, speed);
return SherpaOnnxOfflineTtsGenerateWithCallback(tts, text, sid, speed,
nullptr);
}
const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerateWithCallback(
const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, float speed,
SherpaOnnxGeneratedAudioCallback callback) {
sherpa_onnx::GeneratedAudio audio =
tts->impl->Generate(text, sid, speed, callback);
if (audio.samples.empty()) {
return nullptr;
... ... @@ -596,7 +608,7 @@ const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate(
return ans;
}
SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTtsGeneratedAudio(
void SherpaOnnxDestroyOfflineTtsGeneratedAudio(
const SherpaOnnxGeneratedAudio *p) {
if (p) {
delete[] p->samples;
... ...
... ... @@ -633,6 +633,9 @@ SHERPA_ONNX_API typedef struct SherpaOnnxGeneratedAudio {
int32_t sample_rate;
} SherpaOnnxGeneratedAudio;
typedef void (*SherpaOnnxGeneratedAudioCallback)(const float *samples,
int32_t n);
SHERPA_ONNX_API typedef struct SherpaOnnxOfflineTts SherpaOnnxOfflineTts;
// Create an instance of offline TTS. The user has to use DestroyOfflineTts()
... ... @@ -643,13 +646,26 @@ SHERPA_ONNX_API SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts(
// Free the pointer returned by CreateOfflineTts()
SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTts(SherpaOnnxOfflineTts *tts);
// Return the sample rate of the current TTS object
SHERPA_ONNX_API int32_t
SherpaOnnxOfflineTtsSampleRate(const SherpaOnnxOfflineTts *tts);
// Generate audio from the given text and speaker id (sid).
// The user has to use DestroyOfflineTtsGeneratedAudio() to free the returned
// pointer to avoid memory leak.
// The user has to use DestroyOfflineTtsGeneratedAudio() to free the
// returned pointer to avoid memory leak.
SHERPA_ONNX_API const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate(
const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid,
float speed);
// callback is called whenever SherpaOnnxOfflineTtsConfig.max_num_sentences
// sentences have been processed. The pointer passed to the callback
// is freed once the callback returns, so the caller should not keep
// a reference to it.
SHERPA_ONNX_API const SherpaOnnxGeneratedAudio *
SherpaOnnxOfflineTtsGenerateWithCallback(
const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, float speed,
SherpaOnnxGeneratedAudioCallback callback);
SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTtsGeneratedAudio(
const SherpaOnnxGeneratedAudio *p);
... ...
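For readers of the C API, here is a rough ctypes sketch (not part of this PR) of registering the new callback. Only the functions and the callback signature shown in the header above are used; the shared library name (the diff only mentions the Windows name sherpa-onnx-c-api.dll) and the creation of the tts handle are assumptions:

import ctypes

# Assumed library name; depends on your platform and install layout.
lib = ctypes.cdll.LoadLibrary("libsherpa-onnx-c-api.so")

# Matches the typedef above: void (*)(const float *samples, int32_t n)
GeneratedAudioCallback = ctypes.CFUNCTYPE(
    None, ctypes.POINTER(ctypes.c_float), ctypes.c_int32
)


@GeneratedAudioCallback
def on_audio(samples, n):
    # The samples pointer is freed once this callback returns,
    # so copy the data if you need it later.
    chunk = [samples[i] for i in range(n)]
    print(f"received {len(chunk)} samples")


lib.SherpaOnnxOfflineTtsGenerateWithCallback.argtypes = [
    ctypes.c_void_p,         # const SherpaOnnxOfflineTts *tts
    ctypes.c_char_p,         # const char *text
    ctypes.c_int32,          # int32_t sid
    ctypes.c_float,          # float speed
    GeneratedAudioCallback,  # callback
]
lib.SherpaOnnxOfflineTtsGenerateWithCallback.restype = ctypes.c_void_p
lib.SherpaOnnxDestroyOfflineTtsGeneratedAudio.argtypes = [ctypes.c_void_p]

# Placeholder: in real use, obtain this from SherpaOnnxCreateOfflineTts()
# with a populated config (omitted here).
tts = ctypes.c_void_p()

audio = lib.SherpaOnnxOfflineTtsGenerateWithCallback(
    tts, b"hello", 0, 1.0, on_audio
)
lib.SherpaOnnxDestroyOfflineTtsGeneratedAudio(audio)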
... ... @@ -165,30 +165,26 @@ add_executable(sherpa-onnx-offline sherpa-onnx-offline.cc)
add_executable(sherpa-onnx-offline-parallel sherpa-onnx-offline-parallel.cc)
add_executable(sherpa-onnx-offline-tts sherpa-onnx-offline-tts.cc)
set(main_exes
sherpa-onnx
sherpa-onnx-offline
sherpa-onnx-offline-parallel
sherpa-onnx-offline-tts
)
target_link_libraries(sherpa-onnx sherpa-onnx-core)
target_link_libraries(sherpa-onnx-offline sherpa-onnx-core)
target_link_libraries(sherpa-onnx-offline-parallel sherpa-onnx-core)
target_link_libraries(sherpa-onnx-offline-tts sherpa-onnx-core)
if(NOT WIN32)
target_link_libraries(sherpa-onnx "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib")
target_link_libraries(sherpa-onnx "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib")
target_link_libraries(sherpa-onnx-offline "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib")
target_link_libraries(sherpa-onnx-offline "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib")
target_link_libraries(sherpa-onnx-offline-parallel "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib")
target_link_libraries(sherpa-onnx-offline-parallel "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib")
foreach(exe IN LISTS main_exes)
target_link_libraries(${exe} sherpa-onnx-core)
endforeach()
target_link_libraries(sherpa-onnx-offline-tts "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib")
target_link_libraries(sherpa-onnx-offline-tts "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib")
if(NOT WIN32)
foreach(exe IN LISTS main_exes)
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib")
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib")
if(SHERPA_ONNX_ENABLE_PYTHON)
target_link_libraries(sherpa-onnx "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
target_link_libraries(sherpa-onnx-offline "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
target_link_libraries(sherpa-onnx-offline-parallel "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
target_link_libraries(sherpa-onnx-offline-tts "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
endif()
if(SHERPA_ONNX_ENABLE_PYTHON)
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
endif()
endforeach()
endif()
if(SHERPA_ONNX_ENABLE_PYTHON AND WIN32)
... ... @@ -203,10 +199,7 @@ endif()
install(
TARGETS
sherpa-onnx
sherpa-onnx-offline
sherpa-onnx-offline-parallel
sherpa-onnx-offline-tts
${main_exes}
DESTINATION
bin
)
... ... @@ -224,6 +217,11 @@ if(SHERPA_ONNX_HAS_ALSA)
endif()
if(SHERPA_ONNX_ENABLE_PORTAUDIO)
add_executable(sherpa-onnx-offline-tts-play
sherpa-onnx-offline-tts-play.cc
microphone.cc
)
add_executable(sherpa-onnx-microphone
sherpa-onnx-microphone.cc
microphone.cc
... ... @@ -251,6 +249,7 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO)
endif()
set(exes
sherpa-onnx-offline-tts-play
sherpa-onnx-microphone
sherpa-onnx-microphone-offline
sherpa-onnx-vad-microphone
... ... @@ -267,7 +266,6 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO)
endforeach()
if(SHERPA_ONNX_ENABLE_PYTHON)
foreach(exe IN LISTS exes)
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
endforeach()
... ... @@ -343,7 +341,6 @@ if(SHERPA_ONNX_ENABLE_WEBSOCKET)
)
endif()
if(SHERPA_ONNX_ENABLE_TESTS)
set(sherpa_onnx_test_srcs
cat-test.cc
... ...
... ... @@ -28,8 +28,12 @@ class OfflineTtsImpl {
const OfflineTtsConfig &config);
#endif
virtual GeneratedAudio Generate(const std::string &text, int64_t sid = 0,
float speed = 1.0) const = 0;
virtual GeneratedAudio Generate(
const std::string &text, int64_t sid = 0, float speed = 1.0,
GeneratedAudioCallback callback = nullptr) const = 0;
// Return the sample rate of the generated audio
virtual int32_t SampleRate() const = 0;
};
} // namespace sherpa_onnx
... ...
... ... @@ -69,8 +69,11 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
}
#endif
GeneratedAudio Generate(const std::string &_text, int64_t sid = 0,
float speed = 1.0) const override {
int32_t SampleRate() const override { return model_->SampleRate(); }
GeneratedAudio Generate(
const std::string &_text, int64_t sid = 0, float speed = 1.0,
GeneratedAudioCallback callback = nullptr) const override {
int32_t num_speakers = model_->NumSpeakers();
if (num_speakers == 0 && sid != 0) {
SHERPA_ONNX_LOGE(
... ... @@ -118,7 +121,11 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
int32_t x_size = static_cast<int32_t>(x.size());
if (config_.max_num_sentences <= 0 || x_size <= config_.max_num_sentences) {
return Process(x, sid, speed);
auto ans = Process(x, sid, speed);
if (callback) {
callback(ans.samples.data(), ans.samples.size());
}
return ans;
}
// the input text is too long, we process sentences within it in batches
... ... @@ -149,6 +156,12 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
ans.sample_rate = audio.sample_rate;
ans.samples.insert(ans.samples.end(), audio.samples.begin(),
audio.samples.end());
if (callback) {
callback(audio.samples.data(), audio.samples.size());
// Caution(fangjun): audio is freed when the callback returns, so users
// should copy the data if they want to access it after
// the callback returns, to avoid a segmentation fault.
}
}
batch.clear();
... ... @@ -162,6 +175,12 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
ans.sample_rate = audio.sample_rate;
ans.samples.insert(ans.samples.end(), audio.samples.begin(),
audio.samples.end());
if (callback) {
callback(audio.samples.data(), audio.samples.size());
// Caution(fangjun): audio is freed when the callback returns, so users
// should copy the data if they want to access it after
// the callback returns, to avoid a segmentation fault.
}
}
return ans;
... ...
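The batching flow in Generate above is easier to follow with the model details stripped away. A hypothetical Python sketch of the same control flow: accumulate sentences into batches of max_num_sentences, invoke the callback with each batch's samples, and concatenate everything for the return value (all names below are invented for illustration):

from typing import Callable, List, Optional


def generate(
    sentences: List[str],
    max_num_sentences: int,
    process: Callable[[List[str]], List[float]],
    callback: Optional[Callable[[List[float]], None]] = None,
) -> List[float]:
    """Process sentences in batches, firing the callback after each batch."""
    ans: List[float] = []
    batch: List[str] = []
    for s in sentences:
        batch.append(s)
        if len(batch) == max_num_sentences:
            samples = process(batch)
            ans.extend(samples)
            if callback is not None:
                callback(samples)  # samples for this batch only
            batch.clear()
    if batch:  # leftover sentences that do not fill a whole batch
        samples = process(batch)
        ans.extend(samples)
        if callback is not None:
            callback(samples)
    return ans


# Toy usage: each "sentence" synthesizes to one sample per character.
audio = generate(
    ["hello.", "world.", "bye."],
    max_num_sentences=2,
    process=lambda batch: [float(len(s)) for s in batch],
    callback=lambda samples: print("batch done:", samples),
)
print(audio)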
... ... @@ -65,9 +65,12 @@ OfflineTts::OfflineTts(AAssetManager *mgr, const OfflineTtsConfig &config)
OfflineTts::~OfflineTts() = default;
GeneratedAudio OfflineTts::Generate(const std::string &text, int64_t sid /*=0*/,
float speed /*= 1.0*/) const {
return impl_->Generate(text, sid, speed);
GeneratedAudio OfflineTts::Generate(
const std::string &text, int64_t sid /*=0*/, float speed /*= 1.0*/,
GeneratedAudioCallback callback /*= nullptr*/) const {
return impl_->Generate(text, sid, speed, callback);
}
int32_t OfflineTts::SampleRate() const { return impl_->SampleRate(); }
} // namespace sherpa_onnx
... ...
... ... @@ -5,6 +5,7 @@
#define SHERPA_ONNX_CSRC_OFFLINE_TTS_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <vector>
... ... @@ -53,6 +54,9 @@ struct GeneratedAudio {
class OfflineTtsImpl;
using GeneratedAudioCallback =
std::function<void(const float * /*samples*/, int32_t /*n*/)>;
class OfflineTts {
public:
~OfflineTts();
... ... @@ -67,8 +71,20 @@ class OfflineTts {
// trained using the VCTK dataset. It is not used for
// single-speaker models, e.g., models trained using the ljspeech
// dataset.
// @param speed The speed for the generated speech. E.g., 2 means 2x faster.
// @param callback If not NULL, it is called whenever config.max_num_sentences
// sentences have been processed. Note that the passed
// pointer `samples` for the callback might be invalidated
// after the callback returns, so the caller should not
// keep a reference to it. The caller can copy the data if
// they want to access the samples after the callback
// returns. The callback is called in the current thread.
GeneratedAudio Generate(const std::string &text, int64_t sid = 0,
float speed = 1.0) const;
float speed = 1.0,
GeneratedAudioCallback callback = nullptr) const;
// Return the sample rate of the generated audio
int32_t SampleRate() const;
private:
std::unique_ptr<OfflineTtsImpl> impl_;
... ...
... ... @@ -95,7 +95,8 @@ static std::vector<int64_t> PhonemesToIds(
ans.push_back(token2id.at(p));
ans.push_back(pad);
} else {
SHERPA_ONNX_LOGE("Skip unkown phonemes. Unicode codepoint: \\U+%04x.", p);
SHERPA_ONNX_LOGE("Skip unknown phonemes. Unicode codepoint: \\U+%04x.",
static_cast<uint32_t>(p));
}
}
ans.push_back(eos);
... ...
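For context, PhonemesToIds (only partially visible in this hunk) appends a pad token after each mapped phoneme and a final eos, skipping phonemes missing from the token table. A tiny Python sketch of that interleaving, with a made-up token table:

token2id = {"h": 5, "i": 6}  # made-up phoneme-to-id mapping
pad, eos = 0, 1

ids = []
for p in "hi!":
    if p in token2id:
        ids.append(token2id[p])
        ids.append(pad)
    else:
        print(f"Skip unknown phonemes. Unicode codepoint: U+{ord(p):04X}.")
ids.append(eos)
print(ids)  # [5, 0, 6, 0, 1]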
// sherpa-onnx/csrc/sherpa-onnx-offline-tts-play.cc
//
// Copyright (c) 2023 Xiaomi Corporation

#include <signal.h>

#include <algorithm>
#include <chrono>              // NOLINT
#include <condition_variable>  // NOLINT
#include <fstream>
#include <mutex>  // NOLINT
#include <queue>
#include <thread>  // NOLINT
#include <vector>

#include "portaudio.h"  // NOLINT
#include "sherpa-onnx/csrc/microphone.h"
#include "sherpa-onnx/csrc/offline-tts.h"
#include "sherpa-onnx/csrc/parse-options.h"
#include "sherpa-onnx/csrc/wave-writer.h"

static std::condition_variable g_cv;
static std::mutex g_cv_m;

struct Samples {
  std::vector<float> data;
  int32_t consumed = 0;
};

struct Buffer {
  std::queue<Samples> samples;
  std::mutex mutex;
};

static Buffer g_buffer;

static bool g_started = false;
static bool g_stopped = false;
static bool g_killed = false;

static void Handler(int32_t /*sig*/) {
  if (g_killed) {
    exit(0);
  }

  g_killed = true;
  fprintf(stderr, "\nCaught Ctrl + C. Exiting\n");
}

static void AudioGeneratedCallback(const float *s, int32_t n) {
  if (n > 0) {
    Samples samples;
    samples.data = std::vector<float>{s, s + n};

    std::lock_guard<std::mutex> lock(g_buffer.mutex);
    g_buffer.samples.push(std::move(samples));
    g_started = true;
  }
}

static int PlayCallback(const void * /*in*/, void *out,
                        unsigned long n,  // NOLINT
                        const PaStreamCallbackTimeInfo * /*time_info*/,
                        PaStreamCallbackFlags /*status_flags*/,
                        void * /*user_data*/) {
  if (g_killed) {
    return paComplete;
  }

  float *pout = reinterpret_cast<float *>(out);
  std::lock_guard<std::mutex> lock(g_buffer.mutex);

  if (g_buffer.samples.empty()) {
    if (g_stopped) {
      // no more data is available and we have processed all of the samples
      return paComplete;
    }

    // Very unlikely, but the current sentence may be so long that the
    // model has not finished processing it yet; play silence meanwhile.
    std::fill_n(pout, n, 0);
    return paContinue;
  }

  int32_t k = 0;
  for (; k < n && !g_buffer.samples.empty();) {
    int32_t this_block = n - k;
    auto &p = g_buffer.samples.front();

    int32_t remaining = p.data.size() - p.consumed;

    if (this_block <= remaining) {
      std::copy(p.data.begin() + p.consumed,
                p.data.begin() + p.consumed + this_block, pout + k);
      p.consumed += this_block;

      k = n;
      if (p.consumed == p.data.size()) {
        g_buffer.samples.pop();
      }
      break;
    }

    std::copy(p.data.begin() + p.consumed, p.data.end(), pout + k);
    k += p.data.size() - p.consumed;
    g_buffer.samples.pop();
  }

  if (k < n) {
    std::fill_n(pout + k, n - k, 0);
  }

  if (g_stopped && g_buffer.samples.empty()) {
    return paComplete;
  }

  return paContinue;
}

static void PlayCallbackFinished(void *userData) { g_cv.notify_all(); }

static void StartPlayback(int32_t sample_rate) {
  int32_t frames_per_buffer = 1024;
  PaStreamParameters outputParameters;
  PaStream *stream;
  PaError err;

  outputParameters.device =
      Pa_GetDefaultOutputDevice(); /* default output device */

  outputParameters.channelCount = 1;         /* mono output */
  outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */
  outputParameters.suggestedLatency =
      Pa_GetDeviceInfo(outputParameters.device)->defaultLowOutputLatency;
  outputParameters.hostApiSpecificStreamInfo = nullptr;

  err = Pa_OpenStream(&stream, nullptr, /* no input */
                      &outputParameters, sample_rate, frames_per_buffer,
                      paClipOff,  // we won't output out of range samples so
                                  // don't bother clipping them
                      PlayCallback, nullptr);
  if (err != paNoError) {
    fprintf(stderr, "%d portaudio error: %s\n", __LINE__, Pa_GetErrorText(err));
    return;
  }

  err = Pa_SetStreamFinishedCallback(stream, &PlayCallbackFinished);
  if (err != paNoError) {
    fprintf(stderr, "%d portaudio error: %s\n", __LINE__, Pa_GetErrorText(err));
    return;
  }

  err = Pa_StartStream(stream);
  if (err != paNoError) {
    fprintf(stderr, "%d portaudio error: %s\n", __LINE__, Pa_GetErrorText(err));
    return;
  }

  std::unique_lock<std::mutex> lock(g_cv_m);
  while (!g_killed && !g_stopped &&
         (!g_started || (g_started && !g_buffer.samples.empty()))) {
    g_cv.wait(lock);
  }

  err = Pa_StopStream(stream);
  if (err != paNoError) {
    return;
  }

  err = Pa_CloseStream(stream);
  if (err != paNoError) {
    return;
  }
}

int main(int32_t argc, char *argv[]) {
  signal(SIGINT, Handler);

  const char *kUsageMessage = R"usage(
Offline text-to-speech with sherpa-onnx.

It plays the generated audio as the model is processing.

Usage example:

wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xf vits-piper-en_US-amy-low.tar.bz2

./bin/sherpa-onnx-offline-tts-play \
 --vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \
 --vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \
 --vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \
 --output-filename=./generated.wav \
 "Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar."

It will generate a file ./generated.wav as specified by --output-filename.

You can find more models at
https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models

Please see
https://k2-fsa.github.io/sherpa/onnx/tts/index.html
for details.
)usage";

  sherpa_onnx::ParseOptions po(kUsageMessage);
  std::string output_filename = "./generated.wav";
  int32_t sid = 0;

  po.Register("output-filename", &output_filename,
              "Path to save the generated audio");

  po.Register("sid", &sid,
              "Speaker ID. Used only for multi-speaker models, e.g., models "
              "trained using the VCTK dataset. Not used for single-speaker "
              "models, e.g., models trained using the LJSpeech dataset");

  sherpa_onnx::OfflineTtsConfig config;

  config.Register(&po);
  po.Read(argc, argv);

  if (po.NumArgs() == 0) {
    fprintf(stderr, "Error: Please provide the text to generate audio.\n\n");
    po.PrintUsage();
    exit(EXIT_FAILURE);
  }

  if (po.NumArgs() > 1) {
    fprintf(stderr,
            "Error: Only one positional argument is accepted. Please use "
            "single quotes to wrap your text\n");
    po.PrintUsage();
    exit(EXIT_FAILURE);
  }

  if (!config.Validate()) {
    fprintf(stderr, "Errors in config!\n");
    exit(EXIT_FAILURE);
  }

  sherpa_onnx::Microphone mic;

  PaDeviceIndex num_devices = Pa_GetDeviceCount();
  fprintf(stderr, "Num devices: %d\n", num_devices);

  PaStreamParameters param;
  param.device = Pa_GetDefaultOutputDevice();
  if (param.device == paNoDevice) {
    fprintf(stderr, "No default output device found\n");
    exit(EXIT_FAILURE);
  }
  fprintf(stderr, "Use default device: %d\n", param.device);

  const PaDeviceInfo *info = Pa_GetDeviceInfo(param.device);
  fprintf(stderr, "  Name: %s\n", info->name);
  fprintf(stderr, "  Max output channels: %d\n", info->maxOutputChannels);

  if (config.max_num_sentences != 1) {
    fprintf(stderr, "Setting config.max_num_sentences to 1\n");
    config.max_num_sentences = 1;
  }

  fprintf(stderr, "Loading the model\n");
  sherpa_onnx::OfflineTts tts(config);

  fprintf(stderr, "Start the playback thread\n");
  std::thread playback_thread(StartPlayback, tts.SampleRate());

  float speed = 1.0;

  fprintf(stderr, "Generating ...\n");
  const auto begin = std::chrono::steady_clock::now();
  auto audio = tts.Generate(po.GetArg(1), sid, speed, AudioGeneratedCallback);
  const auto end = std::chrono::steady_clock::now();
  g_stopped = true;
  fprintf(stderr, "Generating done!\n");

  if (audio.samples.empty()) {
    fprintf(
        stderr,
        "Error in generating audio. Please read previous error messages.\n");
    exit(EXIT_FAILURE);
  }

  float elapsed_seconds =
      std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
          .count() /
      1000.;
  float duration = audio.samples.size() / static_cast<float>(audio.sample_rate);

  float rtf = elapsed_seconds / duration;
  fprintf(stderr, "Elapsed seconds: %.3f s\n", elapsed_seconds);
  fprintf(stderr, "Audio duration: %.3f s\n", duration);
  fprintf(stderr, "Real-time factor (RTF): %.3f/%.3f = %.3f\n", elapsed_seconds,
          duration, rtf);

  bool ok = sherpa_onnx::WriteWave(output_filename, audio.sample_rate,
                                   audio.samples.data(), audio.samples.size());
  if (!ok) {
    fprintf(stderr, "Failed to write wave to %s\n", output_filename.c_str());
    exit(EXIT_FAILURE);
  }

  fprintf(stderr, "The text is: %s. Speaker ID: %d\n\n", po.GetArg(1).c_str(),
          sid);
  fprintf(stderr, "\n**** Saved to %s successfully! ****\n",
          output_filename.c_str());
  fprintf(stderr, "\n");
  fprintf(
      stderr,
      "Wait for the playback to finish. You can safely press ctrl + C to stop "
      "the playback.\n");

  playback_thread.join();
  fprintf(stderr, "Done!\n");

  return 0;
}
... ...
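The RTF reported by both binaries is simply wall-clock generation time divided by the duration of the produced audio; values below 1 mean generation is faster than real time, which is what makes streaming playback viable. A quick sanity check with made-up numbers:

elapsed_seconds = 0.5    # wall-clock time spent in Generate()
num_samples = 3 * 22050  # 3 seconds of audio at 22050 Hz
sample_rate = 22050

duration = num_samples / sample_rate  # 3.0 s of audio
rtf = elapsed_seconds / duration      # 0.5 / 3.0
print(f"RTF: {elapsed_seconds:.3f}/{duration:.3f} = {rtf:.3f}")  # 0.167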
... ... @@ -2,6 +2,7 @@
//
// Copyright (c) 2023 Xiaomi Corporation
#include <chrono> // NOLINT
#include <fstream>
#include "sherpa-onnx/csrc/offline-tts.h"
... ... @@ -12,31 +13,22 @@ int main(int32_t argc, char *argv[]) {
const char *kUsageMessage = R"usage(
Offline text-to-speech with sherpa-onnx
Usage example:
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xf vits-piper-en_US-amy-low.tar.bz2
./bin/sherpa-onnx-offline-tts \
--vits-model=/path/to/model.onnx \
--vits-lexicon=/path/to/lexicon.txt \
--vits-tokens=/path/to/tokens.txt \
--sid=0 \
--vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \
--vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \
--vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \
--output-filename=./generated.wav \
'some text within single quotes on linux/macos or use double quotes on windows'
"Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar."
It will generate a file ./generated.wav as specified by --output-filename.
You can download a test model from
https://huggingface.co/csukuangfj/vits-ljs
For instance, you can use:
wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/vits-ljs.onnx
wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/lexicon.txt
wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/tokens.txt
./bin/sherpa-onnx-offline-tts \
--vits-model=./vits-ljs.onnx \
--vits-lexicon=./lexicon.txt \
--vits-tokens=./tokens.txt \
--sid=0 \
--output-filename=./generated.wav \
'liliana, the most beautiful and lovely assistant of our team!'
You can find more models at
https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
Please see
https://k2-fsa.github.io/sherpa/onnx/tts/index.html
... ... @@ -80,14 +72,30 @@ or details.
}
sherpa_onnx::OfflineTts tts(config);
const auto begin = std::chrono::steady_clock::now();
auto audio = tts.Generate(po.GetArg(1), sid);
const auto end = std::chrono::steady_clock::now();
if (audio.samples.empty()) {
fprintf(
stderr,
"Error in generating audios. Please read previous error messages.\n");
"Error in generating audio. Please read previous error messages.\n");
exit(EXIT_FAILURE);
}
float elapsed_seconds =
std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
.count() /
1000.;
float duration = audio.samples.size() / static_cast<float>(audio.sample_rate);
float rtf = elapsed_seconds / duration;
fprintf(stderr, "Elapsed seconds: %.3f s\n", elapsed_seconds);
fprintf(stderr, "Audio duration: %.3f s\n", duration);
fprintf(stderr, "Real-time factor (RTF): %.3f/%.3f = %.3f\n", elapsed_seconds,
duration, rtf);
bool ok = sherpa_onnx::WriteWave(output_filename, audio.sample_rate,
audio.samples.data(), audio.samples.size());
if (!ok) {
... ...
... ... @@ -3,6 +3,7 @@
// Copyright (c) 2023 Xiaomi Corporation
#include "sherpa-onnx/python/csrc/offline-tts.h"
#include <algorithm>
#include <string>
#include "sherpa-onnx/csrc/offline-tts.h"
... ... @@ -48,8 +49,35 @@ void PybindOfflineTts(py::module *m) {
using PyClass = OfflineTts;
py::class_<PyClass>(*m, "OfflineTts")
.def(py::init<const OfflineTtsConfig &>(), py::arg("config"))
.def("generate", &PyClass::Generate, py::arg("text"), py::arg("sid") = 0,
py::arg("speed") = 1.0, py::call_guard<py::gil_scoped_release>());
.def_property_readonly("sample_rate", &PyClass::SampleRate)
.def(
"generate",
[](const PyClass &self, const std::string &text, int64_t sid,
float speed, std::function<void(py::array_t<float>)> callback)
-> GeneratedAudio {
if (!callback) {
return self.Generate(text, sid, speed);
}
std::function<void(const float *, int32_t)> callback_wrapper =
[callback](const float *samples, int32_t n) {
// CAUTION(fangjun): we have to copy samples since they are
// freed once the callback returns.
pybind11::gil_scoped_acquire acquire;
pybind11::array_t<float> array(n);
py::buffer_info buf = array.request();
auto p = static_cast<float *>(buf.ptr);
std::copy(samples, samples + n, p);
callback(array);
};
return self.Generate(text, sid, speed, callback_wrapper);
},
py::arg("text"), py::arg("sid") = 0, py::arg("speed") = 1.0,
py::arg("callback") = py::none(),
py::call_guard<py::gil_scoped_release>());
}
} // namespace sherpa_onnx
... ...
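The wrapper above copies the C buffer into a fresh numpy array (while holding the GIL) before invoking the user's Python callback, so Python code may safely keep the array it receives. A pure-Python sketch of why that copy matters; the scratch array stands in for the reused C++ buffer:

import numpy as np

scratch = np.zeros(4, dtype=np.float32)  # stands in for the C++ buffer
kept = []


def wrapper(user_callback):
    # Mimics the pybind11 wrapper: copy first, then call the user code.
    user_callback(scratch.copy())


wrapper(kept.append)
scratch[:] = -1.0  # the C++ side reuses/frees its buffer after the callback
print(kept[0])     # still the original values, thanks to the copy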
... ... @@ -5,6 +5,7 @@
#ifndef SHERPA_ONNX_PYTHON_CSRC_SHERPA_ONNX_H_
#define SHERPA_ONNX_PYTHON_CSRC_SHERPA_ONNX_H_
#include "pybind11/functional.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
... ...