Committed by
GitHub
Play generated audio as it is generating. (#457)
正在显示
20 个修改的文件
包含
876 行增加
和
79 行删除
| @@ -143,6 +143,7 @@ class BuildExtension(build_ext): | @@ -143,6 +143,7 @@ class BuildExtension(build_ext): | ||
| 143 | binaries += ["sherpa-onnx-vad-microphone"] | 143 | binaries += ["sherpa-onnx-vad-microphone"] |
| 144 | binaries += ["sherpa-onnx-vad-microphone-offline-asr"] | 144 | binaries += ["sherpa-onnx-vad-microphone-offline-asr"] |
| 145 | binaries += ["sherpa-onnx-offline-tts"] | 145 | binaries += ["sherpa-onnx-offline-tts"] |
| 146 | + binaries += ["sherpa-onnx-offline-tts-play"] | ||
| 146 | 147 | ||
| 147 | if is_windows(): | 148 | if is_windows(): |
| 148 | binaries += ["kaldi-native-fbank-core.dll"] | 149 | binaries += ["kaldi-native-fbank-core.dll"] |
| @@ -5,6 +5,7 @@ function(download_espeak_ng_for_piper) | @@ -5,6 +5,7 @@ function(download_espeak_ng_for_piper) | ||
| 5 | set(espeak_ng_URL2 "") | 5 | set(espeak_ng_URL2 "") |
| 6 | set(espeak_ng_HASH "SHA256=8a48251e6926133dd91fcf6cb210c7c2e290a9b578d269446e2d32d710b0dfa0") | 6 | set(espeak_ng_HASH "SHA256=8a48251e6926133dd91fcf6cb210c7c2e290a9b578d269446e2d32d710b0dfa0") |
| 7 | 7 | ||
| 8 | + set(BUILD_ESPEAK_NG_TESTS OFF CACHE BOOL "" FORCE) | ||
| 8 | set(USE_ASYNC OFF CACHE BOOL "" FORCE) | 9 | set(USE_ASYNC OFF CACHE BOOL "" FORCE) |
| 9 | set(USE_MBROLA OFF CACHE BOOL "" FORCE) | 10 | set(USE_MBROLA OFF CACHE BOOL "" FORCE) |
| 10 | set(USE_LIBSONIC OFF CACHE BOOL "" FORCE) | 11 | set(USE_LIBSONIC OFF CACHE BOOL "" FORCE) |
| @@ -106,10 +107,12 @@ function(download_espeak_ng_for_piper) | @@ -106,10 +107,12 @@ function(download_espeak_ng_for_piper) | ||
| 106 | if(SHERPA_ONNX_ENABLE_PYTHON AND WIN32) | 107 | if(SHERPA_ONNX_ENABLE_PYTHON AND WIN32) |
| 107 | install(TARGETS | 108 | install(TARGETS |
| 108 | espeak-ng | 109 | espeak-ng |
| 110 | + ucd | ||
| 109 | DESTINATION ..) | 111 | DESTINATION ..) |
| 110 | else() | 112 | else() |
| 111 | install(TARGETS | 113 | install(TARGETS |
| 112 | espeak-ng | 114 | espeak-ng |
| 115 | + ucd | ||
| 113 | DESTINATION lib) | 116 | DESTINATION lib) |
| 114 | endif() | 117 | endif() |
| 115 | 118 | ||
| @@ -120,6 +123,7 @@ function(download_espeak_ng_for_piper) | @@ -120,6 +123,7 @@ function(download_espeak_ng_for_piper) | ||
| 120 | if(WIN32 AND BUILD_SHARED_LIBS) | 123 | if(WIN32 AND BUILD_SHARED_LIBS) |
| 121 | install(TARGETS | 124 | install(TARGETS |
| 122 | espeak-ng | 125 | espeak-ng |
| 126 | + ucd | ||
| 123 | DESTINATION bin) | 127 | DESTINATION bin) |
| 124 | endif() | 128 | endif() |
| 125 | endfunction() | 129 | endfunction() |
| @@ -14,6 +14,9 @@ | @@ -14,6 +14,9 @@ | ||
| 14 | sherpa-onnx-fst.lib; | 14 | sherpa-onnx-fst.lib; |
| 15 | kaldi-native-fbank-core.lib; | 15 | kaldi-native-fbank-core.lib; |
| 16 | onnxruntime.lib; | 16 | onnxruntime.lib; |
| 17 | + piper_phonemize.lib; | ||
| 18 | + espeak-ng.lib; | ||
| 19 | + ucd.lib; | ||
| 17 | </SherpaOnnxLibraries> | 20 | </SherpaOnnxLibraries> |
| 18 | </PropertyGroup> | 21 | </PropertyGroup> |
| 19 | <ItemDefinitionGroup> | 22 | <ItemDefinitionGroup> |
| @@ -14,6 +14,9 @@ | @@ -14,6 +14,9 @@ | ||
| 14 | sherpa-onnx-fst.lib; | 14 | sherpa-onnx-fst.lib; |
| 15 | kaldi-native-fbank-core.lib; | 15 | kaldi-native-fbank-core.lib; |
| 16 | onnxruntime.lib; | 16 | onnxruntime.lib; |
| 17 | + piper_phonemize.lib; | ||
| 18 | + espeak-ng.lib; | ||
| 19 | + ucd.lib; | ||
| 17 | </SherpaOnnxLibraries> | 20 | </SherpaOnnxLibraries> |
| 18 | </PropertyGroup> | 21 | </PropertyGroup> |
| 19 | <ItemDefinitionGroup> | 22 | <ItemDefinitionGroup> |
| @@ -14,6 +14,9 @@ | @@ -14,6 +14,9 @@ | ||
| 14 | sherpa-onnx-fst.lib; | 14 | sherpa-onnx-fst.lib; |
| 15 | kaldi-native-fbank-core.lib; | 15 | kaldi-native-fbank-core.lib; |
| 16 | onnxruntime.lib; | 16 | onnxruntime.lib; |
| 17 | + piper_phonemize.lib; | ||
| 18 | + espeak-ng.lib; | ||
| 19 | + ucd.lib; | ||
| 17 | </SherpaOnnxLibraries> | 20 | </SherpaOnnxLibraries> |
| 18 | </PropertyGroup> | 21 | </PropertyGroup> |
| 19 | <ItemDefinitionGroup> | 22 | <ItemDefinitionGroup> |
python-api-examples/offline-tts-play.py
0 → 100755
| 1 | +#!/usr/bin/env python3 | ||
| 2 | +# | ||
| 3 | +# Copyright (c) 2023 Xiaomi Corporation | ||
| 4 | + | ||
| 5 | +""" | ||
| 6 | +This file demonstrates how to use sherpa-onnx Python API to generate audio | ||
| 7 | +from text, i.e., text-to-speech. | ||
| 8 | + | ||
| 9 | +Different from ./offline-tts.py, this file plays back the generated audio | ||
| 10 | +while the model is still generating. | ||
| 11 | + | ||
| 12 | +Usage: | ||
| 13 | + | ||
| 14 | +Example (1/2) | ||
| 15 | + | ||
| 16 | +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2 | ||
| 17 | +tar xf vits-piper-en_US-amy-low.tar.bz2 | ||
| 18 | + | ||
| 19 | +python3 ./python-api-examples/offline-tts-play.py \ | ||
| 20 | + --vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \ | ||
| 21 | + --vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \ | ||
| 22 | + --vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \ | ||
| 23 | + --output-filename=./generated.wav \ | ||
| 24 | + "Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar." | ||
| 25 | + | ||
| 26 | +Example (2/2) | ||
| 27 | + | ||
| 28 | +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-zh-aishell3.tar.bz2 | ||
| 29 | +tar xvf vits-zh-aishell3.tar.bz2 | ||
| 30 | + | ||
| 31 | +python3 ./python-api-examples/offline-tts-play.py \ | ||
| 32 | + --vits-model=./vits-aishell3.onnx \ | ||
| 33 | + --vits-lexicon=./lexicon.txt \ | ||
| 34 | + --vits-tokens=./tokens.txt \ | ||
| 35 | + --tts-rule-fsts=./rule.fst \ | ||
| 36 | + --sid=21 \ | ||
| 37 | + --output-filename=./liubei-21.wav \ | ||
| 38 | + "勿以恶小而为之,勿以善小而不为。惟贤惟德,能服于人。122334" | ||
| 39 | + | ||
| 40 | +You can find more models at | ||
| 41 | +https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models | ||
| 42 | + | ||
| 43 | +Please see | ||
| 44 | +https://k2-fsa.github.io/sherpa/onnx/tts/index.html | ||
| 45 | +for details. | ||
| 46 | +""" | ||
| 47 | + | ||
| 48 | +import argparse | ||
| 49 | +import logging | ||
| 50 | +import queue | ||
| 51 | +import sys | ||
| 52 | +import threading | ||
| 53 | +import time | ||
| 54 | + | ||
| 55 | +import numpy as np | ||
| 56 | +import sherpa_onnx | ||
| 57 | +import soundfile as sf | ||
| 58 | + | ||
| 59 | +try: | ||
| 60 | + import sounddevice as sd | ||
| 61 | +except ImportError: | ||
| 62 | + print("Please install sounddevice first. You can use") | ||
| 63 | + print() | ||
| 64 | + print(" pip install sounddevice") | ||
| 65 | + print() | ||
| 66 | + print("to install it") | ||
| 67 | + sys.exit(-1) | ||
| 68 | + | ||
| 69 | + | ||
| 70 | +def get_args(): | ||
| 71 | + parser = argparse.ArgumentParser( | ||
| 72 | + formatter_class=argparse.ArgumentDefaultsHelpFormatter | ||
| 73 | + ) | ||
| 74 | + | ||
| 75 | + parser.add_argument( | ||
| 76 | + "--vits-model", | ||
| 77 | + type=str, | ||
| 78 | + help="Path to vits model.onnx", | ||
| 79 | + ) | ||
| 80 | + | ||
| 81 | + parser.add_argument( | ||
| 82 | + "--vits-lexicon", | ||
| 83 | + type=str, | ||
| 84 | + default="", | ||
| 85 | + help="Path to lexicon.txt", | ||
| 86 | + ) | ||
| 87 | + | ||
| 88 | + parser.add_argument( | ||
| 89 | + "--vits-tokens", | ||
| 90 | + type=str, | ||
| 91 | + default="", | ||
| 92 | + help="Path to tokens.txt", | ||
| 93 | + ) | ||
| 94 | + | ||
| 95 | + parser.add_argument( | ||
| 96 | + "--vits-data-dir", | ||
| 97 | + type=str, | ||
| 98 | + default="", | ||
| 99 | + help="""Path to the dict director of espeak-ng. If it is specified, | ||
| 100 | + --vits-lexicon and --vits-tokens are ignored""", | ||
| 101 | + ) | ||
| 102 | + | ||
| 103 | + parser.add_argument( | ||
| 104 | + "--tts-rule-fsts", | ||
| 105 | + type=str, | ||
| 106 | + default="", | ||
| 107 | + help="Path to rule.fst", | ||
| 108 | + ) | ||
| 109 | + | ||
| 110 | + parser.add_argument( | ||
| 111 | + "--output-filename", | ||
| 112 | + type=str, | ||
| 113 | + default="./generated.wav", | ||
| 114 | + help="Path to save generated wave", | ||
| 115 | + ) | ||
| 116 | + | ||
| 117 | + parser.add_argument( | ||
| 118 | + "--sid", | ||
| 119 | + type=int, | ||
| 120 | + default=0, | ||
| 121 | + help="""Speaker ID. Used only for multi-speaker models, e.g. | ||
| 122 | + models trained using the VCTK dataset. Not used for single-speaker | ||
| 123 | + models, e.g., models trained using the LJ speech dataset. | ||
| 124 | + """, | ||
| 125 | + ) | ||
| 126 | + | ||
| 127 | + parser.add_argument( | ||
| 128 | + "--debug", | ||
| 129 | + type=bool, | ||
| 130 | + default=False, | ||
| 131 | + help="True to show debug messages", | ||
| 132 | + ) | ||
| 133 | + | ||
| 134 | + parser.add_argument( | ||
| 135 | + "--provider", | ||
| 136 | + type=str, | ||
| 137 | + default="cpu", | ||
| 138 | + help="valid values: cpu, cuda, coreml", | ||
| 139 | + ) | ||
| 140 | + | ||
| 141 | + parser.add_argument( | ||
| 142 | + "--num-threads", | ||
| 143 | + type=int, | ||
| 144 | + default=1, | ||
| 145 | + help="Number of threads for neural network computation", | ||
| 146 | + ) | ||
| 147 | + | ||
| 148 | + parser.add_argument( | ||
| 149 | + "--speed", | ||
| 150 | + type=float, | ||
| 151 | + default=1.0, | ||
| 152 | + help="Speech speed. Larger->faster; smaller->slower", | ||
| 153 | + ) | ||
| 154 | + | ||
| 155 | + parser.add_argument( | ||
| 156 | + "text", | ||
| 157 | + type=str, | ||
| 158 | + help="The input text to generate audio for", | ||
| 159 | + ) | ||
| 160 | + | ||
| 161 | + return parser.parse_args() | ||
| 162 | + | ||
| 163 | + | ||
| 164 | +# buffer saves audio samples to be played | ||
| 165 | +buffer = queue.Queue() | ||
| 166 | + | ||
| 167 | +# started is set to True once generated_audio_callback is called. | ||
| 168 | +started = False | ||
| 169 | + | ||
| 170 | +# stopped is set to True once all the text has been processed | ||
| 171 | +stopped = False | ||
| 172 | + | ||
| 173 | +# killed is set to True once ctrl + C is pressed | ||
| 174 | +killed = False | ||
| 175 | + | ||
| 176 | +# Note: When started is True, and stopped is True, and buffer is empty, | ||
| 177 | +# we will exit the program since all audio samples have been played. | ||
| 178 | + | ||
| 179 | +sample_rate = None | ||
| 180 | + | ||
| 181 | +event = threading.Event() | ||
| 182 | + | ||
| 183 | + | ||
| 184 | +def generated_audio_callback(samples: np.ndarray): | ||
| 185 | + """This function is called whenever max_num_sentences sentences | ||
| 186 | + have been processed. | ||
| 187 | + | ||
| 188 | + Note that it is passed to C++ and is invoked in C++. | ||
| 189 | + | ||
| 190 | + Args: | ||
| 191 | + samples: | ||
| 192 | + A 1-D np.float32 array containing audio samples | ||
| 193 | + """ | ||
| 194 | + buffer.put(samples) | ||
| 195 | + global started | ||
| 196 | + | ||
| 197 | + if started is False: | ||
| 198 | + logging.info("Start playing ...") | ||
| 199 | + started = True | ||
| 200 | + | ||
| 201 | + | ||
| 202 | +# see https://python-sounddevice.readthedocs.io/en/0.4.6/api/streams.html#sounddevice.OutputStream | ||
| 203 | +def play_audio_callback( | ||
| 204 | + outdata: np.ndarray, frames: int, time, status: sd.CallbackFlags | ||
| 205 | +): | ||
| 206 | + if killed or (started and buffer.empty() and stopped): | ||
| 207 | + event.set() | ||
| 208 | + | ||
| 209 | + # outdata is of shape (frames, num_channels) | ||
| 210 | + if buffer.empty(): | ||
| 211 | + outdata.fill(0) | ||
| 212 | + return | ||
| 213 | + | ||
| 214 | + n = 0 | ||
| 215 | + while n < frames and not buffer.empty(): | ||
| 216 | + remaining = frames - n | ||
| 217 | + k = buffer.queue[0].shape[0] | ||
| 218 | + | ||
| 219 | + if remaining <= k: | ||
| 220 | + outdata[n:, 0] = buffer.queue[0][:remaining] | ||
| 221 | + buffer.queue[0] = buffer.queue[0][remaining:] | ||
| 222 | + n = frames | ||
| 223 | + if buffer.queue[0].shape[0] == 0: | ||
| 224 | + buffer.get() | ||
| 225 | + | ||
| 226 | + break | ||
| 227 | + | ||
| 228 | + outdata[n : n + k, 0] = buffer.get() | ||
| 229 | + n += k | ||
| 230 | + | ||
| 231 | + if n < frames: | ||
| 232 | + outdata[n:, 0] = 0 | ||
| 233 | + | ||
| 234 | + | ||
| 235 | +# Please see | ||
| 236 | +# https://python-sounddevice.readthedocs.io/en/0.4.6/usage.html#device-selection | ||
| 237 | +# for how to select a device | ||
| 238 | +def play_audio(): | ||
| 239 | + if False: | ||
| 240 | + # This if branch can be safely removed. It is here to show you how to | ||
| 241 | + # change the default output device in case you need that. | ||
| 242 | + devices = sd.query_devices() | ||
| 243 | + print(devices) | ||
| 244 | + | ||
| 245 | + # sd.default.device[1] is the output device, if you want to | ||
| 246 | + # select a different device, say, 3, as the output device, please | ||
| 247 | + # use self.default.device[1] = 3 | ||
| 248 | + | ||
| 249 | + default_output_device_idx = sd.default.device[1] | ||
| 250 | + print( | ||
| 251 | + f'Use default output device: {devices[default_output_device_idx]["name"]}' | ||
| 252 | + ) | ||
| 253 | + | ||
| 254 | + with sd.OutputStream( | ||
| 255 | + channels=1, | ||
| 256 | + callback=play_audio_callback, | ||
| 257 | + dtype="float32", | ||
| 258 | + samplerate=sample_rate, | ||
| 259 | + blocksize=1024, | ||
| 260 | + ): | ||
| 261 | + event.wait() | ||
| 262 | + | ||
| 263 | + logging.info("Exiting ...") | ||
| 264 | + | ||
| 265 | + | ||
| 266 | +def main(): | ||
| 267 | + args = get_args() | ||
| 268 | + print(args) | ||
| 269 | + | ||
| 270 | + tts_config = sherpa_onnx.OfflineTtsConfig( | ||
| 271 | + model=sherpa_onnx.OfflineTtsModelConfig( | ||
| 272 | + vits=sherpa_onnx.OfflineTtsVitsModelConfig( | ||
| 273 | + model=args.vits_model, | ||
| 274 | + lexicon=args.vits_lexicon, | ||
| 275 | + data_dir=args.vits_data_dir, | ||
| 276 | + tokens=args.vits_tokens, | ||
| 277 | + ), | ||
| 278 | + provider=args.provider, | ||
| 279 | + debug=args.debug, | ||
| 280 | + num_threads=args.num_threads, | ||
| 281 | + ), | ||
| 282 | + rule_fsts=args.tts_rule_fsts, | ||
| 283 | + max_num_sentences=1, | ||
| 284 | + ) | ||
| 285 | + | ||
| 286 | + if not tts_config.validate(): | ||
| 287 | + raise ValueError("Please check your config") | ||
| 288 | + | ||
| 289 | + logging.info("Loading model ...") | ||
| 290 | + tts = sherpa_onnx.OfflineTts(tts_config) | ||
| 291 | + logging.info("Loading model done.") | ||
| 292 | + | ||
| 293 | + global sample_rate | ||
| 294 | + sample_rate = tts.sample_rate | ||
| 295 | + | ||
| 296 | + play_back_thread = threading.Thread(target=play_audio) | ||
| 297 | + play_back_thread.start() | ||
| 298 | + | ||
| 299 | + logging.info("Start generating ...") | ||
| 300 | + start = time.time() | ||
| 301 | + audio = tts.generate( | ||
| 302 | + args.text, | ||
| 303 | + sid=args.sid, | ||
| 304 | + speed=args.speed, | ||
| 305 | + callback=generated_audio_callback, | ||
| 306 | + ) | ||
| 307 | + end = time.time() | ||
| 308 | + logging.info("Finished generating!") | ||
| 309 | + global stopped | ||
| 310 | + stopped = True | ||
| 311 | + | ||
| 312 | + if len(audio.samples) == 0: | ||
| 313 | + print("Error in generating audios. Please read previous error messages.") | ||
| 314 | + return | ||
| 315 | + | ||
| 316 | + elapsed_seconds = end - start | ||
| 317 | + audio_duration = len(audio.samples) / audio.sample_rate | ||
| 318 | + real_time_factor = elapsed_seconds / audio_duration | ||
| 319 | + | ||
| 320 | + sf.write( | ||
| 321 | + args.output_filename, | ||
| 322 | + audio.samples, | ||
| 323 | + samplerate=audio.sample_rate, | ||
| 324 | + subtype="PCM_16", | ||
| 325 | + ) | ||
| 326 | + logging.info(f"The text is '{args.text}'") | ||
| 327 | + logging.info(f"Elapsed seconds: {elapsed_seconds:.3f}") | ||
| 328 | + logging.info(f"Audio duration in seconds: {audio_duration:.3f}") | ||
| 329 | + logging.info( | ||
| 330 | + f"RTF: {elapsed_seconds:.3f}/{audio_duration:.3f} = {real_time_factor:.3f}" | ||
| 331 | + ) | ||
| 332 | + | ||
| 333 | + logging.info(f"*** Saved to {args.output_filename} ***") | ||
| 334 | + | ||
| 335 | + print("\n >>>>>>>>> You can safely press ctrl + C to stop the play <<<<<<<<<<\n") | ||
| 336 | + | ||
| 337 | + play_back_thread.join() | ||
| 338 | + | ||
| 339 | + | ||
| 340 | +if __name__ == "__main__": | ||
| 341 | + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" | ||
| 342 | + | ||
| 343 | + logging.basicConfig(format=formatter, level=logging.INFO) | ||
| 344 | + try: | ||
| 345 | + main() | ||
| 346 | + except KeyboardInterrupt: | ||
| 347 | + print("\nCaught Ctrl + C. Exiting") | ||
| 348 | + killed = True | ||
| 349 | + sys.exit(0) |
| @@ -6,29 +6,30 @@ | @@ -6,29 +6,30 @@ | ||
| 6 | This file demonstrates how to use sherpa-onnx Python API to generate audio | 6 | This file demonstrates how to use sherpa-onnx Python API to generate audio |
| 7 | from text, i.e., text-to-speech. | 7 | from text, i.e., text-to-speech. |
| 8 | 8 | ||
| 9 | + | ||
| 10 | +Different from ./offline-tts-play.py, this file does not play back the | ||
| 11 | +generated audio. | ||
| 12 | + | ||
| 9 | Usage: | 13 | Usage: |
| 10 | 14 | ||
| 11 | -1. Download a model | 15 | +Example (1/2) |
| 12 | 16 | ||
| 13 | -wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/vits-ljs.onnx | ||
| 14 | -wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/lexicon.txt | ||
| 15 | -wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/tokens.txt | 17 | +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2 |
| 18 | +tar xf vits-piper-en_US-amy-low.tar.bz2 | ||
| 16 | 19 | ||
| 17 | python3 ./python-api-examples/offline-tts.py \ | 20 | python3 ./python-api-examples/offline-tts.py \ |
| 18 | - --vits-model=./vits-ljs.onnx \ | ||
| 19 | - --vits-lexicon=./lexicon.txt \ | ||
| 20 | - --vits-tokens=./tokens.txt \ | ||
| 21 | - --output-filename=./generated.wav \ | ||
| 22 | - 'liliana, the most beautiful and lovely assistant of our team!' | 21 | + --vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \ |
| 22 | + --vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \ | ||
| 23 | + --vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \ | ||
| 24 | + --output-filename=./generated.wav \ | ||
| 25 | + "Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar." | ||
| 23 | 26 | ||
| 24 | -2. Download a model | 27 | +Example (2/2) |
| 25 | 28 | ||
| 26 | -wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/vits-aishell3.onnx | ||
| 27 | -wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/lexicon.txt | ||
| 28 | -wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/tokens.txt | ||
| 29 | -wget https://huggingface.co/csukuangfj/vits-zh-aishell3/resolve/main/rule.fst | 29 | +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-zh-aishell3.tar.bz2 |
| 30 | +tar xvf vits-zh-aishell3.tar.bz2 | ||
| 30 | 31 | ||
| 31 | -python3 ./python-api-examples/offline-tts.py | 32 | +python3 ./python-api-examples/offline-tts.py \ |
| 32 | --vits-model=./vits-aishell3.onnx \ | 33 | --vits-model=./vits-aishell3.onnx \ |
| 33 | --vits-lexicon=./lexicon.txt \ | 34 | --vits-lexicon=./lexicon.txt \ |
| 34 | --vits-tokens=./tokens.txt \ | 35 | --vits-tokens=./tokens.txt \ |
| @@ -37,9 +38,13 @@ python3 ./python-api-examples/offline-tts.py | @@ -37,9 +38,13 @@ python3 ./python-api-examples/offline-tts.py | ||
| 37 | --output-filename=./liubei-21.wav \ | 38 | --output-filename=./liubei-21.wav \ |
| 38 | "勿以恶小而为之,勿以善小而不为。惟贤惟德,能服于人。122334" | 39 | "勿以恶小而为之,勿以善小而不为。惟贤惟德,能服于人。122334" |
| 39 | 40 | ||
| 41 | +You can find more models at | ||
| 42 | +https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models | ||
| 43 | + | ||
| 40 | Please see | 44 | Please see |
| 41 | https://k2-fsa.github.io/sherpa/onnx/tts/index.html | 45 | https://k2-fsa.github.io/sherpa/onnx/tts/index.html |
| 42 | for details. | 46 | for details. |
| 47 | + | ||
| 43 | """ | 48 | """ |
| 44 | 49 | ||
| 45 | import argparse | 50 | import argparse |
| @@ -59,6 +59,7 @@ def get_binaries_to_install(): | @@ -59,6 +59,7 @@ def get_binaries_to_install(): | ||
| 59 | binaries += ["sherpa-onnx-vad-microphone"] | 59 | binaries += ["sherpa-onnx-vad-microphone"] |
| 60 | binaries += ["sherpa-onnx-vad-microphone-offline-asr"] | 60 | binaries += ["sherpa-onnx-vad-microphone-offline-asr"] |
| 61 | binaries += ["sherpa-onnx-offline-tts"] | 61 | binaries += ["sherpa-onnx-offline-tts"] |
| 62 | + binaries += ["sherpa-onnx-offline-tts-play"] | ||
| 62 | if is_windows(): | 63 | if is_windows(): |
| 63 | binaries += ["kaldi-native-fbank-core.dll"] | 64 | binaries += ["kaldi-native-fbank-core.dll"] |
| 64 | binaries += ["sherpa-onnx-c-api.dll"] | 65 | binaries += ["sherpa-onnx-c-api.dll"] |
| @@ -575,10 +575,22 @@ SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts( | @@ -575,10 +575,22 @@ SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts( | ||
| 575 | 575 | ||
| 576 | void SherpaOnnxDestroyOfflineTts(SherpaOnnxOfflineTts *tts) { delete tts; } | 576 | void SherpaOnnxDestroyOfflineTts(SherpaOnnxOfflineTts *tts) { delete tts; } |
| 577 | 577 | ||
| 578 | +int32_t SherpaOnnxOfflineTtsSampleRate(const SherpaOnnxOfflineTts *tts) { | ||
| 579 | + return tts->impl->SampleRate(); | ||
| 580 | +} | ||
| 581 | + | ||
| 578 | const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate( | 582 | const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate( |
| 579 | const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, | 583 | const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, |
| 580 | float speed) { | 584 | float speed) { |
| 581 | - sherpa_onnx::GeneratedAudio audio = tts->impl->Generate(text, sid, speed); | 585 | + return SherpaOnnxOfflineTtsGenerateWithCallback(tts, text, sid, speed, |
| 586 | + nullptr); | ||
| 587 | +} | ||
| 588 | + | ||
| 589 | +const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerateWithCallback( | ||
| 590 | + const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, float speed, | ||
| 591 | + SherpaOnnxGeneratedAudioCallback callback) { | ||
| 592 | + sherpa_onnx::GeneratedAudio audio = | ||
| 593 | + tts->impl->Generate(text, sid, speed, callback); | ||
| 582 | 594 | ||
| 583 | if (audio.samples.empty()) { | 595 | if (audio.samples.empty()) { |
| 584 | return nullptr; | 596 | return nullptr; |
| @@ -596,7 +608,7 @@ const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate( | @@ -596,7 +608,7 @@ const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate( | ||
| 596 | return ans; | 608 | return ans; |
| 597 | } | 609 | } |
| 598 | 610 | ||
| 599 | -SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTtsGeneratedAudio( | 611 | +void SherpaOnnxDestroyOfflineTtsGeneratedAudio( |
| 600 | const SherpaOnnxGeneratedAudio *p) { | 612 | const SherpaOnnxGeneratedAudio *p) { |
| 601 | if (p) { | 613 | if (p) { |
| 602 | delete[] p->samples; | 614 | delete[] p->samples; |
| @@ -633,6 +633,9 @@ SHERPA_ONNX_API typedef struct SherpaOnnxGeneratedAudio { | @@ -633,6 +633,9 @@ SHERPA_ONNX_API typedef struct SherpaOnnxGeneratedAudio { | ||
| 633 | int32_t sample_rate; | 633 | int32_t sample_rate; |
| 634 | } SherpaOnnxGeneratedAudio; | 634 | } SherpaOnnxGeneratedAudio; |
| 635 | 635 | ||
| 636 | +typedef void (*SherpaOnnxGeneratedAudioCallback)(const float *samples, | ||
| 637 | + int32_t n); | ||
| 638 | + | ||
| 636 | SHERPA_ONNX_API typedef struct SherpaOnnxOfflineTts SherpaOnnxOfflineTts; | 639 | SHERPA_ONNX_API typedef struct SherpaOnnxOfflineTts SherpaOnnxOfflineTts; |
| 637 | 640 | ||
| 638 | // Create an instance of offline TTS. The user has to use DestroyOfflineTts() | 641 | // Create an instance of offline TTS. The user has to use DestroyOfflineTts() |
| @@ -643,13 +646,26 @@ SHERPA_ONNX_API SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts( | @@ -643,13 +646,26 @@ SHERPA_ONNX_API SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts( | ||
| 643 | // Free the pointer returned by CreateOfflineTts() | 646 | // Free the pointer returned by CreateOfflineTts() |
| 644 | SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTts(SherpaOnnxOfflineTts *tts); | 647 | SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTts(SherpaOnnxOfflineTts *tts); |
| 645 | 648 | ||
| 649 | +// Return the sample rate of the current TTS object | ||
| 650 | +SHERPA_ONNX_API int32_t | ||
| 651 | +SherpaOnnxOfflineTtsSampleRate(const SherpaOnnxOfflineTts *tts); | ||
| 652 | + | ||
| 646 | // Generate audio from the given text and speaker id (sid). | 653 | // Generate audio from the given text and speaker id (sid). |
| 647 | -// The user has to use DestroyOfflineTtsGeneratedAudio() to free the returned | ||
| 648 | -// pointer to avoid memory leak. | 654 | +// The user has to use DestroyOfflineTtsGeneratedAudio() to free the |
| 655 | +// returned pointer to avoid memory leak. | ||
| 649 | SHERPA_ONNX_API const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate( | 656 | SHERPA_ONNX_API const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate( |
| 650 | const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, | 657 | const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, |
| 651 | float speed); | 658 | float speed); |
| 652 | 659 | ||
| 660 | +// callback is called whenever SherpaOnnxOfflineTtsConfig.max_num_sentences | ||
| 661 | +// sentences have been processed. The pointer passed to the callback | ||
| 662 | +// is freed once the callback is returned. So the caller should not keep | ||
| 663 | +// a reference to it. | ||
| 664 | +SHERPA_ONNX_API const SherpaOnnxGeneratedAudio * | ||
| 665 | +SherpaOnnxOfflineTtsGenerateWithCallback( | ||
| 666 | + const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid, float speed, | ||
| 667 | + SherpaOnnxGeneratedAudioCallback callback); | ||
| 668 | + | ||
| 653 | SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTtsGeneratedAudio( | 669 | SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTtsGeneratedAudio( |
| 654 | const SherpaOnnxGeneratedAudio *p); | 670 | const SherpaOnnxGeneratedAudio *p); |
| 655 | 671 |
| @@ -165,30 +165,26 @@ add_executable(sherpa-onnx-offline sherpa-onnx-offline.cc) | @@ -165,30 +165,26 @@ add_executable(sherpa-onnx-offline sherpa-onnx-offline.cc) | ||
| 165 | add_executable(sherpa-onnx-offline-parallel sherpa-onnx-offline-parallel.cc) | 165 | add_executable(sherpa-onnx-offline-parallel sherpa-onnx-offline-parallel.cc) |
| 166 | add_executable(sherpa-onnx-offline-tts sherpa-onnx-offline-tts.cc) | 166 | add_executable(sherpa-onnx-offline-tts sherpa-onnx-offline-tts.cc) |
| 167 | 167 | ||
| 168 | +set(main_exes | ||
| 169 | + sherpa-onnx | ||
| 170 | + sherpa-onnx-offline | ||
| 171 | + sherpa-onnx-offline-parallel | ||
| 172 | + sherpa-onnx-offline-tts | ||
| 173 | +) | ||
| 168 | 174 | ||
| 169 | -target_link_libraries(sherpa-onnx sherpa-onnx-core) | ||
| 170 | -target_link_libraries(sherpa-onnx-offline sherpa-onnx-core) | ||
| 171 | -target_link_libraries(sherpa-onnx-offline-parallel sherpa-onnx-core) | ||
| 172 | -target_link_libraries(sherpa-onnx-offline-tts sherpa-onnx-core) | ||
| 173 | -if(NOT WIN32) | ||
| 174 | - target_link_libraries(sherpa-onnx "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib") | ||
| 175 | - target_link_libraries(sherpa-onnx "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib") | ||
| 176 | - | ||
| 177 | - target_link_libraries(sherpa-onnx-offline "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib") | ||
| 178 | - target_link_libraries(sherpa-onnx-offline "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib") | ||
| 179 | - | ||
| 180 | - target_link_libraries(sherpa-onnx-offline-parallel "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib") | ||
| 181 | - target_link_libraries(sherpa-onnx-offline-parallel "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib") | 175 | +foreach(exe IN LISTS main_exes) |
| 176 | + target_link_libraries(${exe} sherpa-onnx-core) | ||
| 177 | +endforeach() | ||
| 182 | 178 | ||
| 183 | - target_link_libraries(sherpa-onnx-offline-tts "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib") | ||
| 184 | - target_link_libraries(sherpa-onnx-offline-tts "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib") | 179 | +if(NOT WIN32) |
| 180 | + foreach(exe IN LISTS main_exes) | ||
| 181 | + target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib") | ||
| 182 | + target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib") | ||
| 185 | 183 | ||
| 186 | - if(SHERPA_ONNX_ENABLE_PYTHON) | ||
| 187 | - target_link_libraries(sherpa-onnx "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib") | ||
| 188 | - target_link_libraries(sherpa-onnx-offline "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib") | ||
| 189 | - target_link_libraries(sherpa-onnx-offline-parallel "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib") | ||
| 190 | - target_link_libraries(sherpa-onnx-offline-tts "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib") | ||
| 191 | - endif() | 184 | + if(SHERPA_ONNX_ENABLE_PYTHON) |
| 185 | + target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib") | ||
| 186 | + endif() | ||
| 187 | + endforeach() | ||
| 192 | endif() | 188 | endif() |
| 193 | 189 | ||
| 194 | if(SHERPA_ONNX_ENABLE_PYTHON AND WIN32) | 190 | if(SHERPA_ONNX_ENABLE_PYTHON AND WIN32) |
| @@ -203,10 +199,7 @@ endif() | @@ -203,10 +199,7 @@ endif() | ||
| 203 | 199 | ||
| 204 | install( | 200 | install( |
| 205 | TARGETS | 201 | TARGETS |
| 206 | - sherpa-onnx | ||
| 207 | - sherpa-onnx-offline | ||
| 208 | - sherpa-onnx-offline-parallel | ||
| 209 | - sherpa-onnx-offline-tts | 202 | + ${main_exes} |
| 210 | DESTINATION | 203 | DESTINATION |
| 211 | bin | 204 | bin |
| 212 | ) | 205 | ) |
| @@ -224,6 +217,11 @@ if(SHERPA_ONNX_HAS_ALSA) | @@ -224,6 +217,11 @@ if(SHERPA_ONNX_HAS_ALSA) | ||
| 224 | endif() | 217 | endif() |
| 225 | 218 | ||
| 226 | if(SHERPA_ONNX_ENABLE_PORTAUDIO) | 219 | if(SHERPA_ONNX_ENABLE_PORTAUDIO) |
| 220 | + add_executable(sherpa-onnx-offline-tts-play | ||
| 221 | + sherpa-onnx-offline-tts-play.cc | ||
| 222 | + microphone.cc | ||
| 223 | + ) | ||
| 224 | + | ||
| 227 | add_executable(sherpa-onnx-microphone | 225 | add_executable(sherpa-onnx-microphone |
| 228 | sherpa-onnx-microphone.cc | 226 | sherpa-onnx-microphone.cc |
| 229 | microphone.cc | 227 | microphone.cc |
| @@ -251,6 +249,7 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO) | @@ -251,6 +249,7 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO) | ||
| 251 | endif() | 249 | endif() |
| 252 | 250 | ||
| 253 | set(exes | 251 | set(exes |
| 252 | + sherpa-onnx-offline-tts-play | ||
| 254 | sherpa-onnx-microphone | 253 | sherpa-onnx-microphone |
| 255 | sherpa-onnx-microphone-offline | 254 | sherpa-onnx-microphone-offline |
| 256 | sherpa-onnx-vad-microphone | 255 | sherpa-onnx-vad-microphone |
| @@ -267,7 +266,6 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO) | @@ -267,7 +266,6 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO) | ||
| 267 | endforeach() | 266 | endforeach() |
| 268 | 267 | ||
| 269 | if(SHERPA_ONNX_ENABLE_PYTHON) | 268 | if(SHERPA_ONNX_ENABLE_PYTHON) |
| 270 | - | ||
| 271 | foreach(exe IN LISTS exes) | 269 | foreach(exe IN LISTS exes) |
| 272 | target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib") | 270 | target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib") |
| 273 | endforeach() | 271 | endforeach() |
| @@ -343,7 +341,6 @@ if(SHERPA_ONNX_ENABLE_WEBSOCKET) | @@ -343,7 +341,6 @@ if(SHERPA_ONNX_ENABLE_WEBSOCKET) | ||
| 343 | ) | 341 | ) |
| 344 | endif() | 342 | endif() |
| 345 | 343 | ||
| 346 | - | ||
| 347 | if(SHERPA_ONNX_ENABLE_TESTS) | 344 | if(SHERPA_ONNX_ENABLE_TESTS) |
| 348 | set(sherpa_onnx_test_srcs | 345 | set(sherpa_onnx_test_srcs |
| 349 | cat-test.cc | 346 | cat-test.cc |
| @@ -28,8 +28,12 @@ class OfflineTtsImpl { | @@ -28,8 +28,12 @@ class OfflineTtsImpl { | ||
| 28 | const OfflineTtsConfig &config); | 28 | const OfflineTtsConfig &config); |
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
| 31 | - virtual GeneratedAudio Generate(const std::string &text, int64_t sid = 0, | ||
| 32 | - float speed = 1.0) const = 0; | 31 | + virtual GeneratedAudio Generate( |
| 32 | + const std::string &text, int64_t sid = 0, float speed = 1.0, | ||
| 33 | + GeneratedAudioCallback callback = nullptr) const = 0; | ||
| 34 | + | ||
| 35 | + // Return the sample rate of the generated audio | ||
| 36 | + virtual int32_t SampleRate() const = 0; | ||
| 33 | }; | 37 | }; |
| 34 | 38 | ||
| 35 | } // namespace sherpa_onnx | 39 | } // namespace sherpa_onnx |
| @@ -69,8 +69,11 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | @@ -69,8 +69,11 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | ||
| 69 | } | 69 | } |
| 70 | #endif | 70 | #endif |
| 71 | 71 | ||
| 72 | - GeneratedAudio Generate(const std::string &_text, int64_t sid = 0, | ||
| 73 | - float speed = 1.0) const override { | 72 | + int32_t SampleRate() const override { return model_->SampleRate(); } |
| 73 | + | ||
| 74 | + GeneratedAudio Generate( | ||
| 75 | + const std::string &_text, int64_t sid = 0, float speed = 1.0, | ||
| 76 | + GeneratedAudioCallback callback = nullptr) const override { | ||
| 74 | int32_t num_speakers = model_->NumSpeakers(); | 77 | int32_t num_speakers = model_->NumSpeakers(); |
| 75 | if (num_speakers == 0 && sid != 0) { | 78 | if (num_speakers == 0 && sid != 0) { |
| 76 | SHERPA_ONNX_LOGE( | 79 | SHERPA_ONNX_LOGE( |
| @@ -118,7 +121,11 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | @@ -118,7 +121,11 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | ||
| 118 | int32_t x_size = static_cast<int32_t>(x.size()); | 121 | int32_t x_size = static_cast<int32_t>(x.size()); |
| 119 | 122 | ||
| 120 | if (config_.max_num_sentences <= 0 || x_size <= config_.max_num_sentences) { | 123 | if (config_.max_num_sentences <= 0 || x_size <= config_.max_num_sentences) { |
| 121 | - return Process(x, sid, speed); | 124 | + auto ans = Process(x, sid, speed); |
| 125 | + if (callback) { | ||
| 126 | + callback(ans.samples.data(), ans.samples.size()); | ||
| 127 | + } | ||
| 128 | + return ans; | ||
| 122 | } | 129 | } |
| 123 | 130 | ||
| 124 | // the input text is too long, we process sentences within it in batches | 131 | // the input text is too long, we process sentences within it in batches |
| @@ -149,6 +156,12 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | @@ -149,6 +156,12 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | ||
| 149 | ans.sample_rate = audio.sample_rate; | 156 | ans.sample_rate = audio.sample_rate; |
| 150 | ans.samples.insert(ans.samples.end(), audio.samples.begin(), | 157 | ans.samples.insert(ans.samples.end(), audio.samples.begin(), |
| 151 | audio.samples.end()); | 158 | audio.samples.end()); |
| 159 | + if (callback) { | ||
| 160 | + callback(audio.samples.data(), audio.samples.size()); | ||
| 161 | + // Caution(fangjun): audio is freed when the callback returns, so users | ||
| 162 | + // should copy the data if they want to access the data after | ||
| 163 | + // the callback returns to avoid segmentation fault. | ||
| 164 | + } | ||
| 152 | } | 165 | } |
| 153 | 166 | ||
| 154 | batch.clear(); | 167 | batch.clear(); |
| @@ -162,6 +175,12 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | @@ -162,6 +175,12 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl { | ||
| 162 | ans.sample_rate = audio.sample_rate; | 175 | ans.sample_rate = audio.sample_rate; |
| 163 | ans.samples.insert(ans.samples.end(), audio.samples.begin(), | 176 | ans.samples.insert(ans.samples.end(), audio.samples.begin(), |
| 164 | audio.samples.end()); | 177 | audio.samples.end()); |
| 178 | + if (callback) { | ||
| 179 | + callback(audio.samples.data(), audio.samples.size()); | ||
| 180 | + // Caution(fangjun): audio is freed when the callback returns, so users | ||
| 181 | + // should copy the data if they want to access the data after | ||
| 182 | + // the callback returns to avoid segmentation fault. | ||
| 183 | + } | ||
| 165 | } | 184 | } |
| 166 | 185 | ||
| 167 | return ans; | 186 | return ans; |
| @@ -65,9 +65,12 @@ OfflineTts::OfflineTts(AAssetManager *mgr, const OfflineTtsConfig &config) | @@ -65,9 +65,12 @@ OfflineTts::OfflineTts(AAssetManager *mgr, const OfflineTtsConfig &config) | ||
| 65 | 65 | ||
| 66 | OfflineTts::~OfflineTts() = default; | 66 | OfflineTts::~OfflineTts() = default; |
| 67 | 67 | ||
| 68 | -GeneratedAudio OfflineTts::Generate(const std::string &text, int64_t sid /*=0*/, | ||
| 69 | - float speed /*= 1.0*/) const { | ||
| 70 | - return impl_->Generate(text, sid, speed); | 68 | +GeneratedAudio OfflineTts::Generate( |
| 69 | + const std::string &text, int64_t sid /*=0*/, float speed /*= 1.0*/, | ||
| 70 | + GeneratedAudioCallback callback /*= nullptr*/) const { | ||
| 71 | + return impl_->Generate(text, sid, speed, callback); | ||
| 71 | } | 72 | } |
| 72 | 73 | ||
| 74 | +int32_t OfflineTts::SampleRate() const { return impl_->SampleRate(); } | ||
| 75 | + | ||
| 73 | } // namespace sherpa_onnx | 76 | } // namespace sherpa_onnx |
| @@ -5,6 +5,7 @@ | @@ -5,6 +5,7 @@ | ||
| 5 | #define SHERPA_ONNX_CSRC_OFFLINE_TTS_H_ | 5 | #define SHERPA_ONNX_CSRC_OFFLINE_TTS_H_ |
| 6 | 6 | ||
| 7 | #include <cstdint> | 7 | #include <cstdint> |
| 8 | +#include <functional> | ||
| 8 | #include <memory> | 9 | #include <memory> |
| 9 | #include <string> | 10 | #include <string> |
| 10 | #include <vector> | 11 | #include <vector> |
| @@ -53,6 +54,9 @@ struct GeneratedAudio { | @@ -53,6 +54,9 @@ struct GeneratedAudio { | ||
| 53 | 54 | ||
| 54 | class OfflineTtsImpl; | 55 | class OfflineTtsImpl; |
| 55 | 56 | ||
| 57 | +using GeneratedAudioCallback = | ||
| 58 | + std::function<void(const float * /*samples*/, int32_t /*n*/)>; | ||
| 59 | + | ||
| 56 | class OfflineTts { | 60 | class OfflineTts { |
| 57 | public: | 61 | public: |
| 58 | ~OfflineTts(); | 62 | ~OfflineTts(); |
| @@ -67,8 +71,20 @@ class OfflineTts { | @@ -67,8 +71,20 @@ class OfflineTts { | ||
| 67 | // trained using the VCTK dataset. It is not used for | 71 | // trained using the VCTK dataset. It is not used for |
| 68 | // single-speaker models, e.g., models trained using the ljspeech | 72 | // single-speaker models, e.g., models trained using the ljspeech |
| 69 | // dataset. | 73 | // dataset. |
| 74 | + // @param speed The speed for the generated speech. E.g., 2 means 2x faster. | ||
| 75 | + // @param callback If not NULL, it is called whenever config.max_num_sentences | ||
| 76 | + // sentences have been processed. Note that the passed | ||
| 77 | + // pointer `samples` for the callback might be invalidated | ||
| 78 | + // after the callback is returned, so the caller should not | ||
| 79 | + // keep a reference to it. The caller can copy the data if | ||
| 80 | + // he/she wants to access the samples after the callback | ||
| 81 | + // returns. The callback is called in the current thread. | ||
| 70 | GeneratedAudio Generate(const std::string &text, int64_t sid = 0, | 82 | GeneratedAudio Generate(const std::string &text, int64_t sid = 0, |
| 71 | - float speed = 1.0) const; | 83 | + float speed = 1.0, |
| 84 | + GeneratedAudioCallback callback = nullptr) const; | ||
| 85 | + | ||
| 86 | + // Return the sample rate of the generated audio | ||
| 87 | + int32_t SampleRate() const; | ||
| 72 | 88 | ||
| 73 | private: | 89 | private: |
| 74 | std::unique_ptr<OfflineTtsImpl> impl_; | 90 | std::unique_ptr<OfflineTtsImpl> impl_; |
| @@ -95,7 +95,8 @@ static std::vector<int64_t> PhonemesToIds( | @@ -95,7 +95,8 @@ static std::vector<int64_t> PhonemesToIds( | ||
| 95 | ans.push_back(token2id.at(p)); | 95 | ans.push_back(token2id.at(p)); |
| 96 | ans.push_back(pad); | 96 | ans.push_back(pad); |
| 97 | } else { | 97 | } else { |
| 98 | - SHERPA_ONNX_LOGE("Skip unkown phonemes. Unicode codepoint: \\U+%04x.", p); | 98 | + SHERPA_ONNX_LOGE("Skip unknown phonemes. Unicode codepoint: \\U+%04x.", |
| 99 | + static_cast<uint32_t>(p)); | ||
| 99 | } | 100 | } |
| 100 | } | 101 | } |
| 101 | ans.push_back(eos); | 102 | ans.push_back(eos); |
| 1 | +// sherpa-onnx/csrc/sherpa-onnx-offline-tts-play.cc | ||
| 2 | +// | ||
| 3 | +// Copyright (c) 2023 Xiaomi Corporation | ||
| 4 | + | ||
| 5 | +#include <signal.h> | ||
| 6 | + | ||
| 7 | +#include <algorithm> | ||
| 8 | +#include <chrono> // NOLINT | ||
| 9 | +#include <condition_variable> // NOLINT | ||
| 10 | +#include <fstream> | ||
| 11 | +#include <mutex> // NOLINT | ||
| 12 | +#include <queue> | ||
| 13 | +#include <thread> // NOLINT | ||
| 14 | +#include <vector> | ||
| 15 | + | ||
| 16 | +#include "portaudio.h" // NOLINT | ||
| 17 | +#include "sherpa-onnx/csrc/microphone.h" | ||
| 18 | +#include "sherpa-onnx/csrc/offline-tts.h" | ||
| 19 | +#include "sherpa-onnx/csrc/parse-options.h" | ||
| 20 | +#include "sherpa-onnx/csrc/wave-writer.h" | ||
| 21 | + | ||
| 22 | +static std::condition_variable g_cv; | ||
| 23 | +static std::mutex g_cv_m; | ||
| 24 | + | ||
| 25 | +struct Samples { | ||
| 26 | + std::vector<float> data; | ||
| 27 | + int32_t consumed = 0; | ||
| 28 | +}; | ||
| 29 | + | ||
| 30 | +struct Buffer { | ||
| 31 | + std::queue<Samples> samples; | ||
| 32 | + std::mutex mutex; | ||
| 33 | +}; | ||
| 34 | + | ||
| 35 | +static Buffer g_buffer; | ||
| 36 | + | ||
| 37 | +static bool g_started = false; | ||
| 38 | +static bool g_stopped = false; | ||
| 39 | +static bool g_killed = false; | ||
| 40 | + | ||
| 41 | +static void Handler(int32_t /*sig*/) { | ||
| 42 | + if (g_killed) { | ||
| 43 | + exit(0); | ||
| 44 | + } | ||
| 45 | + | ||
| 46 | + g_killed = true; | ||
| 47 | + fprintf(stderr, "\nCaught Ctrl + C. Exiting\n"); | ||
| 48 | +} | ||
| 49 | + | ||
| 50 | +static void AudioGeneratedCallback(const float *s, int32_t n) { | ||
| 51 | + if (n > 0) { | ||
| 52 | + Samples samples; | ||
| 53 | + samples.data = std::vector<float>{s, s + n}; | ||
| 54 | + | ||
| 55 | + std::lock_guard<std::mutex> lock(g_buffer.mutex); | ||
| 56 | + g_buffer.samples.push(std::move(samples)); | ||
| 57 | + g_started = true; | ||
| 58 | + } | ||
| 59 | +} | ||
| 60 | + | ||
| 61 | +static int PlayCallback(const void * /*in*/, void *out, | ||
| 62 | + unsigned long n, // NOLINT | ||
| 63 | + const PaStreamCallbackTimeInfo * /*time_info*/, | ||
| 64 | + PaStreamCallbackFlags /*status_flags*/, | ||
| 65 | + void * /*user_data*/) { | ||
| 66 | + if (g_killed) { | ||
| 67 | + return paComplete; | ||
| 68 | + } | ||
| 69 | + | ||
| 70 | + float *pout = reinterpret_cast<float *>(out); | ||
| 71 | + std::lock_guard<std::mutex> lock(g_buffer.mutex); | ||
| 72 | + | ||
| 73 | + if (g_buffer.samples.empty()) { | ||
| 74 | + if (g_stopped) { | ||
| 75 | + // no more data is available and we have processed all of the samples | ||
| 76 | + return paComplete; | ||
| 77 | + } | ||
| 78 | + | ||
| 79 | + // Very unlikely, but the current sentence may be so long that | ||
| 80 | + // the model has not finished processing it yet. | ||
| 81 | + std::fill_n(pout, n, 0); | ||
| 82 | + | ||
| 83 | + return paContinue; | ||
| 84 | + } | ||
| 85 | + | ||
| 86 | + int32_t k = 0; | ||
| 87 | + for (; k < n && !g_buffer.samples.empty();) { | ||
| 88 | + int32_t this_block = n - k; | ||
| 89 | + | ||
| 90 | + auto &p = g_buffer.samples.front(); | ||
| 91 | + | ||
| 92 | + int32_t remaining = p.data.size() - p.consumed; | ||
| 93 | + | ||
| 94 | + if (this_block <= remaining) { | ||
| 95 | + std::copy(p.data.begin() + p.consumed, | ||
| 96 | + p.data.begin() + p.consumed + this_block, pout + k); | ||
| 97 | + p.consumed += this_block; | ||
| 98 | + | ||
| 99 | + k = n; | ||
| 100 | + | ||
| 101 | + if (p.consumed == p.data.size()) { | ||
| 102 | + g_buffer.samples.pop(); | ||
| 103 | + } | ||
| 104 | + break; | ||
| 105 | + } | ||
| 106 | + | ||
| 107 | + std::copy(p.data.begin() + p.consumed, p.data.end(), pout + k); | ||
| 108 | + k += p.data.size() - p.consumed; | ||
| 109 | + g_buffer.samples.pop(); | ||
| 110 | + } | ||
| 111 | + | ||
| 112 | + if (k < n) { | ||
| 113 | + std::fill_n(pout + k, n - k, 0); | ||
| 114 | + } | ||
| 115 | + | ||
| 116 | + if (g_stopped && g_buffer.samples.empty()) { | ||
| 117 | + return paComplete; | ||
| 118 | + } | ||
| 119 | + | ||
| 120 | + return paContinue; | ||
| 121 | +} | ||
| 122 | + | ||
| 123 | +static void PlayCallbackFinished(void *userData) { g_cv.notify_all(); } | ||
| 124 | + | ||
| 125 | +static void StartPlayback(int32_t sample_rate) { | ||
| 126 | + int32_t frames_per_buffer = 1024; | ||
| 127 | + PaStreamParameters outputParameters; | ||
| 128 | + PaStream *stream; | ||
| 129 | + PaError err; | ||
| 130 | + | ||
| 131 | + outputParameters.device = | ||
| 132 | + Pa_GetDefaultOutputDevice(); /* default output device */ | ||
| 133 | + | ||
| 134 | + outputParameters.channelCount = 1; /* mono output */ | ||
| 135 | + outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */ | ||
| 136 | + outputParameters.suggestedLatency = | ||
| 137 | + Pa_GetDeviceInfo(outputParameters.device)->defaultLowOutputLatency; | ||
| 138 | + outputParameters.hostApiSpecificStreamInfo = nullptr; | ||
| 139 | + | ||
| 140 | + err = Pa_OpenStream(&stream, nullptr, /* no input */ | ||
| 141 | + &outputParameters, sample_rate, frames_per_buffer, | ||
| 142 | + paClipOff, // we won't output out of range samples so | ||
| 143 | + // don't bother clipping them | ||
| 144 | + PlayCallback, nullptr); | ||
| 145 | + if (err != paNoError) { | ||
| 146 | + fprintf(stderr, "%d portaudio error: %s\n", __LINE__, Pa_GetErrorText(err)); | ||
| 147 | + return; | ||
| 148 | + } | ||
| 149 | + | ||
| 150 | + err = Pa_SetStreamFinishedCallback(stream, &PlayCallbackFinished); | ||
| 151 | + if (err != paNoError) { | ||
| 152 | + fprintf(stderr, "%d portaudio error: %s\n", __LINE__, Pa_GetErrorText(err)); | ||
| 153 | + return; | ||
| 154 | + } | ||
| 155 | + | ||
| 156 | + err = Pa_StartStream(stream); | ||
| 157 | + if (err != paNoError) { | ||
| 158 | + fprintf(stderr, "%d portaudio error: %s\n", __LINE__, Pa_GetErrorText(err)); | ||
| 159 | + return; | ||
| 160 | + } | ||
| 161 | + | ||
| 162 | + std::unique_lock<std::mutex> lock(g_cv_m); | ||
| 163 | + while (!g_killed && !g_stopped && | ||
| 164 | + (!g_started || (g_started && !g_buffer.samples.empty()))) { | ||
| 165 | + g_cv.wait(lock); | ||
| 166 | + } | ||
| 167 | + | ||
| 168 | + err = Pa_StopStream(stream); | ||
| 169 | + if (err != paNoError) { | ||
| 170 | + return; | ||
| 171 | + } | ||
| 172 | + | ||
| 173 | + err = Pa_CloseStream(stream); | ||
| 174 | + if (err != paNoError) { | ||
| 175 | + return; | ||
| 176 | + } | ||
| 177 | +} | ||
| 178 | + | ||
| 179 | +int main(int32_t argc, char *argv[]) { | ||
| 180 | + signal(SIGINT, Handler); | ||
| 181 | + | ||
| 182 | + const char *kUsageMessage = R"usage( | ||
| 183 | +Offline text-to-speech with sherpa-onnx. | ||
| 184 | + | ||
| 185 | +It plays the generated audio as the model is processing. | ||
| 186 | + | ||
| 187 | +Usage example: | ||
| 188 | + | ||
| 189 | +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2 | ||
| 190 | +tar xf vits-piper-en_US-amy-low.tar.bz2 | ||
| 191 | + | ||
| 192 | +./bin/sherpa-onnx-offline-tts-play \ | ||
| 193 | + --vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \ | ||
| 194 | + --vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \ | ||
| 195 | + --vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \ | ||
| 196 | + --output-filename=./generated.wav \ | ||
| 197 | + "Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar." | ||
| 198 | + | ||
| 199 | +It will generate a file ./generated.wav as specified by --output-filename. | ||
| 200 | + | ||
| 201 | +You can find more models at | ||
| 202 | +https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models | ||
| 203 | + | ||
| 204 | +Please see | ||
| 205 | +https://k2-fsa.github.io/sherpa/onnx/tts/index.html | ||
| 206 | +for details. | ||
| 207 | +)usage"; | ||
| 208 | + | ||
| 209 | + sherpa_onnx::ParseOptions po(kUsageMessage); | ||
| 210 | + std::string output_filename = "./generated.wav"; | ||
| 211 | + int32_t sid = 0; | ||
| 212 | + | ||
| 213 | + po.Register("output-filename", &output_filename, | ||
| 214 | + "Path to save the generated audio"); | ||
| 215 | + | ||
| 216 | + po.Register("sid", &sid, | ||
| 217 | + "Speaker ID. Used only for multi-speaker models, e.g., models " | ||
| 218 | + "trained using the VCTK dataset. Not used for single-speaker " | ||
| 219 | + "models, e.g., models trained using the LJSpeech dataset"); | ||
| 220 | + | ||
| 221 | + sherpa_onnx::OfflineTtsConfig config; | ||
| 222 | + | ||
| 223 | + config.Register(&po); | ||
| 224 | + po.Read(argc, argv); | ||
| 225 | + | ||
| 226 | + if (po.NumArgs() == 0) { | ||
| 227 | + fprintf(stderr, "Error: Please provide the text to generate audio.\n\n"); | ||
| 228 | + po.PrintUsage(); | ||
| 229 | + exit(EXIT_FAILURE); | ||
| 230 | + } | ||
| 231 | + | ||
| 232 | + if (po.NumArgs() > 1) { | ||
| 233 | + fprintf(stderr, | ||
| 234 | + "Error: Accept only one positional argument. Please use single " | ||
| 235 | + "quotes to wrap your text\n"); | ||
| 236 | + po.PrintUsage(); | ||
| 237 | + exit(EXIT_FAILURE); | ||
| 238 | + } | ||
| 239 | + | ||
| 240 | + if (!config.Validate()) { | ||
| 241 | + fprintf(stderr, "Errors in config!\n"); | ||
| 242 | + exit(EXIT_FAILURE); | ||
| 243 | + } | ||
| 244 | + | ||
| 245 | + sherpa_onnx::Microphone mic; | ||
| 246 | + | ||
| 247 | + PaDeviceIndex num_devices = Pa_GetDeviceCount(); | ||
| 248 | + fprintf(stderr, "Num devices: %d\n", num_devices); | ||
| 249 | + | ||
| 250 | + PaStreamParameters param; | ||
| 251 | + | ||
| 252 | + param.device = Pa_GetDefaultOutputDevice(); | ||
| 253 | + if (param.device == paNoDevice) { | ||
| 254 | + fprintf(stderr, "No default output device found\n"); | ||
| 255 | + exit(EXIT_FAILURE); | ||
| 256 | + } | ||
| 257 | + fprintf(stderr, "Use default device: %d\n", param.device); | ||
| 258 | + | ||
| 259 | + const PaDeviceInfo *info = Pa_GetDeviceInfo(param.device); | ||
| 260 | + fprintf(stderr, " Name: %s\n", info->name); | ||
| 261 | + fprintf(stderr, " Max output channels: %d\n", info->maxOutputChannels); | ||
| 262 | + | ||
| 263 | + if (config.max_num_sentences != 1) { | ||
| 264 | + fprintf(stderr, "Setting config.max_num_sentences to 1\n"); | ||
| 265 | + config.max_num_sentences = 1; | ||
| 266 | + } | ||
| 267 | + | ||
| 268 | + fprintf(stderr, "Loading the model\n"); | ||
| 269 | + sherpa_onnx::OfflineTts tts(config); | ||
| 270 | + | ||
| 271 | + fprintf(stderr, "Start the playback thread\n"); | ||
| 272 | + std::thread playback_thread(StartPlayback, tts.SampleRate()); | ||
| 273 | + | ||
| 274 | + float speed = 1.0; | ||
| 275 | + | ||
| 276 | + fprintf(stderr, "Generating ...\n"); | ||
| 277 | + const auto begin = std::chrono::steady_clock::now(); | ||
| 278 | + auto audio = tts.Generate(po.GetArg(1), sid, speed, AudioGeneratedCallback); | ||
| 279 | + const auto end = std::chrono::steady_clock::now(); | ||
| 280 | + g_stopped = true; | ||
| 281 | + fprintf(stderr, "Generating done!\n"); | ||
| 282 | + if (audio.samples.empty()) { | ||
| 283 | + fprintf( | ||
| 284 | + stderr, | ||
| 285 | + "Error in generating audio. Please read previous error messages.\n"); | ||
| 286 | + exit(EXIT_FAILURE); | ||
| 287 | + } | ||
| 288 | + | ||
| 289 | + float elapsed_seconds = | ||
| 290 | + std::chrono::duration_cast<std::chrono::milliseconds>(end - begin) | ||
| 291 | + .count() / | ||
| 292 | + 1000.; | ||
| 293 | + float duration = audio.samples.size() / static_cast<float>(audio.sample_rate); | ||
| 294 | + | ||
| 295 | + float rtf = elapsed_seconds / duration; | ||
| 296 | + fprintf(stderr, "Elapsed seconds: %.3f s\n", elapsed_seconds); | ||
| 297 | + fprintf(stderr, "Audio duration: %.3f s\n", duration); | ||
| 298 | + fprintf(stderr, "Real-time factor (RTF): %.3f/%.3f = %.3f\n", elapsed_seconds, | ||
| 299 | + duration, rtf); | ||
| 300 | + | ||
| 301 | + bool ok = sherpa_onnx::WriteWave(output_filename, audio.sample_rate, | ||
| 302 | + audio.samples.data(), audio.samples.size()); | ||
| 303 | + if (!ok) { | ||
| 304 | + fprintf(stderr, "Failed to write wave to %s\n", output_filename.c_str()); | ||
| 305 | + exit(EXIT_FAILURE); | ||
| 306 | + } | ||
| 307 | + | ||
| 308 | + fprintf(stderr, "The text is: %s. Speaker ID: %d\n\n", po.GetArg(1).c_str(), | ||
| 309 | + sid); | ||
| 310 | + fprintf(stderr, "\n**** Saved to %s successfully! ****\n", | ||
| 311 | + output_filename.c_str()); | ||
| 312 | + | ||
| 313 | + fprintf(stderr, "\n"); | ||
| 314 | + fprintf( | ||
| 315 | + stderr, | ||
| 316 | + "Wait for the playback to finish. You can safely press ctrl + C to stop " | ||
| 317 | + "the playback.\n"); | ||
| 318 | + playback_thread.join(); | ||
| 319 | + | ||
| 320 | + fprintf(stderr, "Done!\n"); | ||
| 321 | + | ||
| 322 | + return 0; | ||
| 323 | +} |
| @@ -2,6 +2,7 @@ | @@ -2,6 +2,7 @@ | ||
| 2 | // | 2 | // |
| 3 | // Copyright (c) 2023 Xiaomi Corporation | 3 | // Copyright (c) 2023 Xiaomi Corporation |
| 4 | 4 | ||
| 5 | +#include <chrono> // NOLINT | ||
| 5 | #include <fstream> | 6 | #include <fstream> |
| 6 | 7 | ||
| 7 | #include "sherpa-onnx/csrc/offline-tts.h" | 8 | #include "sherpa-onnx/csrc/offline-tts.h" |
| @@ -12,31 +13,22 @@ int main(int32_t argc, char *argv[]) { | @@ -12,31 +13,22 @@ int main(int32_t argc, char *argv[]) { | ||
| 12 | const char *kUsageMessage = R"usage( | 13 | const char *kUsageMessage = R"usage( |
| 13 | Offline text-to-speech with sherpa-onnx | 14 | Offline text-to-speech with sherpa-onnx |
| 14 | 15 | ||
| 16 | +Usage example: | ||
| 17 | + | ||
| 18 | +wget https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2 | ||
| 19 | +tar xf vits-piper-en_US-amy-low.tar.bz2 | ||
| 20 | + | ||
| 15 | ./bin/sherpa-onnx-offline-tts \ | 21 | ./bin/sherpa-onnx-offline-tts \ |
| 16 | - --vits-model=/path/to/model.onnx \ | ||
| 17 | - --vits-lexicon=/path/to/lexicon.txt \ | ||
| 18 | - --vits-tokens=/path/to/tokens.txt \ | ||
| 19 | - --sid=0 \ | 22 | + --vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \ |
| 23 | + --vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \ | ||
| 24 | + --vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \ | ||
| 20 | --output-filename=./generated.wav \ | 25 | --output-filename=./generated.wav \ |
| 21 | - 'some text within single quotes on linux/macos or use double quotes on windows' | 26 | + "Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar." |
| 22 | 27 | ||
| 23 | It will generate a file ./generated.wav as specified by --output-filename. | 28 | It will generate a file ./generated.wav as specified by --output-filename. |
| 24 | 29 | ||
| 25 | -You can download a test model from | ||
| 26 | -https://huggingface.co/csukuangfj/vits-ljs | ||
| 27 | - | ||
| 28 | -For instance, you can use: | ||
| 29 | -wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/vits-ljs.onnx | ||
| 30 | -wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/lexicon.txt | ||
| 31 | -wget https://huggingface.co/csukuangfj/vits-ljs/resolve/main/tokens.txt | ||
| 32 | - | ||
| 33 | -./bin/sherpa-onnx-offline-tts \ | ||
| 34 | - --vits-model=./vits-ljs.onnx \ | ||
| 35 | - --vits-lexicon=./lexicon.txt \ | ||
| 36 | - --vits-tokens=./tokens.txt \ | ||
| 37 | - --sid=0 \ | ||
| 38 | - --output-filename=./generated.wav \ | ||
| 39 | - 'liliana, the most beautiful and lovely assistant of our team!' | 30 | +You can find more models at |
| 31 | +https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models | ||
| 40 | 32 | ||
| 41 | Please see | 33 | Please see |
| 42 | https://k2-fsa.github.io/sherpa/onnx/tts/index.html | 34 | https://k2-fsa.github.io/sherpa/onnx/tts/index.html |
| @@ -80,14 +72,30 @@ or details. | @@ -80,14 +72,30 @@ or details. | ||
| 80 | } | 72 | } |
| 81 | 73 | ||
| 82 | sherpa_onnx::OfflineTts tts(config); | 74 | sherpa_onnx::OfflineTts tts(config); |
| 75 | + | ||
| 76 | + const auto begin = std::chrono::steady_clock::now(); | ||
| 83 | auto audio = tts.Generate(po.GetArg(1), sid); | 77 | auto audio = tts.Generate(po.GetArg(1), sid); |
| 78 | + const auto end = std::chrono::steady_clock::now(); | ||
| 79 | + | ||
| 84 | if (audio.samples.empty()) { | 80 | if (audio.samples.empty()) { |
| 85 | fprintf( | 81 | fprintf( |
| 86 | stderr, | 82 | stderr, |
| 87 | - "Error in generating audios. Please read previous error messages.\n"); | 83 | + "Error in generating audio. Please read previous error messages.\n"); |
| 88 | exit(EXIT_FAILURE); | 84 | exit(EXIT_FAILURE); |
| 89 | } | 85 | } |
| 90 | 86 | ||
| 87 | + float elapsed_seconds = | ||
| 88 | + std::chrono::duration_cast<std::chrono::milliseconds>(end - begin) | ||
| 89 | + .count() / | ||
| 90 | + 1000.; | ||
| 91 | + float duration = audio.samples.size() / static_cast<float>(audio.sample_rate); | ||
| 92 | + | ||
| 93 | + float rtf = elapsed_seconds / duration; | ||
| 94 | + fprintf(stderr, "Elapsed seconds: %.3f s\n", elapsed_seconds); | ||
| 95 | + fprintf(stderr, "Audio duration: %.3f s\n", duration); | ||
| 96 | + fprintf(stderr, "Real-time factor (RTF): %.3f/%.3f = %.3f\n", elapsed_seconds, | ||
| 97 | + duration, rtf); | ||
| 98 | + | ||
| 91 | bool ok = sherpa_onnx::WriteWave(output_filename, audio.sample_rate, | 99 | bool ok = sherpa_onnx::WriteWave(output_filename, audio.sample_rate, |
| 92 | audio.samples.data(), audio.samples.size()); | 100 | audio.samples.data(), audio.samples.size()); |
| 93 | if (!ok) { | 101 | if (!ok) { |
| @@ -3,6 +3,7 @@ | @@ -3,6 +3,7 @@ | ||
| 3 | // Copyright (c) 2023 Xiaomi Corporation | 3 | // Copyright (c) 2023 Xiaomi Corporation |
| 4 | #include "sherpa-onnx/python/csrc/offline-tts.h" | 4 | #include "sherpa-onnx/python/csrc/offline-tts.h" |
| 5 | 5 | ||
| 6 | +#include <algorithm> | ||
| 6 | #include <string> | 7 | #include <string> |
| 7 | 8 | ||
| 8 | #include "sherpa-onnx/csrc/offline-tts.h" | 9 | #include "sherpa-onnx/csrc/offline-tts.h" |
| @@ -48,8 +49,35 @@ void PybindOfflineTts(py::module *m) { | @@ -48,8 +49,35 @@ void PybindOfflineTts(py::module *m) { | ||
| 48 | using PyClass = OfflineTts; | 49 | using PyClass = OfflineTts; |
| 49 | py::class_<PyClass>(*m, "OfflineTts") | 50 | py::class_<PyClass>(*m, "OfflineTts") |
| 50 | .def(py::init<const OfflineTtsConfig &>(), py::arg("config")) | 51 | .def(py::init<const OfflineTtsConfig &>(), py::arg("config")) |
| 51 | - .def("generate", &PyClass::Generate, py::arg("text"), py::arg("sid") = 0, | ||
| 52 | - py::arg("speed") = 1.0, py::call_guard<py::gil_scoped_release>()); | 52 | + .def_property_readonly("sample_rate", &PyClass::SampleRate) |
| 53 | + .def( | ||
| 54 | + "generate", | ||
| 55 | + [](const PyClass &self, const std::string &text, int64_t sid, | ||
| 56 | + float speed, std::function<void(py::array_t<float>)> callback) | ||
| 57 | + -> GeneratedAudio { | ||
| 58 | + if (!callback) { | ||
| 59 | + return self.Generate(text, sid, speed); | ||
| 60 | + } | ||
| 61 | + | ||
| 62 | + std::function<void(const float *, int32_t)> callback_wrapper = | ||
| 63 | + [callback](const float *samples, int32_t n) { | ||
| 64 | + // CAUTION(fangjun): we have to copy samples since it is | ||
| 65 | + // freed once the call back returns. | ||
| 66 | + | ||
| 67 | + pybind11::gil_scoped_acquire acquire; | ||
| 68 | + | ||
| 69 | + pybind11::array_t<float> array(n); | ||
| 70 | + py::buffer_info buf = array.request(); | ||
| 71 | + auto p = static_cast<float *>(buf.ptr); | ||
| 72 | + std::copy(samples, samples + n, p); | ||
| 73 | + callback(array); | ||
| 74 | + }; | ||
| 75 | + | ||
| 76 | + return self.Generate(text, sid, speed, callback_wrapper); | ||
| 77 | + }, | ||
| 78 | + py::arg("text"), py::arg("sid") = 0, py::arg("speed") = 1.0, | ||
| 79 | + py::arg("callback") = py::none(), | ||
| 80 | + py::call_guard<py::gil_scoped_release>()); | ||
| 53 | } | 81 | } |
| 54 | 82 | ||
| 55 | } // namespace sherpa_onnx | 83 | } // namespace sherpa_onnx |
| @@ -5,6 +5,7 @@ | @@ -5,6 +5,7 @@ | ||
| 5 | #ifndef SHERPA_ONNX_PYTHON_CSRC_SHERPA_ONNX_H_ | 5 | #ifndef SHERPA_ONNX_PYTHON_CSRC_SHERPA_ONNX_H_ |
| 6 | #define SHERPA_ONNX_PYTHON_CSRC_SHERPA_ONNX_H_ | 6 | #define SHERPA_ONNX_PYTHON_CSRC_SHERPA_ONNX_H_ |
| 7 | 7 | ||
| 8 | +#include "pybind11/functional.h" | ||
| 8 | #include "pybind11/numpy.h" | 9 | #include "pybind11/numpy.h" |
| 9 | #include "pybind11/pybind11.h" | 10 | #include "pybind11/pybind11.h" |
| 10 | #include "pybind11/stl.h" | 11 | #include "pybind11/stl.h" |
-
请注册或登录后发表评论