Fangjun Kuang
Committed by GitHub

Use piper-phonemize to convert text to token IDs (#453)

Showing 55 changed files with 1,037 additions and 181 deletions
... ... @@ -52,14 +52,13 @@ node ./test-online-transducer.js
rm -rf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20
# offline tts
curl -LS -O https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-vctk.tar.bz2
tar xvf vits-vctk.tar.bz2
rm vits-vctk.tar.bz2
curl -LS -O https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xf vits-piper-en_US-amy-low.tar.bz2
node ./test-offline-tts-en.js
rm -rf vits-vctk
rm vits-piper-en_US-amy-low.tar.bz2
curl -LS -O https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-zh-aishell3.tar.bz2
tar xvf vits-zh-aishell3.tar.bz2
rm vits-zh-aishell3.tar.bz2
node ./test-offline-tts-zh.js
rm -rf vits-zh-aishell3
rm vits-zh-aishell3.tar.bz2
... ...
... ... @@ -17,6 +17,24 @@ which $EXE
mkdir ./tts
log "------------------------------------------------------------"
log "vits-piper-en_US-amy-low"
log "------------------------------------------------------------"
curl -O -SL https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xf vits-piper-en_US-amy-low.tar.bz2
rm vits-piper-en_US-amy-low.tar.bz2
$EXE \
--vits-model=./vits-piper-en_US-amy-low/en_US-amy-low.onnx \
--vits-tokens=./vits-piper-en_US-amy-low/tokens.txt \
--vits-data-dir=./vits-piper-en_US-amy-low/espeak-ng-data \
--debug=1 \
--output-filename=./tts/amy.wav \
"“Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar.” The sun shone bleakly in the sky, its meager light struggling to penetrate the thick foliage of the forest. Birds sang their songs up in the crowns of the trees, fluttering from one branch to the other. A blanket of total tranquility lied over the forest. The peace was only broken by the steady gallop of the horses of the soldiers who were traveling to their upcoming knighting the morrow at Camelot, and rowdy conversation. “Finally we will get what we deserve,” “It’s been about time,” Perceval agreed. “We’ve been risking our arses for the past two years. It’s the least they could give us.” Merlin remained ostensibly silent, refusing to join the verbal parade of self-aggrandizing his fellow soldiers have engaged in. He found it difficult to happy about anything, when even if they had won the war, he had lost everything else in the process."
file ./tts/amy.wav
rm -rf vits-piper-en_US-amy-low
log "------------------------------------------------------------"
log "vits-ljs test"
log "------------------------------------------------------------"
... ...
... ... @@ -26,8 +26,8 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest]
total: ["12"]
index: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"]
total: ["30"]
index: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29"]
steps:
- uses: actions/checkout@v4
... ...
name: test-build-wheel
on:
push:
branches:
- master
pull_request:
workflow_dispatch:
concurrency:
group: test-build-wheel-${{ github.ref }}
cancel-in-progress: true
jobs:
test-build-wheel:
name: ${{ matrix.os }} ${{ matrix.python_version }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: ["3.8", "3.9", "3.10", "3.11"]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: ccache
uses: hendrikmuhs/ccache-action@v1.2
with:
key: ${{ matrix.os }}-${{ matrix.python_version }}
- name: Install python dependencies
shell: bash
run: |
python3 -m pip install --upgrade pip
python3 -m pip install wheel twine setuptools
- name: Build
shell: bash
run: |
export CMAKE_CXX_COMPILER_LAUNCHER=ccache
export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH"
cmake --version
export SHERPA_ONNX_MAKE_ARGS="VERBOSE=1 -j"
python3 setup.py bdist_wheel
ls -lh dist
- name: Display wheel
shell: bash
run: |
ls -lh dist
- name: Install wheel
shell: bash
run: |
pip install --verbose ./dist/*.whl
- name: Test
shell: bash
run: |
# For windows
export PATH=/c/hostedtoolcache/windows/Python/3.7.9/x64/bin:$PATH
export PATH=/c/hostedtoolcache/windows/Python/3.8.10/x64/bin:$PATH
export PATH=/c/hostedtoolcache/windows/Python/3.9.13/x64/bin:$PATH
export PATH=/c/hostedtoolcache/windows/Python/3.10.11/x64/bin:$PATH
export PATH=/c/hostedtoolcache/windows/Python/3.11.6/x64/bin:$PATH
which sherpa-onnx
sherpa-onnx --help
... ...
... ... @@ -70,6 +70,10 @@ jobs:
mkdir -p scripts/nodejs/lib/win-x64
dst=scripts/nodejs/lib/win-x64
fi
ls -lh build/install/lib/
rm -rf build/install/lib/pkgconfig
cp -v build/install/lib/* $dst/
- name: replace files
... ...
... ... @@ -77,3 +77,6 @@ xcuserdata/
vits-vctk
vits-zh-aishell3
jslint.mjs
vits-piper-en_US-amy-low
vits-piper-*-*-*
log
... ...
... ... @@ -2,6 +2,8 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools">
<uses-permission android:name="android.permission.WRITE_INTERNAL_STORAGE" />
<application
android:allowBackup="true"
android:dataExtractionRules="@xml/data_extraction_rules"
... ...
package com.k2fsa.sherpa.onnx
import android.content.res.AssetManager
import android.media.MediaPlayer
import android.net.Uri
import android.os.Bundle
... ... @@ -9,6 +10,8 @@ import android.widget.EditText
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import java.io.File
import java.io.FileOutputStream
import java.io.IOException
const val TAG = "sherpa-onnx"
... ... @@ -19,7 +22,6 @@ class MainActivity : AppCompatActivity() {
private lateinit var speed: EditText
private lateinit var generate: Button
private lateinit var play: Button
private var hasFile: Boolean = false
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
... ... @@ -46,10 +48,10 @@ class MainActivity : AppCompatActivity() {
val sampleText = ""
text.setText(sampleText)
play.isEnabled = false;
play.isEnabled = false
}
fun onClickGenerate() {
private fun onClickGenerate() {
val sidInt = sid.text.toString().toIntOrNull()
if (sidInt == null || sidInt < 0) {
Toast.makeText(
... ... @@ -77,7 +79,7 @@ class MainActivity : AppCompatActivity() {
return
}
play.isEnabled = false;
play.isEnabled = false
val audio = tts.generate(text = textStr, sid = sidInt, speed = speedFloat)
val filename = application.filesDir.absolutePath + "/generated.wav"
... ... @@ -89,7 +91,7 @@ class MainActivity : AppCompatActivity() {
}
}
fun onClickPlay() {
private fun onClickPlay() {
val filename = application.filesDir.absolutePath + "/generated.wav"
val mediaPlayer = MediaPlayer.create(
applicationContext,
... ... @@ -98,10 +100,13 @@ class MainActivity : AppCompatActivity() {
mediaPlayer.start()
}
fun initTts() {
var modelDir :String?
var modelName :String?
private fun initTts() {
var modelDir: String?
var modelName: String?
var ruleFsts: String?
var lexicon: String?
var dataDir: String?
var assets: AssetManager? = application.assets
// The purpose of such a design is to make the CI test easier
// Please see
... ... @@ -109,21 +114,90 @@ class MainActivity : AppCompatActivity() {
modelDir = null
modelName = null
ruleFsts = null
lexicon = null
dataDir = null
// Example 1:
// modelDir = "vits-vctk"
// modelName = "vits-vctk.onnx"
// lexicon = "lexicon.txt"
// Example 2:
// modelDir = "vits-piper-en_US-lessac-medium"
// modelName = "en_US-lessac-medium.onnx"
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
// https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
// modelDir = "vits-piper-en_US-amy-low"
// modelName = "en_US-amy-low.onnx"
// dataDir = "vits-piper-en_US-amy-low/espeak-ng-data"
// Example 3:
// modelDir = "vits-zh-aishell3"
// modelName = "vits-aishell3.onnx"
// ruleFsts = "vits-zh-aishell3/rule.fst"
// lexicon = "lexicon.txt"
val config = getOfflineTtsConfig(modelDir = modelDir!!, modelName = modelName!!, ruleFsts = ruleFsts ?: "")!!
tts = OfflineTts(assetManager = application.assets, config = config)
if (dataDir != null) {
val newDir = copyDataDir(modelDir)
modelDir = newDir + "/" + modelDir
dataDir = newDir + "/" + dataDir
assets = null
}
val config = getOfflineTtsConfig(
modelDir = modelDir!!, modelName = modelName!!, lexicon = lexicon ?: "",
dataDir = dataDir ?: "",
ruleFsts = ruleFsts ?: ""
)!!
tts = OfflineTts(assetManager = assets, config = config)
}
// Copies `dataDir` out of the APK assets and returns the absolute path of
// the app's external-files directory that now contains it.
private fun copyDataDir(dataDir: String): String {
    println("data dir is $dataDir")
    copyAssets(dataDir)

    val externalRoot = application.getExternalFilesDir(null)!!.absolutePath
    println("newDataDir: $externalRoot")
    return externalRoot
}
// Recursively copies the asset tree rooted at `path` from the APK assets
// into the app's external files directory, preserving the directory layout.
// The code relies on AssetManager.list() returning an empty array for a
// plain file: an empty listing means `path` is a file and is copied via
// copyFile(); otherwise each child entry is visited recursively.
// IOExceptions are logged and swallowed (best-effort copy).
private fun copyAssets(path: String) {
    val assets: Array<String>?
    try {
        assets = application.assets.list(path)
        if (assets!!.isEmpty()) {
            // Leaf: `path` names a single asset file.
            copyFile(path)
        } else {
            // Directory: mirror it under the external files dir, then recurse.
            val fullPath = "${application.getExternalFilesDir(null)}/$path"
            val dir = File(fullPath)
            dir.mkdirs()
            for (asset in assets.iterator()) {
                // Avoid producing a leading "/" when recursing from the asset root.
                val p: String = if (path == "") "" else path + "/"
                copyAssets(p + asset)
            }
        }
    } catch (ex: IOException) {
        Log.e(TAG, "Failed to copy $path. ${ex.toString()}")
    }
}
// Copies a single asset file `filename` from the APK assets into the app's
// external files directory. Failures are logged and swallowed, matching the
// best-effort behavior of copyAssets().
private fun copyFile(filename: String) {
    try {
        // use {} guarantees both streams are closed even if the copy throws.
        // The original closed them manually after the loop, leaking both
        // streams whenever a read or write raised an exception.
        application.assets.open(filename).use { istream ->
            val newFilename = application.getExternalFilesDir(null).toString() + "/" + filename
            // Log.i(TAG, "Copying $filename to $newFilename")
            FileOutputStream(newFilename).use { ostream ->
                // copyTo() replaces the hand-rolled loop, which wrote the
                // buffer before the first read (a confusing 0-byte write).
                istream.copyTo(ostream)
            }
        }
    } catch (ex: Exception) {
        Log.e(TAG, "Failed to copy $filename, ${ex.toString()}")
    }
}
}
... ...
... ... @@ -5,8 +5,9 @@ import android.content.res.AssetManager
data class OfflineTtsVitsModelConfig(
var model: String,
var lexicon: String,
var lexicon: String = "",
var tokens: String,
var dataDir: String = "",
var noiseScale: Float = 0.667f,
var noiseScaleW: Float = 0.8f,
var lengthScale: Float = 1.0f,
... ... @@ -22,6 +23,7 @@ data class OfflineTtsModelConfig(
data class OfflineTtsConfig(
var model: OfflineTtsModelConfig,
var ruleFsts: String = "",
var maxNumSentences: Int = 2,
)
class GeneratedAudio(
... ... @@ -117,18 +119,25 @@ class OfflineTts(
// please refer to
// https://k2-fsa.github.io/sherpa/onnx/tts/pretrained_models/index.html
// to download models
fun getOfflineTtsConfig(modelDir: String, modelName: String, ruleFsts: String): OfflineTtsConfig? {
fun getOfflineTtsConfig(
modelDir: String,
modelName: String,
lexicon: String,
dataDir: String,
ruleFsts: String
): OfflineTtsConfig? {
return OfflineTtsConfig(
model = OfflineTtsModelConfig(
vits = OfflineTtsVitsModelConfig(
model = "$modelDir/$modelName",
lexicon = "$modelDir/lexicon.txt",
tokens = "$modelDir/tokens.txt"
lexicon = "$modelDir/$lexicon",
tokens = "$modelDir/tokens.txt",
dataDir = "$dataDir"
),
numThreads = 2,
debug = true,
provider = "cpu",
),
ruleFsts=ruleFsts,
ruleFsts = ruleFsts,
)
}
... ...
... ... @@ -92,3 +92,4 @@ cmake -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake"
make -j4
make install/strip
cp -fv android-onnxruntime-libs/jni/arm64-v8a/libonnxruntime.so install/lib
rm -rf install/lib/pkgconfig
... ...
... ... @@ -92,3 +92,4 @@ cmake -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake"
make -j4
make install/strip
cp -fv android-onnxruntime-libs/jni/armeabi-v7a/libonnxruntime.so install/lib
rm -rf install/lib/pkgconfig
... ...
... ... @@ -94,3 +94,4 @@ cmake -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake"
make -j4
make install/strip
cp -fv android-onnxruntime-libs/jni/x86_64/libonnxruntime.so install/lib
rm -rf install/lib/pkgconfig
... ...
... ... @@ -94,3 +94,4 @@ cmake -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake"
make -j4
make install/strip
cp -fv android-onnxruntime-libs/jni/x86/libonnxruntime.so install/lib
rm -rf install/lib/pkgconfig
... ...
... ... @@ -140,7 +140,8 @@ echo "Generate xcframework"
mkdir -p "build/simulator/lib"
for f in libkaldi-native-fbank-core.a libsherpa-onnx-c-api.a libsherpa-onnx-core.a \
libsherpa-onnx-fst.a libsherpa-onnx-kaldifst-core.a libkaldi-decoder-core.a; do
libsherpa-onnx-fst.a libsherpa-onnx-kaldifst-core.a libkaldi-decoder-core.a \
libucd.a libpiper_phonemize.a libespeak-ng.a; do
lipo -create build/simulator_arm64/lib/${f} \
build/simulator_x86_64/lib/${f} \
-output build/simulator/lib/${f}
... ... @@ -154,7 +155,10 @@ libtool -static -o build/simulator/sherpa-onnx.a \
build/simulator/lib/libsherpa-onnx-core.a \
build/simulator/lib/libsherpa-onnx-fst.a \
build/simulator/lib/libsherpa-onnx-kaldifst-core.a \
build/simulator/lib/libkaldi-decoder-core.a
build/simulator/lib/libkaldi-decoder-core.a \
build/simulator/lib/libucd.a \
build/simulator/lib/libpiper_phonemize.a \
build/simulator/lib/libespeak-ng.a \
libtool -static -o build/os64/sherpa-onnx.a \
build/os64/lib/libkaldi-native-fbank-core.a \
... ... @@ -162,7 +166,10 @@ libtool -static -o build/os64/sherpa-onnx.a \
build/os64/lib/libsherpa-onnx-core.a \
build/os64/lib/libsherpa-onnx-fst.a \
build/os64/lib/libsherpa-onnx-kaldifst-core.a \
build/os64/lib/libkaldi-decoder-core.a
build/os64/lib/libkaldi-decoder-core.a \
build/os64/lib/libucd.a \
build/os64/lib/libpiper_phonemize.a \
build/os64/lib/libespeak-ng.a \
rm -rf sherpa-onnx.xcframework
... ...
... ... @@ -29,4 +29,7 @@ libtool -static -o ./install/lib/libsherpa-onnx.a \
./install/lib/libkaldi-native-fbank-core.a \
./install/lib/libsherpa-onnx-fst.a \
./install/lib/libsherpa-onnx-kaldifst-core.a \
./install/lib/libkaldi-decoder-core.a
./install/lib/libkaldi-decoder-core.a \
./install/lib/libucd.a \
./install/lib/libpiper_phonemize.a \
./install/lib/libespeak-ng.a
... ...
... ... @@ -65,6 +65,29 @@ static struct cag_option options[] = {
.identifier = 'a',
.description =
"Filename to save the generated audio. Default to ./generated.wav"},
{.access_name = "tts-rule-fsts",
.value_name = "/path/to/rule.fst",
.identifier = 'b',
.description = "It not empty, it contains a list of rule FST filenames."
"Multiple filenames are separated by a comma and they are "
"applied from left to right. An example value: "
"rule1.fst,rule2,fst,rule3.fst"},
{.access_name = "max-num-sentences",
.value_name = "2",
.identifier = 'c',
.description = "Maximum number of sentences that we process at a time. "
"This is to avoid OOM for very long input text. "
"If you set it to -1, then we process all sentences in a "
"single batch."},
{.access_name = "vits-data-dir",
.value_name = "/path/to/espeak-ng-data",
.identifier = 'd',
.description =
"Path to espeak-ng-data. If it is given, --vits-lexicon is ignored"},
};
static void ShowUsage() {
... ... @@ -163,15 +186,38 @@ int32_t main(int32_t argc, char *argv[]) {
free((void *)filename);
filename = strdup(value);
break;
case 'b':
config.rule_fsts = value;
break;
case 'c':
config.max_num_sentences = atoi(value);
break;
case 'd':
config.model.vits.data_dir = value;
break;
case '?':
fprintf(stderr, "Unknown option\n");
// fall through
case 'h':
// fall through
default:
ShowUsage();
}
}
fprintf(stderr, "here\n");
if (!config.model.vits.model) {
fprintf(stderr, "Please provide --vits-model\n");
ShowUsage();
}
if (!config.model.vits.tokens) {
fprintf(stderr, "Please provide --vits-tokens\n");
ShowUsage();
}
if (!config.model.vits.model || !config.model.vits.lexicon ||
!config.model.vits.tokens) {
if (!config.model.vits.data_dir && !config.model.vits.lexicon) {
fprintf(stderr, "Please provide --vits-data-dir or --vits-lexicon\n");
ShowUsage();
}
... ...
... ... @@ -73,6 +73,10 @@ class BuildExtension(build_ext):
extra_cmake_args = f" -DCMAKE_INSTALL_PREFIX={install_dir} "
extra_cmake_args += " -DBUILD_SHARED_LIBS=ON "
extra_cmake_args += " -DBUILD_PIPER_PHONMIZE_EXE=OFF "
extra_cmake_args += " -DBUILD_PIPER_PHONMIZE_TESTS=OFF "
extra_cmake_args += " -DBUILD_ESPEAK_NG_EXE=OFF "
extra_cmake_args += " -DBUILD_ESPEAK_NG_TESTS=OFF "
extra_cmake_args += " -DSHERPA_ONNX_ENABLE_CHECK=OFF "
extra_cmake_args += " -DSHERPA_ONNX_ENABLE_PYTHON=ON "
... ... @@ -146,6 +150,9 @@ class BuildExtension(build_ext):
binaries += ["sherpa-onnx-core.dll"]
binaries += ["sherpa-onnx-portaudio.dll"]
binaries += ["onnxruntime.dll"]
binaries += ["piper_phonemize.dll"]
binaries += ["espeak-ng.dll"]
binaries += ["ucd.dll"]
binaries += ["kaldi-decoder-core.dll"]
binaries += ["sherpa-onnx-fst.lib"]
binaries += ["sherpa-onnx-kaldifst-core.lib"]
... ... @@ -161,5 +168,8 @@ class BuildExtension(build_ext):
shutil.copy(f"{src_file}", f"{out_bin_dir}/")
shutil.rmtree(f"{install_dir}/bin")
shutil.rmtree(f"{install_dir}/share")
shutil.rmtree(f"{install_dir}/lib/pkgconfig")
if is_windows():
shutil.rmtree(f"{install_dir}/lib")
... ...
... ... @@ -86,7 +86,7 @@ function(download_espeak_ng_for_piper)
-Wno-unused-result
-Wno-format-overflow
-Wno-format-truncation
-Wno-maybe-uninitialized
-Wno-uninitialized
-Wno-format
)
... ...
... ... @@ -13,4 +13,4 @@ Cflags: -I"${includedir}"
# Note: -lcargs is required only for the following file
# https://github.com/k2-fsa/sherpa-onnx/blob/master/c-api-examples/decode-file-c-api.c
# We add it here so that users don't need to specify -lcargs when compiling decode-file-c-api.c
Libs: -L"${libdir}" -lsherpa-onnx-c-api -lsherpa-onnx-core -lonnxruntime -lkaldi-decoder-core -lsherpa-onnx-kaldifst-core -lsherpa-onnx-fst -lkaldi-native-fbank-core -lcargs -Wl,-rpath,${libdir} @SHERPA_ONNX_PKG_CONFIG_EXTRA_LIBS@
Libs: -L"${libdir}" -lsherpa-onnx-c-api -lsherpa-onnx-core -lonnxruntime -lkaldi-decoder-core -lsherpa-onnx-kaldifst-core -lsherpa-onnx-fst -lkaldi-native-fbank-core -lpiper_phonemize -lespeak-ng -lucd -lcargs -Wl,-rpath,${libdir} @SHERPA_ONNX_PKG_CONFIG_EXTRA_LIBS@
... ...
... ... @@ -40,7 +40,7 @@
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
C93989AF2A89FE33009AB859 /* onnxruntime.xcframework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.xcframework; name = onnxruntime.xcframework; path = "../../build-ios/ios-onnxruntime/1.16.0/onnxruntime.xcframework"; sourceTree = "<group>"; };
C93989AF2A89FE33009AB859 /* onnxruntime.xcframework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.xcframework; name = onnxruntime.xcframework; path = "../../build-ios/ios-onnxruntime/1.16.3/onnxruntime.xcframework"; sourceTree = "<group>"; };
C93989B12A89FF78009AB859 /* decoder.int8.onnx */ = {isa = PBXFileReference; lastKnownFileType = file; name = decoder.int8.onnx; path = "../../../icefall-models/sherpa-onnx-streaming-paraformer-bilingual-zh-en/decoder.int8.onnx"; sourceTree = "<group>"; };
C93989B22A89FF78009AB859 /* encoder.int8.onnx */ = {isa = PBXFileReference; lastKnownFileType = file; name = encoder.int8.onnx; path = "../../../icefall-models/sherpa-onnx-streaming-paraformer-bilingual-zh-en/encoder.int8.onnx"; sourceTree = "<group>"; };
C93989B32A89FF78009AB859 /* tokens.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = tokens.txt; path = "../../../icefall-models/sherpa-onnx-streaming-paraformer-bilingual-zh-en/tokens.txt"; sourceTree = "<group>"; };
... ...
... ... @@ -65,7 +65,7 @@ struct ContentView: View {
self.filename = tempDirectoryURL.appendingPathComponent("test.wav")
}
let ret = audio.save(filename: filename.path)
let _ = audio.save(filename: filename.path)
self.audioPlayer = try! AVAudioPlayer(contentsOf: filename)
self.audioPlayer.play()
... ...
... ... @@ -7,6 +7,12 @@
import Foundation
// used to get the path to espeak-ng-data
/// Resolves `path` against the main bundle's resource directory and returns
/// the result as a filesystem path string (used here to locate espeak-ng-data).
/// NOTE(review): the force-unwrap assumes `path` always forms a valid URL
/// relative to the bundle resources — a missing bundled resource crashes early.
func resourceURL(to path: String) -> String {
  return URL(string: path, relativeTo: Bundle.main.resourceURL)!.path
}
func getResource(_ forResource: String, _ ofType: String) -> String {
let path = Bundle.main.path(forResource: forResource, ofType: ofType)
precondition(
... ... @@ -59,8 +65,30 @@ func getTtsForAishell3() -> SherpaOnnxOfflineTtsWrapper {
return SherpaOnnxOfflineTtsWrapper(config: &config)
}
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
/// Builds an offline TTS wrapper for the piper model vits-piper-en_US-amy-low.
/// Model files must be bundled with the app; see
/// https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
func getTtsFor_en_US_amy_low() -> SherpaOnnxOfflineTtsWrapper {
  // en_US-amy-low.onnx (previous comment said vits-vctk.onnx — stale copy/paste)
  let model = getResource("en_US-amy-low", "onnx")

  // tokens.txt
  let tokens = getResource("tokens", "txt")

  // Piper models ship espeak-ng-data instead of a lexicon, so lexicon is "".
  let dataDir = resourceURL(to: "espeak-ng-data")

  let vits = sherpaOnnxOfflineTtsVitsModelConfig(model: model, lexicon: "", tokens: tokens, dataDir: dataDir)
  let modelConfig = sherpaOnnxOfflineTtsModelConfig(vits: vits)
  var config = sherpaOnnxOfflineTtsConfig(model: modelConfig)

  return SherpaOnnxOfflineTtsWrapper(config: &config)
}
func createOfflineTts() -> SherpaOnnxOfflineTtsWrapper {
return getTtsForVCTK()
return getTtsFor_en_US_amy_low()
// return getTtsForVCTK()
// return getTtsForAishell3()
... ...
... ... @@ -8,20 +8,22 @@ fun main() {
}
fun testTts() {
// see https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
// https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
var config = OfflineTtsConfig(
model=OfflineTtsModelConfig(
vits=OfflineTtsVitsModelConfig(
model="./vits-zh-aishell3/vits-aishell3.onnx",
lexicon="./vits-zh-aishell3/lexicon.txt",
tokens="./vits-zh-aishell3/tokens.txt",
model="./vits-piper-en_US-amy-low/en_US-amy-low.onnx",
tokens="./vits-piper-en_US-amy-low/tokens.txt",
dataDir="./vits-piper-en_US-amy-low/espeak-ng-data",
),
numThreads=1,
debug=true,
)
)
val tts = OfflineTts(config=config)
val audio = tts.generate(text="林美丽最美丽!", sid=99, speed=1.2f)
audio.save(filename="99.wav")
val audio = tts.generate(text="“Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar.”")
audio.save(filename="test-en.wav")
}
fun testAsr() {
... ...
... ... @@ -34,9 +34,10 @@ if [ ! -f ./sherpa-onnx-streaming-zipformer-en-2023-02-21/tokens.txt ]; then
git clone https://huggingface.co/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-02-21
fi
if [ ! -f ./vits-zh-aishell3/tokens.txt ]; then
git lfs install
git clone https://huggingface.co/csukuangfj/vits-zh-aishell3
if [ ! -f ./vits-piper-en_US-amy-low/en_US-amy-low.onnx ]; then
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xf vits-piper-en_US-amy-low.tar.bz2
rm vits-piper-en_US-amy-low.tar.bz2
fi
kotlinc-jvm -include-runtime -d main.jar Main.kt WaveReader.kt SherpaOnnx.kt faked-asset-manager.kt Tts.kt
... ...
node_modules
lib
package-lock.json
... ...
... ... @@ -42,15 +42,14 @@ In the following, we demonstrate how to run text-to-speech.
## ./test-offline-tts-en.js
[./test-offline-tts-en.js](./test-offline-tts-en.js) shows how to use
a VITS pretrained model
[VCTK](https://k2-fsa.github.io/sherpa/onnx/tts/pretrained_models/vits.html#vctk-english-multi-speaker-109-speakers)
[vits-piper-en_US-amy-low.tar.bz2](https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2)
for text-to-speech.
You can use the following command to run it:
```bash
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-vctk.tar.bz2
tar xvf vits-vctk.tar.bz2
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
tar xvf vits-piper-en_US-amy-low.tar.bz2
node ./test-offline-tts-en.js
```
... ...
... ... @@ -4,9 +4,9 @@ const sherpa_onnx = require('sherpa-onnx');
function createOfflineTts() {
const vits = new sherpa_onnx.OfflineTtsVitsModelConfig();
vits.model = './vits-vctk/vits-vctk.onnx';
vits.lexicon = './vits-vctk/lexicon.txt';
vits.tokens = './vits-vctk/tokens.txt';
vits.model = 'vits-piper-en_US-amy-low/en_US-amy-low.onnx'
vits.tokens = './vits-piper-en_US-amy-low/tokens.txt';
vits.dataDir = './vits-piper-en_US-amy-low/espeak-ng-data'
const modelConfig = new sherpa_onnx.OfflineTtsModelConfig();
modelConfig.vits = vits;
... ... @@ -18,10 +18,11 @@ function createOfflineTts() {
}
const tts = createOfflineTts();
const speakerId = 99;
const speakerId = 0;
const speed = 1.0;
const audio =
tts.generate('Good morning. How are you doing?', speakerId, speed);
const audio = tts.generate(
'“Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar.”',
speakerId, speed);
audio.save('./test-en.wav');
console.log('Saved to test-en.wav successfully.');
tts.free();
... ...
... ... @@ -63,16 +63,26 @@ def get_args():
parser.add_argument(
"--vits-lexicon",
type=str,
default="",
help="Path to lexicon.txt",
)
parser.add_argument(
"--vits-tokens",
type=str,
default="",
help="Path to tokens.txt",
)
parser.add_argument(
"--vits-data-dir",
type=str,
default="",
help="""Path to the dict director of espeak-ng. If it is specified,
--vits-lexicon and --vits-tokens are ignored""",
)
parser.add_argument(
"--tts-rule-fsts",
type=str,
default="",
... ... @@ -80,6 +90,17 @@ def get_args():
)
parser.add_argument(
"--max-num-sentences",
type=int,
default=2,
help="""Max number of sentences in a batch to avoid OOM if the input
text is very long. Set it to -1 to process all the sentences in a
single batch. A smaller value does not mean it is slower compared
to a larger one on CPU.
""",
)
parser.add_argument(
"--output-filename",
type=str,
default="./generated.wav",
... ... @@ -142,14 +163,19 @@ def main():
vits=sherpa_onnx.OfflineTtsVitsModelConfig(
model=args.vits_model,
lexicon=args.vits_lexicon,
data_dir=args.vits_data_dir,
tokens=args.vits_tokens,
),
provider=args.provider,
debug=args.debug,
num_threads=args.num_threads,
),
rule_fsts=args.tts_rule_fsts
rule_fsts=args.tts_rule_fsts,
max_num_sentences=args.max_num_sentences,
)
if not tts_config.validate():
raise ValueError("Please check your config")
tts = sherpa_onnx.OfflineTts(tts_config)
start = time.time()
... ...
... ... @@ -37,13 +37,9 @@ model_dir={{ tts_model.model_dir }}
model_name={{ tts_model.model_name }}
lang={{ tts_model.lang }}
mkdir $model_dir
cd $model_dir
wget -qq https://huggingface.co/csukuangfj/$model_dir/resolve/main/$model_name
wget -qq https://huggingface.co/csukuangfj/$model_dir/resolve/main/lexicon.txt
wget -qq https://huggingface.co/csukuangfj/$model_dir/resolve/main/tokens.txt
wget -qq https://huggingface.co/csukuangfj/$model_dir/resolve/main/MODEL_CARD 2>/dev/null || true
wget -qq https://huggingface.co/csukuangfj/$model_dir/resolve/main/rule.fst 2>/dev/null || true
wget -qq https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/$model_dir.tar.bz2
tar xf $model_dir.tar.bz2
rm $model_dir.tar.bz2
popd
# Now we are at the project root directory
... ... @@ -52,11 +48,19 @@ git checkout .
pushd android/SherpaOnnxTts/app/src/main/java/com/k2fsa/sherpa/onnx
sed -i.bak s/"modelDir = null"/"modelDir = \"$model_dir\""/ ./MainActivity.kt
sed -i.bak s/"modelName = null"/"modelName = \"$model_name\""/ ./MainActivity.kt
{% if tts_model.rule_fsts %}
rule_fsts={{ tts_model.rule_fsts }}
sed -i.bak s%"ruleFsts = null"%"ruleFsts = \"$rule_fsts\""% ./MainActivity.kt
{% endif %}
{% if tts_model.data_dir %}
data_dir={{ tts_model.data_dir }}
sed -i.bak s%"dataDir = null"%"dataDir = \"$data_dir\""% ./MainActivity.kt
{% else %}
sed -i.bak s/"lexicon = null"/"lexicon = \"lexicon.txt\""/ ./MainActivity.kt
{% endif %}
git diff
popd
... ...
... ... @@ -27,9 +27,122 @@ def get_args():
@dataclass
class TtsModel:
    # Directory name of the model release, e.g. "vits-piper-en_US-amy-low".
    model_dir: str
    # Filename of the .onnx model inside model_dir. Defaults to "" because
    # piper entries derive it from model_dir in get_piper_models().
    # (The original declared model_name/lang twice — diff-merge remnants.)
    model_name: str = ""
    lang: str = ""  # two-letter language code: en, zh, fr, de, etc.
    # Optional rule FST filenames applied to the input text.
    rule_fsts: Optional[List[str]] = None
    # Path to espeak-ng-data; set only for piper models.
    data_dir: Optional[str] = None


def get_piper_models() -> List[TtsModel]:
    """Return one TtsModel per piper VITS model directory.

    model_name, data_dir and lang are all derived from model_dir, so each
    entry only needs to list the directory name.
    """
    model_dirs = [
        "vits-piper-ar_JO-kareem-low",
        "vits-piper-ar_JO-kareem-medium",
        "vits-piper-ca_ES-upc_ona-medium",
        "vits-piper-ca_ES-upc_ona-x_low",
        # Listed once; the original repeated this entry twice.
        "vits-piper-ca_ES-upc_pau-x_low",
        "vits-piper-cs_CZ-jirka-medium",
        "vits-piper-da_DK-talesyntese-medium",
        "vits-piper-de_DE-eva_k-x_low",
        "vits-piper-de_DE-karlsson-low",
        "vits-piper-de_DE-kerstin-low",
        "vits-piper-de_DE-pavoque-low",
        "vits-piper-de_DE-ramona-low",
        "vits-piper-de_DE-thorsten-high",
        "vits-piper-de_DE-thorsten-low",
        "vits-piper-de_DE-thorsten-medium",
        "vits-piper-de_DE-thorsten_emotional-medium",
        "vits-piper-el_GR-rapunzelina-low",
        "vits-piper-en_GB-alan-low",
        "vits-piper-en_GB-alan-medium",
        "vits-piper-en_GB-alba-medium",
        "vits-piper-en_GB-jenny_dioco-medium",
        "vits-piper-en_GB-northern_english_male-medium",
        "vits-piper-en_GB-semaine-medium",
        "vits-piper-en_GB-southern_english_female-low",
        "vits-piper-en_GB-sweetbbak-amy",
        "vits-piper-en_GB-vctk-medium",
        "vits-piper-en_US-amy-low",
        "vits-piper-en_US-amy-medium",
        "vits-piper-en_US-arctic-medium",
        "vits-piper-en_US-danny-low",
        "vits-piper-en_US-hfc_male-medium",
        "vits-piper-en_US-joe-medium",
        "vits-piper-en_US-kathleen-low",
        "vits-piper-en_US-kusal-medium",
        "vits-piper-en_US-l2arctic-medium",
        "vits-piper-en_US-lessac-high",
        "vits-piper-en_US-lessac-low",
        "vits-piper-en_US-lessac-medium",
        "vits-piper-en_US-libritts-high",
        "vits-piper-en_US-libritts_r-medium",
        "vits-piper-en_US-ryan-high",
        "vits-piper-en_US-ryan-low",
        "vits-piper-en_US-ryan-medium",
        "vits-piper-es_ES-carlfm-x_low",
        "vits-piper-es_ES-davefx-medium",
        "vits-piper-es_ES-mls_10246-low",
        "vits-piper-es_ES-mls_9972-low",
        "vits-piper-es_ES-sharvard-medium",
        "vits-piper-es_MX-ald-medium",
        "vits-piper-fi_FI-harri-low",
        "vits-piper-fi_FI-harri-medium",
        "vits-piper-fr_FR-siwis-low",
        "vits-piper-fr_FR-siwis-medium",
        "vits-piper-fr_FR-upmc-medium",
        "vits-piper-hu_HU-anna-medium",
        "vits-piper-hu_HU-berta-medium",
        "vits-piper-hu_HU-imre-medium",
        "vits-piper-is_IS-bui-medium",
        "vits-piper-is_IS-salka-medium",
        "vits-piper-is_IS-steinn-medium",
        "vits-piper-is_IS-ugla-medium",
        "vits-piper-it_IT-riccardo-x_low",
        "vits-piper-ka_GE-natia-medium",
        "vits-piper-kk_KZ-iseke-x_low",
        "vits-piper-kk_KZ-issai-high",
        "vits-piper-kk_KZ-raya-x_low",
        "vits-piper-lb_LU-marylux-medium",
        "vits-piper-ne_NP-google-medium",
        "vits-piper-ne_NP-google-x_low",
        "vits-piper-nl_BE-nathalie-medium",
        "vits-piper-nl_BE-nathalie-x_low",
        "vits-piper-nl_BE-rdh-medium",
        "vits-piper-nl_BE-rdh-x_low",
        "vits-piper-nl_NL-mls_5809-low",
        "vits-piper-nl_NL-mls_7432-low",
        "vits-piper-no_NO-talesyntese-medium",
        "vits-piper-pl_PL-darkman-medium",
        "vits-piper-pl_PL-gosia-medium",
        "vits-piper-pl_PL-mc_speech-medium",
        "vits-piper-pl_PL-mls_6892-low",
        "vits-piper-pt_BR-edresson-low",
        "vits-piper-pt_BR-faber-medium",
        "vits-piper-pt_PT-tugao-medium",
        "vits-piper-ro_RO-mihai-medium",
        "vits-piper-ru_RU-denis-medium",
        "vits-piper-ru_RU-dmitri-medium",
        "vits-piper-ru_RU-irina-medium",
        "vits-piper-ru_RU-ruslan-medium",
        "vits-piper-sk_SK-lili-medium",
        "vits-piper-sr_RS-serbski_institut-medium",
        "vits-piper-sv_SE-nst-medium",
        "vits-piper-sw_CD-lanfrica-medium",
        "vits-piper-tr_TR-dfki-medium",
        "vits-piper-tr_TR-fahrettin-medium",
        "vits-piper-uk_UA-lada-x_low",
        "vits-piper-uk_UA-ukrainian_tts-medium",
        "vits-piper-vi_VN-25hours_single-low",
        "vits-piper-vi_VN-vais1000-medium",
        "vits-piper-vi_VN-vivos-x_low",
        "vits-piper-zh_CN-huayan-medium",
    ]
    models = [TtsModel(model_dir=d) for d in model_dirs]
    for m in models:
        m.data_dir = m.model_dir + "/" + "espeak-ng-data"
        m.model_name = m.model_dir[len("vits-piper-") :] + ".onnx"
        # "vits-piper-de_DE-thorsten-low" -> "de_DE" -> "de".
        # The original hard-coded "en", mislabeling every non-English model.
        m.lang = m.model_dir.split("-")[2][:2]
    return models
def get_all_models() -> List[TtsModel]:
... ... @@ -98,56 +211,6 @@ def get_all_models() -> List[TtsModel]:
# English (US)
TtsModel(model_dir="vits-vctk", model_name="vits-vctk.onnx", lang="en"),
TtsModel(model_dir="vits-ljs", model_name="vits-ljs.onnx", lang="en"),
TtsModel(model_dir="vits-piper-en_US-amy-low", model_name="en_US-amy-low.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-amy-medium", model_name="en_US-amy-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-arctic-medium", model_name="en_US-arctic-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-danny-low", model_name="en_US-danny-low.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-hfc_male-medium", model_name="en_US-hfc_male-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-joe-medium", model_name="en_US-joe-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-kathleen-low", model_name="en_US-kathleen-low.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-kusal-medium", model_name="en_US-kusal-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-l2arctic-medium", model_name="en_US-l2arctic-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-lessac-low", model_name="en_US-lessac-low.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-lessac-medium", model_name="en_US-lessac-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-lessac-high", model_name="en_US-lessac-high.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-libritts-high", model_name="en_US-libritts-high.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-libritts_r-medium", model_name="en_US-libritts_r-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-ryan-low", model_name="en_US-ryan-low.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-ryan-medium", model_name="en_US-ryan-medium.onnx", lang="en",),
TtsModel(model_dir="vits-piper-en_US-ryan-high", model_name="en_US-ryan-high.onnx", lang="en",),
# English (GB)
TtsModel(model_dir="vits-piper-en_GB-alan-low", model_name="en_GB-alan-low.onnx",lang="en",),
TtsModel(model_dir="vits-piper-en_GB-alan-medium", model_name="en_GB-alan-medium.onnx",lang="en",),
TtsModel(model_dir="vits-piper-en_GB-alba-medium", model_name="en_GB-alba-medium.onnx",lang="en",),
TtsModel(model_dir="vits-piper-en_GB-jenny_dioco-medium", model_name="en_GB-jenny_dioco-medium.onnx",lang="en",),
TtsModel(model_dir="vits-piper-en_GB-northern_english_male-medium", model_name="en_GB-northern_english_male-medium.onnx",lang="en",),
TtsModel(model_dir="vits-piper-en_GB-semaine-medium", model_name="en_GB-semaine-medium.onnx",lang="en",),
TtsModel(model_dir="vits-piper-en_GB-southern_english_female-low", model_name="en_GB-southern_english_female-low.onnx",lang="en",),
TtsModel(model_dir="vits-piper-en_GB-vctk-medium", model_name="en_GB-vctk-medium.onnx",lang="en",),
# German (DE)
TtsModel(model_dir="vits-piper-de_DE-eva_k-x_low", model_name="de_DE-eva_k-x_low.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-karlsson-low", model_name="de_DE-karlsson-low.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-kerstin-low", model_name="de_DE-kerstin-low.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-pavoque-low", model_name="de_DE-pavoque-low.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-ramona-low", model_name="de_DE-ramona-low.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-thorsten-low", model_name="de_DE-thorsten-low.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-thorsten-medium", model_name="de_DE-thorsten-medium.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-thorsten-high", model_name="de_DE-thorsten-high.onnx",lang="de",),
TtsModel(model_dir="vits-piper-de_DE-thorsten_emotional-medium", model_name="de_DE-thorsten_emotional-medium.onnx",lang="de",),
# French (FR)
TtsModel(model_dir="vits-piper-fr_FR-upmc-medium", model_name="fr_FR-upmc-medium.onnx",lang="fr",),
TtsModel(model_dir="vits-piper-fr_FR-siwis-low", model_name="fr_FR-siwis-low.onnx",lang="fr",),
TtsModel(model_dir="vits-piper-fr_FR-siwis-medium", model_name="fr_FR-siwis-medium.onnx",lang="fr",),
# Spanish (ES)
TtsModel(model_dir="vits-piper-es_ES-carlfm-x_low", model_name="es_ES-carlfm-x_low.onnx",lang="es",),
TtsModel(model_dir="vits-piper-es_ES-davefx-medium", model_name="es_ES-davefx-medium.onnx",lang="es",),
TtsModel(model_dir="vits-piper-es_ES-mls_10246-low", model_name="es_ES-mls_10246-low.onnx",lang="es",),
TtsModel(model_dir="vits-piper-es_ES-mls_9972-low", model_name="es_ES-mls_9972-low.onnx",lang="es",),
TtsModel(model_dir="vits-piper-es_ES-sharvard-medium", model_name="es_ES-sharvard-medium.onnx",lang="es",),
# Spanish (MX)
TtsModel(model_dir="vits-piper-es_MX-ald-medium", model_name="es_MX-ald-medium.onnx",lang="es",),
# fmt: on
]
... ... @@ -162,7 +225,8 @@ def main():
s = f.read()
template = environment.from_string(s)
d = dict()
all_model_list = get_all_models()
# all_model_list = get_all_models()
all_model_list = get_piper_models()
num_models = len(all_model_list)
num_per_runner = num_models // total
... ...
... ... @@ -186,6 +186,7 @@ const SherpaOnnxOfflineTtsVitsModelConfig = StructType({
"model" : cstring,
"lexicon" : cstring,
"tokens" : cstring,
"dataDir" : cstring,
"noiseScale" : float,
"noiseScaleW" : float,
"lengthScale" : float,
... ... @@ -201,6 +202,7 @@ const SherpaOnnxOfflineTtsModelConfig = StructType({
const SherpaOnnxOfflineTtsConfig = StructType({
"model" : SherpaOnnxOfflineTtsModelConfig,
"ruleFsts" : cstring,
"maxNumSentences" : int32_t,
});
const SherpaOnnxGeneratedAudio = StructType({
... ...
... ... @@ -65,6 +65,9 @@ def get_binaries_to_install():
binaries += ["sherpa-onnx-core.dll"]
binaries += ["sherpa-onnx-portaudio.dll"]
binaries += ["onnxruntime.dll"]
binaries += ["piper_phonemize.dll"]
binaries += ["espeak-ng.dll"]
binaries += ["ucd.dll"]
binaries += ["kaldi-decoder-core.dll"]
binaries += ["sherpa-onnx-fst.lib"]
binaries += ["sherpa-onnx-kaldifst-core.lib"]
... ...
... ... @@ -547,6 +547,8 @@ SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts(
tts_config.model.vits.lexicon =
SHERPA_ONNX_OR(config->model.vits.lexicon, "");
tts_config.model.vits.tokens = SHERPA_ONNX_OR(config->model.vits.tokens, "");
tts_config.model.vits.data_dir =
SHERPA_ONNX_OR(config->model.vits.data_dir, "");
tts_config.model.vits.noise_scale =
SHERPA_ONNX_OR(config->model.vits.noise_scale, 0.667);
tts_config.model.vits.noise_scale_w =
... ... @@ -558,6 +560,7 @@ SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts(
tts_config.model.debug = config->model.debug;
tts_config.model.provider = SHERPA_ONNX_OR(config->model.provider, "cpu");
tts_config.rule_fsts = SHERPA_ONNX_OR(config->rule_fsts, "");
tts_config.max_num_sentences = SHERPA_ONNX_OR(config->max_num_sentences, 2);
if (tts_config.model.debug) {
fprintf(stderr, "%s\n", tts_config.ToString().c_str());
... ...
... ... @@ -607,6 +607,7 @@ SHERPA_ONNX_API typedef struct SherpaOnnxOfflineTtsVitsModelConfig {
const char *model;
const char *lexicon;
const char *tokens;
const char *data_dir;
float noise_scale;
float noise_scale_w;
... ... @@ -623,6 +624,7 @@ SHERPA_ONNX_API typedef struct SherpaOnnxOfflineTtsModelConfig {
SHERPA_ONNX_API typedef struct SherpaOnnxOfflineTtsConfig {
  SherpaOnnxOfflineTtsModelConfig model;
  // Path(s) to rule FST(s) used for text normalization. Multiple filenames
  // are separated by a comma and applied from left to right. Empty means
  // no text normalization.
  const char *rule_fsts;
  // Maximum number of sentences processed per batch, to avoid OOM for very
  // long input text. A value <= 0 means all sentences are processed in a
  // single batch.
  int32_t max_num_sentences;
} SherpaOnnxOfflineTtsConfig;
SHERPA_ONNX_API typedef struct SherpaOnnxGeneratedAudio {
... ...
... ... @@ -74,6 +74,7 @@ set(sources
packed-sequence.cc
pad-sequence.cc
parse-options.cc
piper-phonemize-lexicon.cc
provider.cc
resample.cc
session.cc
... ...
... ... @@ -129,8 +129,8 @@ Lexicon::Lexicon(AAssetManager *mgr, const std::string &lexicon,
}
#endif
std::vector<int64_t> Lexicon::ConvertTextToTokenIds(
const std::string &text) const {
std::vector<std::vector<int64_t>> Lexicon::ConvertTextToTokenIds(
const std::string &text, const std::string & /*voice*/ /*= ""*/) const {
switch (language_) {
case Language::kEnglish:
return ConvertTextToTokenIdsEnglish(text);
... ... @@ -150,7 +150,7 @@ std::vector<int64_t> Lexicon::ConvertTextToTokenIds(
return {};
}
std::vector<int64_t> Lexicon::ConvertTextToTokenIdsChinese(
std::vector<std::vector<int64_t>> Lexicon::ConvertTextToTokenIdsChinese(
const std::string &text) const {
std::vector<std::string> words;
if (pattern_) {
... ... @@ -245,10 +245,10 @@ std::vector<int64_t> Lexicon::ConvertTextToTokenIdsChinese(
ans.push_back(eos);
}
return ans;
return {ans};
}
std::vector<int64_t> Lexicon::ConvertTextToTokenIdsEnglish(
std::vector<std::vector<int64_t>> Lexicon::ConvertTextToTokenIdsEnglish(
const std::string &_text) const {
std::string text(_text);
ToLowerCase(&text);
... ... @@ -301,7 +301,7 @@ std::vector<int64_t> Lexicon::ConvertTextToTokenIdsEnglish(
ans.push_back(token2id_.at("$")); // eos
}
return ans;
return {ans};
}
void Lexicon::InitTokens(std::istream &is) { token2id_ = ReadTokens(is); }
... ...
... ... @@ -18,11 +18,15 @@
#include "android/asset_manager_jni.h"
#endif
#include "sherpa-onnx/csrc/offline-tts-frontend.h"
namespace sherpa_onnx {
// TODO(fangjun): Refactor it to an abstract class
class Lexicon {
class Lexicon : public OfflineTtsFrontend {
public:
Lexicon() = default; // for subclasses
//
// Note: for models from piper, we won't use this class.
Lexicon(const std::string &lexicon, const std::string &tokens,
const std::string &punctuations, const std::string &language,
bool debug = false, bool is_piper = false);
... ... @@ -34,28 +38,29 @@ class Lexicon {
bool is_piper = false);
#endif
std::vector<int64_t> ConvertTextToTokenIds(const std::string &text) const;
std::vector<std::vector<int64_t>> ConvertTextToTokenIds(
const std::string &text, const std::string &voice = "") const override;
private:
std::vector<int64_t> ConvertTextToTokenIdsGerman(
std::vector<std::vector<int64_t>> ConvertTextToTokenIdsGerman(
const std::string &text) const {
return ConvertTextToTokenIdsEnglish(text);
}
std::vector<int64_t> ConvertTextToTokenIdsSpanish(
std::vector<std::vector<int64_t>> ConvertTextToTokenIdsSpanish(
const std::string &text) const {
return ConvertTextToTokenIdsEnglish(text);
}
std::vector<int64_t> ConvertTextToTokenIdsFrench(
std::vector<std::vector<int64_t>> ConvertTextToTokenIdsFrench(
const std::string &text) const {
return ConvertTextToTokenIdsEnglish(text);
}
std::vector<int64_t> ConvertTextToTokenIdsEnglish(
std::vector<std::vector<int64_t>> ConvertTextToTokenIdsEnglish(
const std::string &text) const;
std::vector<int64_t> ConvertTextToTokenIdsChinese(
std::vector<std::vector<int64_t>> ConvertTextToTokenIdsChinese(
const std::string &text) const;
void InitLanguage(const std::string &lang);
... ...
... ... @@ -43,6 +43,21 @@
} \
} while (0)
// Read an integer entry `src_key` from the ONNX model's custom metadata map
// into `dst`. If the key is absent, `dst` is set to `default_value`. If the
// key is present, its value must parse to a non-negative integer; otherwise
// the process exits.
// NOTE(review): atoi() returns 0 for non-numeric strings, so a malformed
// value silently becomes 0 instead of being rejected — confirm this is
// acceptable for all keys using this macro.
#define SHERPA_ONNX_READ_META_DATA_WITH_DEFAULT(dst, src_key, default_value) \
  do {                                                                       \
    auto value =                                                             \
        meta_data.LookupCustomMetadataMapAllocated(src_key, allocator);      \
    if (!value) {                                                            \
      dst = default_value;                                                   \
    } else {                                                                 \
      dst = atoi(value.get());                                               \
      if (dst < 0) {                                                         \
        SHERPA_ONNX_LOGE("Invalid value %d for %s", dst, src_key);           \
        exit(-1);                                                            \
      }                                                                      \
    }                                                                        \
  } while (0)
// read a vector of integers
#define SHERPA_ONNX_READ_META_DATA_VEC(dst, src_key) \
do { \
... ... @@ -112,4 +127,20 @@
} \
} while (0)
// Read a string entry `src_key` from the ONNX model's custom metadata map
// into `dst`. If the key is absent, `dst` is set to `default_value`. If the
// key is present, its value must be non-empty; otherwise the process exits.
#define SHERPA_ONNX_READ_META_DATA_STR_WITH_DEFAULT(dst, src_key,       \
                                                    default_value)      \
  do {                                                                  \
    auto value =                                                        \
        meta_data.LookupCustomMetadataMapAllocated(src_key, allocator); \
    if (!value) {                                                       \
      dst = default_value;                                              \
    } else {                                                            \
      dst = value.get();                                                \
      if (dst.empty()) {                                                \
        SHERPA_ONNX_LOGE("Invalid value for %s\n", src_key);            \
        exit(-1);                                                       \
      }                                                                 \
    }                                                                   \
  } while (0)
#endif // SHERPA_ONNX_CSRC_MACROS_H_
... ...
// sherpa-onnx/csrc/offline-tts-frontend.h
//
// Copyright (c) 2023 Xiaomi Corporation

#ifndef SHERPA_ONNX_CSRC_OFFLINE_TTS_FRONTEND_H_
#define SHERPA_ONNX_CSRC_OFFLINE_TTS_FRONTEND_H_

#include <cstdint>
#include <string>
#include <vector>

namespace sherpa_onnx {

// Abstract interface for offline TTS text front-ends, which turn raw input
// text into token ID sequences that a TTS model can consume. Known
// implementations: Lexicon (lexicon-file based) and PiperPhonemizeLexicon
// (espeak-ng / piper-phonemize based).
class OfflineTtsFrontend {
 public:
  virtual ~OfflineTtsFrontend() = default;

  /** Convert a string to token IDs.
   *
   * @param text The input text.
   *             Example 1: "This is the first sample sentence; this is the
   *             second one." Example 2: "这是第一句。这是第二句。"
   * @param voice Optional. It is for espeak-ng, e.g., "en-us"; ignored by
   *              front-ends that do not use espeak-ng.
   *
   * @return Return a vector-of-vector of token IDs. Each subvector contains
   *         a sentence that can be processed independently.
   *         If a frontend does not support splitting the text into sentences,
   *         the resulting vector contains only one subvector.
   */
  virtual std::vector<std::vector<int64_t>> ConvertTextToTokenIds(
      const std::string &text, const std::string &voice = "") const = 0;
};

}  // namespace sherpa_onnx

#endif  // SHERPA_ONNX_CSRC_OFFLINE_TTS_FRONTEND_H_
... ... @@ -18,9 +18,11 @@
#include "kaldifst/csrc/text-normalizer.h"
#include "sherpa-onnx/csrc/lexicon.h"
#include "sherpa-onnx/csrc/macros.h"
#include "sherpa-onnx/csrc/offline-tts-frontend.h"
#include "sherpa-onnx/csrc/offline-tts-impl.h"
#include "sherpa-onnx/csrc/offline-tts-vits-model.h"
#include "sherpa-onnx/csrc/onnx-utils.h"
#include "sherpa-onnx/csrc/piper-phonemize-lexicon.h"
#include "sherpa-onnx/csrc/text-utils.h"
namespace sherpa_onnx {
... ... @@ -29,10 +31,9 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
public:
explicit OfflineTtsVitsImpl(const OfflineTtsConfig &config)
: config_(config),
model_(std::make_unique<OfflineTtsVitsModel>(config.model)),
lexicon_(config.model.vits.lexicon, config.model.vits.tokens,
model_->Punctuations(), model_->Language(), config.model.debug,
model_->IsPiper()) {
model_(std::make_unique<OfflineTtsVitsModel>(config.model)) {
InitFrontend();
if (!config.rule_fsts.empty()) {
std::vector<std::string> files;
SplitStringToVector(config.rule_fsts, ",", false, &files);
... ... @@ -49,10 +50,9 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
#if __ANDROID_API__ >= 9
OfflineTtsVitsImpl(AAssetManager *mgr, const OfflineTtsConfig &config)
: config_(config),
model_(std::make_unique<OfflineTtsVitsModel>(mgr, config.model)),
lexicon_(mgr, config.model.vits.lexicon, config.model.vits.tokens,
model_->Punctuations(), model_->Language(), config.model.debug,
model_->IsPiper()) {
model_(std::make_unique<OfflineTtsVitsModel>(mgr, config.model)) {
InitFrontend(mgr);
if (!config.rule_fsts.empty()) {
std::vector<std::string> files;
SplitStringToVector(config.rule_fsts, ",", false, &files);
... ... @@ -101,20 +101,119 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
}
}
std::vector<int64_t> x = lexicon_.ConvertTextToTokenIds(text);
if (x.empty()) {
std::vector<std::vector<int64_t>> x =
frontend_->ConvertTextToTokenIds(text, model_->Voice());
if (x.empty() || (x.size() == 1 && x[0].empty())) {
SHERPA_ONNX_LOGE("Failed to convert %s to token IDs", text.c_str());
return {};
}
if (model_->AddBlank()) {
if (model_->AddBlank() && config_.model.vits.data_dir.empty()) {
for (auto &k : x) {
k = AddBlank(k);
}
}
int32_t x_size = static_cast<int32_t>(x.size());
if (config_.max_num_sentences <= 0 || x_size <= config_.max_num_sentences) {
return Process(x, sid, speed);
}
// the input text is too long, we process sentences within it in batches
// to avoid OOM. Batch size is config_.max_num_sentences
std::vector<std::vector<int64_t>> batch;
int32_t batch_size = config_.max_num_sentences;
batch.reserve(config_.max_num_sentences);
int32_t num_batches = x_size / batch_size;
if (config_.model.debug) {
SHERPA_ONNX_LOGE(
"Text is too long. Split it into %d batches. batch size: %d. Number "
"of sentences: %d",
num_batches, batch_size, x_size);
}
GeneratedAudio ans;
int32_t k = 0;
for (int32_t b = 0; b != num_batches; ++b) {
batch.clear();
for (int32_t i = 0; i != batch_size; ++i, ++k) {
batch.push_back(std::move(x[k]));
}
auto audio = Process(batch, sid, speed);
ans.sample_rate = audio.sample_rate;
ans.samples.insert(ans.samples.end(), audio.samples.begin(),
audio.samples.end());
}
batch.clear();
while (k < x.size()) {
batch.push_back(std::move(x[k]));
++k;
}
if (!batch.empty()) {
auto audio = Process(batch, sid, speed);
ans.sample_rate = audio.sample_rate;
ans.samples.insert(ans.samples.end(), audio.samples.begin(),
audio.samples.end());
}
return ans;
}
private:
// Select the text front-end (Android build). Piper models that come with
// an espeak-ng data dir use PiperPhonemizeLexicon; all other models fall
// back to the lexicon-file based front-end.
void InitFrontend(AAssetManager *mgr) {
  const auto &vits = config_.model.vits;

  if (model_->IsPiper() && !vits.data_dir.empty()) {
    frontend_ = std::make_unique<PiperPhonemizeLexicon>(mgr, vits.tokens,
                                                        vits.data_dir);
    return;
  }

  frontend_ = std::make_unique<Lexicon>(
      mgr, vits.lexicon, vits.tokens, model_->Punctuations(),
      model_->Language(), config_.model.debug, model_->IsPiper());
}
// Select the text front-end. Piper models that come with an espeak-ng
// data dir use PiperPhonemizeLexicon; all other models fall back to the
// lexicon-file based front-end.
void InitFrontend() {
  const auto &vits = config_.model.vits;

  if (model_->IsPiper() && !vits.data_dir.empty()) {
    frontend_ = std::make_unique<PiperPhonemizeLexicon>(vits.tokens,
                                                        vits.data_dir);
    return;
  }

  frontend_ = std::make_unique<Lexicon>(
      vits.lexicon, vits.tokens, model_->Punctuations(), model_->Language(),
      config_.model.debug, model_->IsPiper());
}
// Interleave token IDs with blanks: given [a, b, c], return
// [0, a, 0, b, 0, c, 0]. The blank slots stay 0 because the buffer is
// zero-initialized; the result has 2 * x.size() + 1 elements.
//
// Bug fix: the previous body contained `x = std::move(buffer);`, which
// assigns to a const reference (ill-formed) and would have returned a
// moved-from vector. The function only needs to build and return `buffer`.
std::vector<int64_t> AddBlank(const std::vector<int64_t> &x) const {
  // we assume the blank ID is 0
  std::vector<int64_t> buffer(x.size() * 2 + 1);
  int32_t i = 1;
  for (auto k : x) {
    buffer[i] = k;
    i += 2;
  }
  return buffer;
}
GeneratedAudio Process(const std::vector<std::vector<int64_t>> &tokens,
int32_t sid, float speed) const {
int32_t num_tokens = 0;
for (const auto &k : tokens) {
num_tokens += k.size();
}
std::vector<int64_t> x;
x.reserve(num_tokens);
for (const auto &k : tokens) {
x.insert(x.end(), k.begin(), k.end());
}
auto memory_info =
... ... @@ -147,7 +246,7 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
OfflineTtsConfig config_;
std::unique_ptr<OfflineTtsVitsModel> model_;
std::vector<std::unique_ptr<kaldifst::TextNormalizer>> tn_list_;
Lexicon lexicon_;
std::unique_ptr<OfflineTtsFrontend> frontend_;
};
} // namespace sherpa_onnx
... ...
... ... @@ -13,6 +13,9 @@ void OfflineTtsVitsModelConfig::Register(ParseOptions *po) {
po->Register("vits-model", &model, "Path to VITS model");
po->Register("vits-lexicon", &lexicon, "Path to lexicon.txt for VITS models");
po->Register("vits-tokens", &tokens, "Path to tokens.txt for VITS models");
po->Register("vits-data-dir", &data_dir,
"Path to the directory containing dict for espeak-ng. If it is "
"given, --vits-lexicon is ignored.");
po->Register("vits-noise-scale", &noise_scale, "noise_scale for VITS models");
po->Register("vits-noise-scale-w", &noise_scale_w,
"noise_scale_w for VITS models");
... ... @@ -31,6 +34,17 @@ bool OfflineTtsVitsModelConfig::Validate() const {
return false;
}
if (tokens.empty()) {
SHERPA_ONNX_LOGE("Please provide --vits-tokens");
return false;
}
if (!FileExists(tokens)) {
SHERPA_ONNX_LOGE("--vits-tokens: %s does not exist", tokens.c_str());
return false;
}
if (data_dir.empty()) {
if (lexicon.empty()) {
SHERPA_ONNX_LOGE("Please provide --vits-lexicon");
return false;
... ... @@ -41,16 +55,32 @@ bool OfflineTtsVitsModelConfig::Validate() const {
return false;
}
if (tokens.empty()) {
SHERPA_ONNX_LOGE("Please provide --vits-tokens");
} else {
if (!FileExists(data_dir + "/phontab")) {
SHERPA_ONNX_LOGE("%s/phontab does not exist. Skipping test",
data_dir.c_str());
return false;
}
if (!FileExists(tokens)) {
SHERPA_ONNX_LOGE("--vits-tokens: %s does not exist", tokens.c_str());
if (!FileExists(data_dir + "/phonindex")) {
SHERPA_ONNX_LOGE("%s/phonindex does not exist. Skipping test",
data_dir.c_str());
return false;
}
if (!FileExists(data_dir + "/phondata")) {
SHERPA_ONNX_LOGE("%s/phondata does not exist. Skipping test",
data_dir.c_str());
return false;
}
if (!FileExists(data_dir + "/intonations")) {
SHERPA_ONNX_LOGE("%s/intonations does not exist. Skipping test",
data_dir.c_str());
return false;
}
}
return true;
}
... ... @@ -61,6 +91,7 @@ std::string OfflineTtsVitsModelConfig::ToString() const {
os << "model=\"" << model << "\", ";
os << "lexicon=\"" << lexicon << "\", ";
os << "tokens=\"" << tokens << "\", ";
os << "data_dir=\"" << data_dir << "\", ";
os << "noise_scale=" << noise_scale << ", ";
os << "noise_scale_w=" << noise_scale_w << ", ";
os << "length_scale=" << length_scale << ")";
... ...
... ... @@ -16,6 +16,10 @@ struct OfflineTtsVitsModelConfig {
std::string lexicon;
std::string tokens;
// If data_dir is given, lexicon is ignored
// data_dir is for piper-phonemize, which uses espeak-ng
std::string data_dir;
float noise_scale = 0.667;
float noise_scale_w = 0.8;
float length_scale = 1;
... ... @@ -28,11 +32,13 @@ struct OfflineTtsVitsModelConfig {
OfflineTtsVitsModelConfig(const std::string &model,
const std::string &lexicon,
const std::string &tokens,
const std::string &data_dir,
float noise_scale = 0.667,
float noise_scale_w = 0.8, float length_scale = 1)
: model(model),
lexicon(lexicon),
tokens(tokens),
data_dir(data_dir),
noise_scale(noise_scale),
noise_scale_w(noise_scale_w),
length_scale(length_scale) {}
... ...
... ... @@ -51,6 +51,7 @@ class OfflineTtsVitsModel::Impl {
std::string Punctuations() const { return punctuations_; }
std::string Language() const { return language_; }
std::string Voice() const { return voice_; }
bool IsPiper() const { return is_piper_; }
int32_t NumSpeakers() const { return num_speakers_; }
... ... @@ -74,10 +75,12 @@ class OfflineTtsVitsModel::Impl {
Ort::AllocatorWithDefaultOptions allocator; // used in the macro below
SHERPA_ONNX_READ_META_DATA(sample_rate_, "sample_rate");
SHERPA_ONNX_READ_META_DATA(add_blank_, "add_blank");
SHERPA_ONNX_READ_META_DATA_WITH_DEFAULT(add_blank_, "add_blank", 0);
SHERPA_ONNX_READ_META_DATA(num_speakers_, "n_speakers");
SHERPA_ONNX_READ_META_DATA_STR(punctuations_, "punctuation");
SHERPA_ONNX_READ_META_DATA_STR_WITH_DEFAULT(punctuations_, "punctuation",
"");
SHERPA_ONNX_READ_META_DATA_STR(language_, "language");
SHERPA_ONNX_READ_META_DATA_STR_WITH_DEFAULT(voice_, "voice", "");
std::string comment;
SHERPA_ONNX_READ_META_DATA_STR(comment, "comment");
... ... @@ -215,6 +218,7 @@ class OfflineTtsVitsModel::Impl {
int32_t num_speakers_;
std::string punctuations_;
std::string language_;
std::string voice_;
bool is_piper_ = false;
};
... ... @@ -244,6 +248,7 @@ std::string OfflineTtsVitsModel::Punctuations() const {
}
std::string OfflineTtsVitsModel::Language() const { return impl_->Language(); }
std::string OfflineTtsVitsModel::Voice() const { return impl_->Voice(); }
bool OfflineTtsVitsModel::IsPiper() const { return impl_->IsPiper(); }
... ...
... ... @@ -46,7 +46,8 @@ class OfflineTtsVitsModel {
bool AddBlank() const;
std::string Punctuations() const;
std::string Language() const;
std::string Language() const; // e.g., Chinese, English, German, etc.
std::string Voice() const; // e.g., en-us, for espeak-ng
bool IsPiper() const;
int32_t NumSpeakers() const;
... ...
... ... @@ -21,6 +21,12 @@ void OfflineTtsConfig::Register(ParseOptions *po) {
"Multiple filenames are separated by a comma and they are "
"applied from left to right. An example value: "
"rule1.fst,rule2,fst,rule3.fst");
po->Register(
"tts-max-num-sentences", &max_num_sentences,
"Maximum number of sentences that we process at a time. "
"This is to avoid OOM for very long input text. "
"If you set it to -1, then we process all sentences in a single batch.");
}
bool OfflineTtsConfig::Validate() const {
... ... @@ -43,7 +49,8 @@ std::string OfflineTtsConfig::ToString() const {
os << "OfflineTtsConfig(";
os << "model=" << model.ToString() << ", ";
os << "rule_fsts=\"" << rule_fsts << "\")";
os << "rule_fsts=\"" << rule_fsts << "\", ";
os << "max_num_sentences=" << max_num_sentences << ")";
return os.str();
}
... ...
... ... @@ -28,10 +28,17 @@ struct OfflineTtsConfig {
// If there are multiple rules, they are applied from left to right.
std::string rule_fsts;
// Maximum number of sentences that we process at a time.
// This is to avoid OOM for very long input text.
// If you set it to -1, then we process all sentences in a single batch.
int32_t max_num_sentences = 2;
OfflineTtsConfig() = default;
OfflineTtsConfig(const OfflineTtsModelConfig &model,
const std::string &rule_fsts)
: model(model), rule_fsts(rule_fsts) {}
const std::string &rule_fsts, int32_t max_num_sentences)
: model(model),
rule_fsts(rule_fsts),
max_num_sentences(max_num_sentences) {}
void Register(ParseOptions *po);
bool Validate() const;
... ...
// sherpa-onnx/csrc/piper-phonemize-lexicon.cc
//
// Copyright (c) 2022-2023 Xiaomi Corporation
#include "sherpa-onnx/csrc/piper-phonemize-lexicon.h"
#include <codecvt>
#include <fstream>
#include <locale>
#include <map>
#include <mutex> // NOLINT
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#if __ANDROID_API__ >= 9
#include <strstream>
#include "android/asset_manager.h"
#include "android/asset_manager_jni.h"
#endif
#include "espeak-ng/speak_lib.h"
#include "phoneme_ids.hpp"
#include "phonemize.hpp"
#include "sherpa-onnx/csrc/macros.h"
#include "sherpa-onnx/csrc/onnx-utils.h"
namespace sherpa_onnx {
// Parse a piper tokens.txt stream into a map from unicode codepoint to
// integer token ID.
//
// Each line normally has two columns: "<symbol> <id>". A line containing a
// single column is interpreted as the space token (the symbol column is a
// literal space, so operator>> skips it and reads the ID first). Each symbol
// must decode to exactly one UTF-32 codepoint and must be unique; any
// violation logs an error and terminates the process.
static std::unordered_map<char32_t, int32_t> ReadTokens(std::istream &is) {
  // Converts the UTF-8 bytes of a symbol into UTF-32 codepoints.
  // NOTE(review): std::wstring_convert/std::codecvt_utf8 are deprecated
  // since C++17 but still available.
  std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> conv;
  std::unordered_map<char32_t, int32_t> token2id;

  std::string line;

  std::string sym;
  std::u32string s;
  int32_t id;
  while (std::getline(is, line)) {
    std::istringstream iss(line);
    iss >> sym;
    if (iss.eof()) {
      // Single-column line: what we read was actually the ID, and the
      // symbol is a space.
      id = atoi(sym.c_str());
      sym = " ";
    } else {
      iss >> id;
    }

    // eat the trailing \r\n on windows
    iss >> std::ws;
    if (!iss.eof()) {
      // Anything left after the ID means the line is malformed.
      SHERPA_ONNX_LOGE("Error when reading tokens: %s", line.c_str());
      exit(-1);
    }

    s = conv.from_bytes(sym);
    if (s.size() != 1) {
      // A token must be exactly one UTF-32 codepoint.
      SHERPA_ONNX_LOGE("Error when reading tokens at Line %s. size: %d",
                       line.c_str(), static_cast<int32_t>(s.size()));
      exit(-1);
    }

    char32_t c = s[0];

    if (token2id.count(c)) {
      SHERPA_ONNX_LOGE("Duplicated token %s. Line %s. Existing ID: %d",
                       sym.c_str(), line.c_str(), token2id.at(c));
      exit(-1);
    }

    token2id.insert({c, id});
  }

  return token2id;
}
// Map espeak phonemes (unicode codepoints) to model token IDs, inserting a
// pad token after every phoneme and wrapping the sequence in bos/eos
// markers. Unknown phonemes are logged and skipped rather than aborting.
//
// see the function "phonemes_to_ids" from
// https://github.com/rhasspy/piper/blob/master/notebooks/piper_inference_(ONNX).ipynb
static std::vector<int64_t> PhonemesToIds(
    const std::unordered_map<char32_t, int32_t> &token2id,
    const std::vector<piper::Phoneme> &phonemes) {
  // see
  // https://github.com/rhasspy/piper-phonemize/blob/master/src/phoneme_ids.hpp#L17
  int32_t pad = token2id.at(U'_');
  int32_t bos = token2id.at(U'^');
  int32_t eos = token2id.at(U'$');

  std::vector<int64_t> ans;
  // Each known phoneme contributes (id, pad); plus bos and eos.
  // (The previous reserve(phonemes.size()) under-reserved by half.)
  ans.reserve(2 * phonemes.size() + 2);

  ans.push_back(bos);
  for (auto p : phonemes) {
    if (token2id.count(p)) {
      ans.push_back(token2id.at(p));
      ans.push_back(pad);
    } else {
      // Fixed typo in the log message: "unkown" -> "unknown".
      SHERPA_ONNX_LOGE("Skip unknown phonemes. Unicode codepoint: \\U+%04x.",
                       p);
    }
  }
  ans.push_back(eos);

  return ans;
}
// Initialize espeak-ng with the given data directory exactly once per
// process; subsequent calls are no-ops. Exits the process on failure.
void InitEspeak(const std::string &data_dir) {
  static std::once_flag init_flag;
  std::call_once(init_flag, [data_dir]() {
    // espeak_Initialize returns the sample rate on success.
    int32_t ret =
        espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS, 0, data_dir.c_str(), 0);
    if (ret == 22050) {
      return;
    }

    SHERPA_ONNX_LOGE(
        "Failed to initialize espeak-ng with data dir: %s. Return code is: "
        "%d",
        data_dir.c_str(), ret);
    exit(-1);
  });
}
// Load the token table from `tokens` and ensure espeak-ng is initialized
// with `data_dir` (initialization happens at most once per process).
PiperPhonemizeLexicon::PiperPhonemizeLexicon(const std::string &tokens,
                                             const std::string &data_dir)
    : data_dir_(data_dir) {
  {
    std::ifstream token_stream(tokens);
    token2id_ = ReadTokens(token_stream);
  }

  InitEspeak(data_dir_);
}
#if __ANDROID_API__ >= 9
// Android variant: tokens.txt is read through the asset manager, but
// espeak-ng needs a real directory on disk, so the caller must first copy
// espeak-ng-data from the asset to internal/external storage and pass that
// directory as data_dir.
//
// Bug fix: previously the data_dir parameter was ignored and the (empty)
// data_dir_ member was passed to InitEspeak, so espeak-ng was initialized
// with an empty data directory on Android.
PiperPhonemizeLexicon::PiperPhonemizeLexicon(AAssetManager *mgr,
                                             const std::string &tokens,
                                             const std::string &data_dir)
    : data_dir_(data_dir) {
  {
    auto buf = ReadFile(mgr, tokens);

    std::istrstream is(buf.data(), buf.size());
    token2id_ = ReadTokens(is);
  }

  InitEspeak(data_dir_);
}
#endif
std::vector<std::vector<int64_t>> PiperPhonemizeLexicon::ConvertTextToTokenIds(
const std::string &text, const std::string &voice /*= ""*/) const {
piper::eSpeakPhonemeConfig config;
// ./bin/espeak-ng-bin --path ./install/share/espeak-ng-data/ --voices
// to list available voices
config.voice = voice; // e.g., voice is en-us
std::vector<std::vector<piper::Phoneme>> phonemes;
piper::phonemize_eSpeak(text, config, phonemes);
std::vector<std::vector<int64_t>> ans;
std::vector<int64_t> phoneme_ids;
for (const auto &p : phonemes) {
phoneme_ids = PhonemesToIds(token2id_, p);
ans.push_back(std::move(phoneme_ids));
}
return ans;
}
} // namespace sherpa_onnx
... ...
// sherpa-onnx/csrc/piper-phonemize-lexicon.h
//
// Copyright (c) 2022-2023 Xiaomi Corporation

#ifndef SHERPA_ONNX_CSRC_PIPER_PHONEMIZE_LEXICON_H_
#define SHERPA_ONNX_CSRC_PIPER_PHONEMIZE_LEXICON_H_

#include <string>
#include <unordered_map>
#include <vector>

#if __ANDROID_API__ >= 9
#include "android/asset_manager.h"
#include "android/asset_manager_jni.h"
#endif

#include "sherpa-onnx/csrc/offline-tts-frontend.h"

namespace sherpa_onnx {

// TTS text front-end for piper models: phonemizes input text with
// espeak-ng (via piper-phonemize) and maps each phoneme codepoint to a
// model token ID using tokens.txt.
class PiperPhonemizeLexicon : public OfflineTtsFrontend {
 public:
  // @param tokens Path to tokens.txt mapping symbols to integer IDs.
  // @param data_dir Directory containing the espeak-ng data files.
  PiperPhonemizeLexicon(const std::string &tokens, const std::string &data_dir);

#if __ANDROID_API__ >= 9
  // Android variant: tokens.txt is read via the asset manager. data_dir
  // must still be a real directory on storage, since espeak-ng cannot read
  // from the asset manager directly.
  PiperPhonemizeLexicon(AAssetManager *mgr, const std::string &tokens,
                        const std::string &data_dir);
#endif

  // See OfflineTtsFrontend. `voice` is an espeak-ng voice name, e.g., en-us.
  std::vector<std::vector<int64_t>> ConvertTextToTokenIds(
      const std::string &text, const std::string &voice = "") const override;

 private:
  std::string data_dir_;
  // map unicode codepoint to an integer ID
  std::unordered_map<char32_t, int32_t> token2id_;
};

}  // namespace sherpa_onnx

#endif  // SHERPA_ONNX_CSRC_PIPER_PHONEMIZE_LEXICON_H_
... ...
... ... @@ -48,7 +48,7 @@ TEST(PiperPhonemize, Case1) {
piper::eSpeakPhonemeConfig config;
// ./bin/espeak-ng --path ./install/share/espeak-ng-data/ --voices
// ./bin/espeak-ng-bin --path ./install/share/espeak-ng-data/ --voices
// to list available voices
config.voice = "en-us";
... ... @@ -61,15 +61,15 @@ TEST(PiperPhonemize, Case1) {
}
std::cout << "\n";
std::vector<piper::PhonemeId> phonemeIds;
std::map<piper::Phoneme, std::size_t> missingPhonemes;
std::vector<piper::PhonemeId> phoneme_ids;
std::map<piper::Phoneme, std::size_t> missing_phonemes;
{
piper::PhonemeIdConfig config;
phonemes_to_ids(phonemes[0], config, phonemeIds, missingPhonemes);
phonemes_to_ids(phonemes[0], config, phoneme_ids, missing_phonemes);
}
for (int32_t p : phonemeIds) {
for (int32_t p : phoneme_ids) {
std::cout << p << " ";
}
std::cout << "\n";
... ...
... ... @@ -545,6 +545,12 @@ static OfflineTtsConfig GetOfflineTtsConfig(JNIEnv *env, jobject config) {
ans.model.vits.tokens = p;
env->ReleaseStringUTFChars(s, p);
fid = env->GetFieldID(vits_cls, "dataDir", "Ljava/lang/String;");
s = (jstring)env->GetObjectField(vits, fid);
p = env->GetStringUTFChars(s, nullptr);
ans.model.vits.data_dir = p;
env->ReleaseStringUTFChars(s, p);
fid = env->GetFieldID(vits_cls, "noiseScale", "F");
ans.model.vits.noise_scale = env->GetFloatField(vits, fid);
... ... @@ -573,6 +579,9 @@ static OfflineTtsConfig GetOfflineTtsConfig(JNIEnv *env, jobject config) {
ans.rule_fsts = p;
env->ReleaseStringUTFChars(s, p);
fid = env->GetFieldID(cls, "maxNumSentences", "I");
ans.max_num_sentences = env->GetIntField(config, fid);
return ans;
}
... ... @@ -589,6 +598,11 @@ JNIEXPORT jlong JNICALL Java_com_k2fsa_sherpa_onnx_OfflineTts_new(
#endif
auto config = sherpa_onnx::GetOfflineTtsConfig(env, _config);
SHERPA_ONNX_LOGE("config:\n%s", config.ToString().c_str());
if (!config.Validate()) {
SHERPA_ONNX_LOGE("Erros found in config!");
}
auto tts = new sherpa_onnx::SherpaOnnxOfflineTts(
#if __ANDROID_API__ >= 9
mgr,
... ...
... ... @@ -16,17 +16,20 @@ void PybindOfflineTtsVitsModelConfig(py::module *m) {
py::class_<PyClass>(*m, "OfflineTtsVitsModelConfig")
.def(py::init<>())
.def(py::init<const std::string &, const std::string &,
const std::string &, float, float, float>(),
const std::string &, const std::string, float, float,
float>(),
py::arg("model"), py::arg("lexicon"), py::arg("tokens"),
py::arg("noise_scale") = 0.667, py::arg("noise_scale_w") = 0.8,
py::arg("length_scale") = 1.0)
py::arg("data_dir") = "", py::arg("noise_scale") = 0.667,
py::arg("noise_scale_w") = 0.8, py::arg("length_scale") = 1.0)
.def_readwrite("model", &PyClass::model)
.def_readwrite("lexicon", &PyClass::lexicon)
.def_readwrite("tokens", &PyClass::tokens)
.def_readwrite("data_dir", &PyClass::data_dir)
.def_readwrite("noise_scale", &PyClass::noise_scale)
.def_readwrite("noise_scale_w", &PyClass::noise_scale_w)
.def_readwrite("length_scale", &PyClass::length_scale)
.def("__str__", &PyClass::ToString);
.def("__str__", &PyClass::ToString)
.def("validate", &PyClass::Validate);
}
} // namespace sherpa_onnx
... ...
... ... @@ -30,10 +30,14 @@ static void PybindOfflineTtsConfig(py::module *m) {
using PyClass = OfflineTtsConfig;
py::class_<PyClass>(*m, "OfflineTtsConfig")
    .def(py::init<>())
    // Single constructor overload; the new `max_num_sentences` argument
    // defaults to 2, matching the default used by the other language
    // bindings in this commit.
    .def(py::init<const OfflineTtsModelConfig &, const std::string &,
                  int32_t>(),
         py::arg("model"), py::arg("rule_fsts") = "",
         py::arg("max_num_sentences") = 2)
    .def_readwrite("model", &PyClass::model)
    .def_readwrite("rule_fsts", &PyClass::rule_fsts)
    .def_readwrite("max_num_sentences", &PyClass::max_num_sentences)
    .def("validate", &PyClass::Validate)
    .def("__str__", &PyClass::ToString);
}
... ...
... ... @@ -578,6 +578,7 @@ func sherpaOnnxOfflineTtsVitsModelConfig(
model: String,
lexicon: String,
tokens: String,
dataDir: String = "",
noiseScale: Float = 0.667,
noiseScaleW: Float = 0.8,
lengthScale: Float = 1.0
... ... @@ -586,6 +587,7 @@ func sherpaOnnxOfflineTtsVitsModelConfig(
model: toCPointer(model),
lexicon: toCPointer(lexicon),
tokens: toCPointer(tokens),
data_dir: toCPointer(dataDir),
noise_scale: noiseScale,
noise_scale_w: noiseScaleW,
length_scale: lengthScale)
... ... @@ -607,11 +609,13 @@ func sherpaOnnxOfflineTtsModelConfig(
/// Builds a `SherpaOnnxOfflineTtsConfig` value for the C API.
///
/// - Parameters:
///   - model: The offline TTS model configuration.
///   - ruleFsts: Rule FST path string, forwarded to the C `rule_fsts` field.
///   - maxNumSenetences: Maximum number of sentences per batch, forwarded to
///     the C `max_num_sentences` field (narrowed to `Int32`).
///     NOTE(review): the label is misspelled ("Senetences"); it is kept as-is
///     for source compatibility — add a correctly spelled overload and
///     deprecate this label rather than renaming in place.
func sherpaOnnxOfflineTtsConfig(
  model: SherpaOnnxOfflineTtsModelConfig,
  ruleFsts: String = "",
  maxNumSenetences: Int = 2
) -> SherpaOnnxOfflineTtsConfig {
  return SherpaOnnxOfflineTtsConfig(
    model: model,
    rule_fsts: toCPointer(ruleFsts),
    max_num_sentences: Int32(maxNumSenetences)
  )
}
... ...
... ... @@ -7,17 +7,12 @@ if [ ! -d ../build-swift-macos ]; then
exit 1
fi
# Fetch the piper test model only when it is not already present; the
# archive is removed after extraction to keep the workspace small.
if [ ! -d ./vits-piper-en_US-amy-low ]; then
  echo "Download a pre-trained model for testing."

  wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-amy-low.tar.bz2
  tar xf vits-piper-en_US-amy-low.tar.bz2
  rm vits-piper-en_US-amy-low.tar.bz2
fi
if [ ! -e ./tts ]; then
... ...
func run() {
let model = "./vits-vctk/vits-vctk.onnx"
let lexicon = "./vits-vctk/lexicon.txt"
let tokens = "./vits-vctk/tokens.txt"
let model = "./vits-piper-en_US-amy-low/en_US-amy-low.onnx"
let tokens = "./vits-piper-en_US-amy-low/tokens.txt"
let dataDir = "./vits-piper-en_US-amy-low/espeak-ng-data"
let vits = sherpaOnnxOfflineTtsVitsModelConfig(
model: model,
lexicon: lexicon,
tokens: tokens
lexicon: "",
tokens: tokens,
dataDir: dataDir
)
let modelConfig = sherpaOnnxOfflineTtsModelConfig(vits: vits)
var ttsConfig = sherpaOnnxOfflineTtsConfig(model: modelConfig)
let tts = SherpaOnnxOfflineTtsWrapper(config: &ttsConfig)
let text = "How are you doing? Fantastic!"
let text =
"“Today as always, men fall into two groups: slaves and free men. Whoever does not have two-thirds of his day for himself, is a slave, whatever he may be: a statesman, a businessman, an official, or a scholar.”"
let sid = 99
let speed: Float = 1.0
... ...