Fangjun Kuang
Committed by GitHub

Support WebAssembly for text-to-speech (#577)

name: wasm-simd-hf-space-en
on:
push:
branches:
- wasm-2
release:
types:
- published
workflow_dispatch:
concurrency:
group: wasm-simd-hf-space-en-${{ github.ref }}
cancel-in-progress: true
jobs:
wasm-simd-hf-space-en:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
- name: View emsdk version
shell: bash
run: |
emcc -v
echo "--------------------"
emcc --check
- name: Download model files
shell: bash
run: |
cd wasm/assets
ls -lh
echo "----------"
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-libritts_r-medium.tar.bz2
tar xf vits-piper-en_US-libritts_r-medium.tar.bz2
rm vits-piper-en_US-libritts_r-medium.tar.bz2
mv vits-piper-en_US-libritts_r-medium/en_US-libritts_r-medium.onnx ./model.onnx
mv vits-piper-en_US-libritts_r-medium/tokens.txt ./
mv vits-piper-en_US-libritts_r-medium/espeak-ng-data ./
rm -rf vits-piper-en_US-libritts_r-medium
ls -lh
- name: Build sherpa-onnx for WebAssembly
shell: bash
run: |
./build-wasm-simd.sh
- name: Collect files
shell: bash
run: |
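# Derive the version from the `set(SHERPA_ONNX_VERSION "...")` line in CMakeLists.txt (format assumed);
# the same value is used to name the output directory and tarball below.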
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
mv build-wasm-simd/install/bin/wasm sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-en
ls -lh sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-en
tar cjfv sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-en.tar.bz2 ./sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-en
- name: Upload wasm files
uses: actions/upload-artifact@v4
with:
name: sherpa-onnx-wasm-simd-en
path: ./sherpa-onnx-wasm-simd-*.tar.bz2
- name: Publish to huggingface
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
uses: nick-fields/retry@v2
with:
max_attempts: 20
timeout_seconds: 200
shell: bash
command: |
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
git config --global user.email "csukuangfj@gmail.com"
git config --global user.name "Fangjun Kuang"
rm -rf huggingface
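# Skip downloading existing LFS-tracked files when cloning the Space; only the files copied below need to be committed and pushed.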
export GIT_LFS_SKIP_SMUDGE=1
git clone https://huggingface.co/spaces/k2-fsa/web-assembly-tts-sherpa-onnx-en huggingface
cd huggingface
git fetch
git pull
git merge -m "merge remote" --ff origin main
cp -v ../sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-en/* .
git status
git lfs track "*.data"
git lfs track "*.wasm"
ls -lh
git add .
git commit -m "update model"
git push https://csukuangfj:$HF_TOKEN@huggingface.co/spaces/k2-fsa/web-assembly-tts-sherpa-onnx-en main
... ...
... ... @@ -20,6 +20,8 @@ option(SHERPA_ONNX_ENABLE_JNI "Whether to build JNI interface" OFF)
option(SHERPA_ONNX_ENABLE_C_API "Whether to build C API" ON)
option(SHERPA_ONNX_ENABLE_WEBSOCKET "Whether to build websocket server/client" ON)
option(SHERPA_ONNX_ENABLE_GPU "Enable ONNX Runtime GPU support" OFF)
option(SHERPA_ONNX_ENABLE_WASM "Whether to enable WASM" OFF)
option(SHERPA_ONNX_ENABLE_BINARY "Whether to build binaries" ON)
option(SHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY "True to link libstdc++ statically. Used only when BUILD_SHARED_LIBS is OFF on Linux" ON)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
... ... @@ -99,6 +101,10 @@ message(STATUS "SHERPA_ONNX_ENABLE_JNI ${SHERPA_ONNX_ENABLE_JNI}")
message(STATUS "SHERPA_ONNX_ENABLE_C_API ${SHERPA_ONNX_ENABLE_C_API}")
message(STATUS "SHERPA_ONNX_ENABLE_WEBSOCKET ${SHERPA_ONNX_ENABLE_WEBSOCKET}")
message(STATUS "SHERPA_ONNX_ENABLE_GPU ${SHERPA_ONNX_ENABLE_GPU}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM ${SHERPA_ONNX_ENABLE_WASM}")
if(SHERPA_ONNX_ENABLE_WASM)
add_definitions(-DSHERPA_ONNX_ENABLE_WASM=1)
endif()
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 14 CACHE STRING "The C++ version to be used.")
... ... @@ -109,7 +115,7 @@ message(STATUS "C++ Standard version: ${CMAKE_CXX_STANDARD}")
include(CheckIncludeFileCXX)
if(UNIX AND NOT APPLE)
if(UNIX AND NOT APPLE AND NOT SHERPA_ONNX_ENABLE_WASM)
check_include_file_cxx(alsa/asoundlib.h SHERPA_ONNX_HAS_ALSA)
if(SHERPA_ONNX_HAS_ALSA)
add_definitions(-DSHERPA_ONNX_ENABLE_ALSA=1)
... ... @@ -160,6 +166,11 @@ endif()
list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules)
list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake)
if(SHERPA_ONNX_ENABLE_WASM)
# Enable it for debugging in case there is something wrong.
# string(APPEND CMAKE_CXX_FLAGS " -g4 -s ASSERTIONS=2 -s SAFE_HEAP=1 -s STACK_OVERFLOW_CHECK=1 ")
endif()
if(NOT BUILD_SHARED_LIBS AND CMAKE_SYSTEM_NAME STREQUAL Linux)
if(SHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY)
message(STATUS "Link libstdc++ statically")
... ... @@ -200,9 +211,14 @@ include(piper-phonemize)
add_subdirectory(sherpa-onnx)
if(SHERPA_ONNX_ENABLE_C_API)
if(SHERPA_ONNX_ENABLE_C_API AND SHERPA_ONNX_ENABLE_BINARY)
add_subdirectory(c-api-examples)
endif()
if(SHERPA_ONNX_ENABLE_WASM)
add_subdirectory(wasm)
endif()
message(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
if(NOT BUILD_SHARED_LIBS)
... ...
#!/usr/bin/env bash
# Copyright (c) 2024 Xiaomi Corporation
#
# This script builds sherpa-onnx for WebAssembly
set -ex
if [ x"$EMSCRIPTEN" == x"" ]; then
if ! command -v emcc &> /dev/null; then
echo "Please install emscripten first"
echo ""
echo "You can use the following commands to install it:"
echo ""
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install latest"
echo "./emsdk activate latest"
echo "source ./emsdk_env.sh"
exit 1
else
EMSCRIPTEN=$(dirname $(realpath $(which emcc)))
fi
fi
export EMSCRIPTEN=$EMSCRIPTEN
echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
exit 1
fi
mkdir -p build-wasm-simd
pushd build-wasm-simd
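# wasm/CMakeLists.txt checks this variable and aborts if the wasm target is configured without going through this script.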
export SHERPA_ONNX_IS_USING_BUILD_WASM_SH=ON
cmake \
-DCMAKE_INSTALL_PREFIX=./install \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE=$EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake \
\
-DSHERPA_ONNX_ENABLE_PYTHON=OFF \
-DSHERPA_ONNX_ENABLE_TESTS=OFF \
-DSHERPA_ONNX_ENABLE_CHECK=OFF \
-DBUILD_SHARED_LIBS=OFF \
-DSHERPA_ONNX_ENABLE_PORTAUDIO=OFF \
-DSHERPA_ONNX_ENABLE_JNI=OFF \
-DSHERPA_ONNX_ENABLE_C_API=ON \
-DSHERPA_ONNX_ENABLE_WEBSOCKET=OFF \
-DSHERPA_ONNX_ENABLE_GPU=OFF \
-DSHERPA_ONNX_ENABLE_WASM=ON \
-DSHERPA_ONNX_ENABLE_BINARY=OFF \
-DSHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY=OFF \
..
make -j2
make install
... ...
function(download_espeak_ng_for_piper)
include(FetchContent)
set(espeak_ng_URL "https://github.com/csukuangfj/espeak-ng/archive/c58d2a4a88e9a291ca448f046e15c6188cbd3b3a.zip")
set(espeak_ng_URL2 "")
set(espeak_ng_HASH "SHA256=8a48251e6926133dd91fcf6cb210c7c2e290a9b578d269446e2d32d710b0dfa0")
set(espeak_ng_URL "https://github.com/csukuangfj/espeak-ng/archive/69bf6927964fb042aeb827cfdf6082a30f5802eb.zip")
set(espeak_ng_URL2 "https://hub.nuaa.cf/csukuangfj/espeak-ng/archive/69bf6927964fb042aeb827cfdf6082a30f5802eb.zip")
set(espeak_ng_HASH "SHA256=745e35b21ece6804b4a1839722f9e625ac909380c8f85873ad71bf145877075a")
set(BUILD_ESPEAK_NG_TESTS OFF CACHE BOOL "" FORCE)
set(USE_ASYNC OFF CACHE BOOL "" FORCE)
... ... @@ -15,14 +15,18 @@ function(download_espeak_ng_for_piper)
set(EXTRA_cmn ON CACHE BOOL "" FORCE)
set(EXTRA_ru ON CACHE BOOL "" FORCE)
if(SHERPA_ONNX_ENABLE_WASM)
set(BUILD_ESPEAK_NG_EXE OFF CACHE BOOL "" FORCE)
endif()
# If you don't have access to the Internet,
# please pre-download espeak-ng
set(possible_file_locations
$ENV{HOME}/Downloads/espeak-ng-c58d2a4a88e9a291ca448f046e15c6188cbd3b3a.zip
${CMAKE_SOURCE_DIR}/espeak-ng-c58d2a4a88e9a291ca448f046e15c6188cbd3b3a.zip
${CMAKE_BINARY_DIR}/espeak-ng-c58d2a4a88e9a291ca448f046e15c6188cbd3b3a.zip
/tmp/espeak-ng-c58d2a4a88e9a291ca448f046e15c6188cbd3b3a.zip
/star-fj/fangjun/download/github/espeak-ng-c58d2a4a88e9a291ca448f046e15c6188cbd3b3a.zip
$ENV{HOME}/Downloads/espeak-ng-69bf6927964fb042aeb827cfdf6082a30f5802eb.zip
${CMAKE_SOURCE_DIR}/espeak-ng-69bf6927964fb042aeb827cfdf6082a30f5802eb.zip
${CMAKE_BINARY_DIR}/espeak-ng-69bf6927964fb042aeb827cfdf6082a30f5802eb.zip
/tmp/espeak-ng-69bf6927964fb042aeb827cfdf6082a30f5802eb.zip
/star-fj/fangjun/download/github/espeak-ng-69bf6927964fb042aeb827cfdf6082a30f5802eb.zip
)
foreach(f IN LISTS possible_file_locations)
... ...
function(download_kaldi_decoder)
include(FetchContent)
set(kaldi_decoder_URL "https://github.com/k2-fsa/kaldi-decoder/archive/refs/tags/v0.2.3.tar.gz")
set(kaldi_decoder_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/kaldi-decoder-0.2.3.tar.gz")
set(kaldi_decoder_HASH "SHA256=98bf445a5b7961ccf3c3522317d900054eaadb6a9cdcf4531e7d9caece94a56d")
set(kaldi_decoder_URL "https://github.com/k2-fsa/kaldi-decoder/archive/refs/tags/v0.2.4.tar.gz")
set(kaldi_decoder_URL2 "https://hub.nuaa.cf/k2-fsa/kaldi-decoder/archive/refs/tags/v0.2.4.tar.gz")
set(kaldi_decoder_HASH "SHA256=136d96c2f1f8ec44de095205f81a6ce98981cd867fe4ba840f9415a0b58fe601")
set(KALDI_DECODER_BUILD_PYTHON OFF CACHE BOOL "" FORCE)
set(KALDI_DECODER_ENABLE_TESTS OFF CACHE BOOL "" FORCE)
... ... @@ -12,11 +12,11 @@ function(download_kaldi_decoder)
# If you don't have access to the Internet,
# please pre-download kaldi-decoder
set(possible_file_locations
$ENV{HOME}/Downloads/kaldi-decoder-0.2.3.tar.gz
${CMAKE_SOURCE_DIR}/kaldi-decoder-0.2.3.tar.gz
${CMAKE_BINARY_DIR}/kaldi-decoder-0.2.3.tar.gz
/tmp/kaldi-decoder-0.2.3.tar.gz
/star-fj/fangjun/download/github/kaldi-decoder-0.2.3.tar.gz
$ENV{HOME}/Downloads/kaldi-decoder-0.2.4.tar.gz
${CMAKE_SOURCE_DIR}/kaldi-decoder-0.2.4.tar.gz
${CMAKE_BINARY_DIR}/kaldi-decoder-0.2.4.tar.gz
/tmp/kaldi-decoder-0.2.4.tar.gz
/star-fj/fangjun/download/github/kaldi-decoder-0.2.4.tar.gz
)
foreach(f IN LISTS possible_file_locations)
... ...
function(download_kaldifst)
include(FetchContent)
set(kaldifst_URL "https://github.com/k2-fsa/kaldifst/archive/refs/tags/v1.7.9.tar.gz")
set(kaldifst_URL2 "https://huggingface.co/csukuangfj/kaldi-hmm-gmm-cmake-deps/resolve/main/kaldifst-1.7.9.tar.gz")
set(kaldifst_HASH "SHA256=8c653021491dca54c38ab659565edfab391418a79ae87099257863cd5664dd39")
set(kaldifst_URL "https://github.com/k2-fsa/kaldifst/archive/refs/tags/v1.7.10.tar.gz")
set(kaldifst_URL2 "https://hub.nuaa.cf/k2-fsa/kaldifst/archive/refs/tags/v1.7.10.tar.gz")
set(kaldifst_HASH "SHA256=7f7b3173a6584a6b1987f65ae7af2ac453d66b845f875a9d31074b8d2cd0de54")
# If you don't have access to the Internet,
# please pre-download kaldifst
set(possible_file_locations
$ENV{HOME}/Downloads/kaldifst-1.7.9.tar.gz
${CMAKE_SOURCE_DIR}/kaldifst-1.7.9.tar.gz
${CMAKE_BINARY_DIR}/kaldifst-1.7.9.tar.gz
/tmp/kaldifst-1.7.9.tar.gz
/star-fj/fangjun/download/github/kaldifst-1.7.9.tar.gz
$ENV{HOME}/Downloads/kaldifst-1.7.10.tar.gz
${CMAKE_SOURCE_DIR}/kaldifst-1.7.10.tar.gz
${CMAKE_BINARY_DIR}/kaldifst-1.7.10.tar.gz
/tmp/kaldifst-1.7.10.tar.gz
/star-fj/fangjun/download/github/kaldifst-1.7.10.tar.gz
)
foreach(f IN LISTS possible_file_locations)
... ...
# Copyright (c) 2022-2024 Xiaomi Corporation
message(STATUS "CMAKE_SYSTEM_NAME: ${CMAKE_SYSTEM_NAME}")
message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
if(NOT SHERPA_ONNX_ENABLE_WASM)
message(FATAL_ERROR "This file is for WebAssembly.")
endif()
if(BUILD_SHARED_LIBS)
message(FATAL_ERROR "BUILD_SHARED_LIBS should be OFF for WebAssembly")
endif()
set(onnxruntime_URL "https://github.com/csukuangfj/onnxruntime-libs/releases/download/v1.17.0/onnxruntime-wasm-static_lib-simd-1.17.0.zip")
set(onnxruntime_URL2 "https://hub.nuaa.cf/csukuangfj/onnxruntime-libs/releases/download/v1.17.0/onnxruntime-wasm-static_lib-simd-1.17.0.zip")
set(onnxruntime_HASH "SHA256=0ee6120d2ade093eff731af792fd137ac2db580eb2dc5b8bf39e0897b0d7afd9")
# If you don't have access to the Internet,
# please download onnxruntime to one of the following locations.
# You can add more if you want.
set(possible_file_locations
$ENV{HOME}/Downloads/onnxruntime-wasm-static_lib-simd-1.17.0.zip
${CMAKE_SOURCE_DIR}/onnxruntime-wasm-static_lib-simd-1.17.0.zip
${CMAKE_BINARY_DIR}/onnxruntime-wasm-static_lib-simd-1.17.0.zip
/tmp/onnxruntime-wasm-static_lib-simd-1.17.0.zip
/star-fj/fangjun/download/github/onnxruntime-wasm-static_lib-simd-1.17.0.zip
)
foreach(f IN LISTS possible_file_locations)
if(EXISTS ${f})
set(onnxruntime_URL "${f}")
file(TO_CMAKE_PATH "${onnxruntime_URL}" onnxruntime_URL)
message(STATUS "Found local downloaded onnxruntime: ${onnxruntime_URL}")
set(onnxruntime_URL2)
break()
endif()
endforeach()
FetchContent_Declare(onnxruntime
URL
${onnxruntime_URL}
${onnxruntime_URL2}
URL_HASH ${onnxruntime_HASH}
)
FetchContent_GetProperties(onnxruntime)
if(NOT onnxruntime_POPULATED)
message(STATUS "Downloading onnxruntime from ${onnxruntime_URL}")
FetchContent_Populate(onnxruntime)
endif()
message(STATUS "onnxruntime is downloaded to ${onnxruntime_SOURCE_DIR}")
# for static libraries, we use onnxruntime_lib_files directly below
include_directories(${onnxruntime_SOURCE_DIR}/include)
file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/lib*.a")
set(onnxruntime_lib_files ${onnxruntime_lib_files} PARENT_SCOPE)
message(STATUS "onnxruntime lib files: ${onnxruntime_lib_files}")
install(FILES ${onnxruntime_lib_files} DESTINATION lib)
... ...
... ... @@ -4,8 +4,9 @@ function(download_onnxruntime)
message(STATUS "CMAKE_SYSTEM_NAME: ${CMAKE_SYSTEM_NAME}")
message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
if(CMAKE_SYSTEM_NAME STREQUAL Linux AND CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
if(SHERPA_ONNX_ENABLE_WASM)
include(onnxruntime-wasm-simd)
elseif(CMAKE_SYSTEM_NAME STREQUAL Linux AND CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
if(BUILD_SHARED_LIBS)
include(onnxruntime-linux-aarch64)
else()
... ...
... ... @@ -12,4 +12,3 @@ install(TARGETS sherpa-onnx-c-api DESTINATION lib)
install(FILES c-api.h
DESTINATION include/sherpa-onnx/c-api
)
... ...
... ... @@ -11,6 +11,7 @@
#include "sherpa-onnx/csrc/circular-buffer.h"
#include "sherpa-onnx/csrc/display.h"
#include "sherpa-onnx/csrc/macros.h"
#include "sherpa-onnx/csrc/offline-recognizer.h"
#include "sherpa-onnx/csrc/offline-tts.h"
#include "sherpa-onnx/csrc/online-recognizer.h"
... ... @@ -90,7 +91,7 @@ SherpaOnnxOnlineRecognizer *CreateOnlineRecognizer(
SHERPA_ONNX_OR(config->hotwords_score, 1.5);
if (config->model_config.debug) {
fprintf(stderr, "%s\n", recognizer_config.ToString().c_str());
SHERPA_ONNX_LOGE("%s\n", recognizer_config.ToString().c_str());
}
SherpaOnnxOnlineRecognizer *recognizer = new SherpaOnnxOnlineRecognizer;
... ... @@ -320,7 +321,7 @@ SherpaOnnxOfflineRecognizer *CreateOfflineRecognizer(
SHERPA_ONNX_OR(config->hotwords_score, 1.5);
if (config->model_config.debug) {
fprintf(stderr, "%s\n", recognizer_config.ToString().c_str());
SHERPA_ONNX_LOGE("%s", recognizer_config.ToString().c_str());
}
SherpaOnnxOfflineRecognizer *recognizer = new SherpaOnnxOfflineRecognizer;
... ... @@ -476,7 +477,7 @@ SherpaOnnxVoiceActivityDetector *SherpaOnnxCreateVoiceActivityDetector(
vad_config.debug = SHERPA_ONNX_OR(config->debug, false);
if (vad_config.debug) {
fprintf(stderr, "%s\n", vad_config.ToString().c_str());
SHERPA_ONNX_LOGE("%s", vad_config.ToString().c_str());
}
SherpaOnnxVoiceActivityDetector *p = new SherpaOnnxVoiceActivityDetector;
... ... @@ -566,7 +567,7 @@ SherpaOnnxOfflineTts *SherpaOnnxCreateOfflineTts(
tts_config.max_num_sentences = SHERPA_ONNX_OR(config->max_num_sentences, 2);
if (tts_config.model.debug) {
fprintf(stderr, "%s\n", tts_config.ToString().c_str());
SHERPA_ONNX_LOGE("%s\n", tts_config.ToString().c_str());
}
SherpaOnnxOfflineTts *tts = new SherpaOnnxOfflineTts;
... ... @@ -582,6 +583,10 @@ int32_t SherpaOnnxOfflineTtsSampleRate(const SherpaOnnxOfflineTts *tts) {
return tts->impl->SampleRate();
}
int32_t SherpaOnnxOfflineTtsNumSpeakers(const SherpaOnnxOfflineTts *tts) {
return tts->impl->NumSpeakers();
}
const SherpaOnnxGeneratedAudio *SherpaOnnxOfflineTtsGenerate(
const SherpaOnnxOfflineTts *tts, const char *text, int32_t sid,
float speed) {
... ...
... ... @@ -658,6 +658,10 @@ SHERPA_ONNX_API void SherpaOnnxDestroyOfflineTts(SherpaOnnxOfflineTts *tts);
SHERPA_ONNX_API int32_t
SherpaOnnxOfflineTtsSampleRate(const SherpaOnnxOfflineTts *tts);
// Return the number of speakers of the current TTS object
SHERPA_ONNX_API int32_t
SherpaOnnxOfflineTtsNumSpeakers(const SherpaOnnxOfflineTts *tts);
// Generate audio from the given text and speaker id (sid).
// The user has to use SherpaOnnxDestroyOfflineTtsGeneratedAudio() to free the
// returned pointer to avoid a memory leak.
... ...
... ... @@ -128,9 +128,6 @@ if(APPLE)
)
endif()
if(NOT WIN32)
target_link_libraries(sherpa-onnx-core -pthread)
endif()
if(ANDROID_NDK)
target_link_libraries(sherpa-onnx-core android log)
... ... @@ -172,36 +169,42 @@ if(SHERPA_ONNX_ENABLE_CHECK)
endif()
if(NOT BUILD_SHARED_LIBS AND CMAKE_SYSTEM_NAME STREQUAL Linux)
target_link_libraries(sherpa-onnx-core -pthread -ldl)
target_link_libraries(sherpa-onnx-core -ldl)
endif()
add_executable(sherpa-onnx sherpa-onnx.cc)
add_executable(sherpa-onnx-keyword-spotter sherpa-onnx-keyword-spotter.cc)
add_executable(sherpa-onnx-offline sherpa-onnx-offline.cc)
add_executable(sherpa-onnx-offline-parallel sherpa-onnx-offline-parallel.cc)
add_executable(sherpa-onnx-offline-tts sherpa-onnx-offline-tts.cc)
set(main_exes
sherpa-onnx
sherpa-onnx-keyword-spotter
sherpa-onnx-offline
sherpa-onnx-offline-parallel
sherpa-onnx-offline-tts
)
if(NOT WIN32 AND NOT SHERPA_ONNX_ENABLE_WASM AND CMAKE_SYSTEM_NAME STREQUAL Linux)
target_link_libraries(sherpa-onnx-core -pthread)
endif()
foreach(exe IN LISTS main_exes)
target_link_libraries(${exe} sherpa-onnx-core)
endforeach()
if(SHERPA_ONNX_ENABLE_BINARY)
add_executable(sherpa-onnx sherpa-onnx.cc)
add_executable(sherpa-onnx-keyword-spotter sherpa-onnx-keyword-spotter.cc)
add_executable(sherpa-onnx-offline sherpa-onnx-offline.cc)
add_executable(sherpa-onnx-offline-parallel sherpa-onnx-offline-parallel.cc)
add_executable(sherpa-onnx-offline-tts sherpa-onnx-offline-tts.cc)
set(main_exes
sherpa-onnx
sherpa-onnx-keyword-spotter
sherpa-onnx-offline
sherpa-onnx-offline-parallel
sherpa-onnx-offline-tts
)
if(NOT WIN32)
foreach(exe IN LISTS main_exes)
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib")
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib")
if(SHERPA_ONNX_ENABLE_PYTHON)
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
endif()
target_link_libraries(${exe} sherpa-onnx-core)
endforeach()
if(NOT WIN32)
foreach(exe IN LISTS main_exes)
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib")
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../../../sherpa_onnx/lib")
if(SHERPA_ONNX_ENABLE_PYTHON)
target_link_libraries(${exe} "-Wl,-rpath,${SHERPA_ONNX_RPATH_ORIGIN}/../lib/python${PYTHON_VERSION}/site-packages/sherpa_onnx/lib")
endif()
endforeach()
endif()
endif()
if(SHERPA_ONNX_ENABLE_PYTHON AND WIN32)
... ... @@ -214,14 +217,16 @@ if(WIN32 AND BUILD_SHARED_LIBS)
install(TARGETS sherpa-onnx-core DESTINATION bin)
endif()
install(
TARGETS
${main_exes}
DESTINATION
bin
)
if(SHERPA_ONNX_ENABLE_BINARY)
install(
TARGETS
${main_exes}
DESTINATION
bin
)
endif()
if(SHERPA_ONNX_HAS_ALSA)
if(SHERPA_ONNX_HAS_ALSA AND SHERPA_ONNX_ENABLE_BINARY)
add_executable(sherpa-onnx-alsa sherpa-onnx-alsa.cc alsa.cc)
add_executable(sherpa-onnx-offline-tts-play-alsa sherpa-onnx-offline-tts-play-alsa.cc alsa-play.cc)
... ... @@ -261,7 +266,7 @@ if(SHERPA_ONNX_HAS_ALSA)
)
endif()
if(SHERPA_ONNX_ENABLE_PORTAUDIO)
if(SHERPA_ONNX_ENABLE_PORTAUDIO AND SHERPA_ONNX_ENABLE_BINARY)
add_executable(sherpa-onnx-offline-tts-play
sherpa-onnx-offline-tts-play.cc
microphone.cc
... ... @@ -330,7 +335,7 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO)
)
endif()
if(SHERPA_ONNX_ENABLE_WEBSOCKET)
if(SHERPA_ONNX_ENABLE_WEBSOCKET AND SHERPA_ONNX_ENABLE_BINARY)
add_definitions(-DASIO_STANDALONE)
add_definitions(-D_WEBSOCKETPP_CPP11_STL_)
... ...
... ... @@ -16,6 +16,14 @@
fprintf(stderr, "\n"); \
__android_log_print(ANDROID_LOG_WARN, "sherpa-onnx", ##__VA_ARGS__); \
} while (0)
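// In Emscripten builds, stdout is forwarded to the browser console by default, so the WASM logger below writes there.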
#elif SHERPA_ONNX_ENABLE_WASM
#define SHERPA_ONNX_LOGE(...) \
do { \
fprintf(stdout, "%s:%s:%d ", __FILE__, __func__, \
static_cast<int>(__LINE__)); \
fprintf(stdout, ##__VA_ARGS__); \
fprintf(stdout, "\n"); \
} while (0)
#else
#define SHERPA_ONNX_LOGE(...) \
do { \
... ...
if(NOT $ENV{SHERPA_ONNX_IS_USING_BUILD_WASM_SH})
message(FATAL_ERROR "Please use ./build-wasm.sh to build for wasm")
endif()
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/assets/model.onnx")
message(WARNING "${CMAKE_CURRENT_SOURCE_DIR}/assets/model.onnx does not exist")
# message(FATAL_ERROR "Please read ${CMAKE_CURRENT_SOURCE_DIR}/assets/README.md before you continue")
endif()
set(exported_functions
MyPrint
SherpaOnnxCreateOfflineTts
SherpaOnnxDestroyOfflineTts
SherpaOnnxDestroyOfflineTtsGeneratedAudio
SherpaOnnxOfflineTtsGenerate
SherpaOnnxOfflineTtsGenerateWithCallback
SherpaOnnxOfflineTtsNumSpeakers
SherpaOnnxOfflineTtsSampleRate
SherpaOnnxWriteWave
)
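# Emscripten exports each C symbol with a leading underscore, so the names above are mangled below
# before being joined into the comma-separated list passed to -sEXPORTED_FUNCTIONS.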
set(mangled_exported_functions)
foreach(x IN LISTS exported_functions)
list(APPEND mangled_exported_functions "_${x}")
endforeach()
list(JOIN mangled_exported_functions "," all_exported_functions)
include_directories(${CMAKE_SOURCE_DIR})
set(MY_FLAGS " -s FORCE_FILESYSTEM=1 -s INITIAL_MEMORY=512MB -s ALLOW_MEMORY_GROWTH=1")
string(APPEND MY_FLAGS " -sSTACK_SIZE=10485760 ") # 10MB
string(APPEND MY_FLAGS " -sEXPORTED_FUNCTIONS=[_CopyHeap,_malloc,_free,${all_exported_functions}] ")
string(APPEND MY_FLAGS "--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/assets@. ")
string(APPEND MY_FLAGS " -sEXPORTED_RUNTIME_METHODS=['ccall','stringToUTF8','setValue','getValue'] ")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_FLAGS}")
set(CMAKE_EXECUTABLE_LINKER_FLAGS "${CMAKE_EXECUTABLE_LINKER_FLAGS} ${MY_FLAGS}")
if (NOT CMAKE_EXECUTABLE_SUFFIX STREQUAL ".js")
message(FATAL_ERROR "The default suffix for building executables should be .js!")
endif()
# set(CMAKE_EXECUTABLE_SUFFIX ".html")
add_executable(sherpa-onnx-wasm-main sherpa-onnx-wasm-main.cc)
target_link_libraries(sherpa-onnx-wasm-main sherpa-onnx-c-api)
install(TARGETS sherpa-onnx-wasm-main DESTINATION bin/wasm)
install(
FILES
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main>/sherpa-onnx-wasm-main.js"
"index.html"
"sherpa-onnx.js"
"app.js"
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main>/sherpa-onnx-wasm-main.wasm"
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main>/sherpa-onnx-wasm-main.data"
# "$<TARGET_FILE_DIR:sherpa-onnx-wasm-main>/sherpa-onnx-wasm-main.html"
DESTINATION
bin/wasm
)
... ...
const generateBtn = document.getElementById('generateBtn');
const hint = document.getElementById('hint');
const speakerIdLabel = document.getElementById('speakerIdLabel');
const speakerIdInput = document.getElementById('speakerId');
const speedInput = document.getElementById('speed');
const speedValue = document.getElementById('speedValue');
const textArea = document.getElementById('text');
const soundClips = document.getElementById('sound-clips');
speedValue.innerHTML = speedInput.value;
let index = 0;
let tts = null;
let audioCtx = null;
Module = {};
Module.onRuntimeInitialized = function() {
console.log('Model files downloaded!');
console.log('Initializing tts ......');
tts = initSherpaOnnxOfflineTts()
if (tts.numSpeakers > 1) {
speakerIdLabel.innerHTML = `Speaker ID (0 - ${tts.numSpeakers - 1}):`;
}
hint.innerText =
'Initialized! Please enter text and click the Generate button.';
generateBtn.disabled = false;
};
speedInput.oninput = function() {
speedValue.innerHTML = this.value;
};
generateBtn.onclick = function() {
let speakerId = speakerIdInput.value;
if (speakerId.trim().length == 0) {
alert('Please input a speakerId');
return;
}
if (!speakerId.match(/^\d+$/)) {
alert(`Input speakerID ${speakerId} is not a number.\nPlease enter a number between 0 and ${tts.numSpeakers - 1}`);
return;
}
speakerId = parseInt(speakerId, 10);
if (speakerId > tts.numSpeakers - 1) {
alert(`Please enter a number between 0 and ${tts.numSpeakers - 1}`);
return;
}
let text = textArea.value.trim();
if (text.length == 0) {
alert('Please input a non-blank text');
return;
}
console.log('speakerId', speakerId);
console.log('speed', speedInput.value);
console.log('text', text);
let audio =
tts.generate({text: text, sid: speakerId, speed: speedInput.value});
console.log(audio.samples.length, audio.sampleRate);
if (!audioCtx) {
audioCtx = new AudioContext({sampleRate: tts.sampleRate});
}
const buffer = audioCtx.createBuffer(1, audio.samples.length, tts.sampleRate);
const ptr = buffer.getChannelData(0);
for (let i = 0; i < audio.samples.length; i++) {
ptr[i] = audio.samples[i];
}
const source = audioCtx.createBufferSource();
source.buffer = buffer;
source.connect(audioCtx.destination);
source.start();
createAudioTag(audio);
};
function createAudioTag(generateAudio) {
const blob = toWav(generateAudio.samples, generateAudio.sampleRate);
const text = textArea.value.trim().substring(0, 100);
const clipName = `${index} ${text} ...`;
index += 1;
const clipContainer = document.createElement('article');
const clipLabel = document.createElement('p');
const audio = document.createElement('audio');
const deleteButton = document.createElement('button');
clipContainer.classList.add('clip');
audio.setAttribute('controls', '');
deleteButton.textContent = 'Delete';
deleteButton.className = 'delete';
clipLabel.textContent = clipName;
clipContainer.appendChild(audio);
clipContainer.appendChild(clipLabel);
clipContainer.appendChild(deleteButton);
soundClips.appendChild(clipContainer);
audio.controls = true;
const audioURL = window.URL.createObjectURL(blob);
audio.src = audioURL;
deleteButton.onclick = function(e) {
let evtTgt = e.target;
evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode);
};
clipLabel.onclick = function() {
const existingName = clipLabel.textContent;
const newClipName = prompt('Enter a new name for your sound clip?');
if (newClipName === null) {
clipLabel.textContent = existingName;
} else {
clipLabel.textContent = newClipName;
}
};
}
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
function toWav(floatSamples, sampleRate) {
let samples = new Int16Array(floatSamples.length);
for (let i = 0; i < samples.length; ++i) {
let s = floatSamples[i];
if (s >= 1)
s = 1;
else if (s <= -1)
s = -1;
samples[i] = s * 32767;
}
let buf = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buf);
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
view.setUint32(0, 0x46464952, true); // chunkID
view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
// E V A W
view.setUint32(8, 0x45564157, true); // format
//
// t m f
view.setUint32(12, 0x20746d66, true); // subchunk1ID
view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
view.setUint16(20, 1, true); // audioFormat, 1 for PCM
view.setUint16(22, 1, true); // numChannels: 1 channel
view.setUint32(24, sampleRate, true); // sampleRate
view.setUint32(28, sampleRate * 2, true); // byteRate
view.setUint16(32, 2, true); // blockAlign
view.setUint16(34, 16, true); // bitsPerSample
view.setUint32(36, 0x61746164, true); // Subchunk2ID
view.setUint32(40, samples.length * 2, true); // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
view.setInt16(offset, samples[i], true);
offset += 2;
}
return new Blob([view], {type: 'audio/wav'});
}
... ...
*.onnx
*.txt
espeak-ng-data
... ...
# Introduction
Please refer to
https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
to download a model.
The following is an example:
```
cd sherpa-onnx/wasm/tts/assets
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-libritts_r-medium.tar.bz2
tar xf vits-piper-en_US-libritts_r-medium.tar.bz2
rm vits-piper-en_US-libritts_r-medium.tar.bz2
mv vits-piper-en_US-libritts_r-medium/en_US-libritts_r-medium.onnx ./model.onnx
mv vits-piper-en_US-libritts_r-medium/tokens.txt ./
mv vits-piper-en_US-libritts_r-medium/espeak-ng-data ./
rm -rf vits-piper-en_US-libritts_r-medium
```
You should have the following files in `assets` before you can run
`build-wasm-simd.sh`
```
assets fangjun$ tree -L 1
.
├── README.md
├── espeak-ng-data
├── model.onnx
└── tokens.txt
1 directory, 3 files
```
... ...
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width" />
<title>Next-gen Kaldi WebAssembly with sherpa-onnx for Text-to-speech</title>
<style>
h1,div {
text-align: center;
}
textarea {
width:100%;
}
</style>
</head>
<body>
<h1>
Next-gen Kaldi + WebAssembly<br/>
Text-to-speech Demo with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a>
</h1>
<div>
<span id="hint">Loading model ... ...</span>
<br/>
<br/>
<label for="speakerId" id="speakerIdLabel">Speaker ID: </label>
<input type="text" id="speakerId" name="speakerId" value="0" />
<br/>
<br/>
<label for="speed" id="speedLabel">Speed: </label>
<input type="range" id="speed" name="speed" min="0.4" max="3.5" step="0.1" value="1.0" />
<span id="speedValue"></span>
<br/>
<br/>
<textarea id="text" rows="10" placeholder="Please enter your text here and click the Generate button"></textarea>
<br/>
<br/>
<button id="generateBtn" disabled>Generate</button>
</div>
<section flex="1" overflow="auto" id="sound-clips">
</section>
<script src="app.js"></script>
<script src="sherpa-onnx.js"></script>
<script src="sherpa-onnx-wasm-main.js"></script>
</body>
... ...
// wasm/sherpa-onnx-wasm-main.cc
//
// Copyright (c) 2024 Xiaomi Corporation
#include <stdio.h>
#include <algorithm>
#include <memory>
#include "sherpa-onnx/c-api/c-api.h"
// see also
// https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html
extern "C" {
static_assert(sizeof(SherpaOnnxOfflineTtsVitsModelConfig) == 7 * 4, "");
static_assert(sizeof(SherpaOnnxOfflineTtsModelConfig) ==
sizeof(SherpaOnnxOfflineTtsVitsModelConfig) + 3 * 4,
"");
static_assert(sizeof(SherpaOnnxOfflineTtsConfig) ==
sizeof(SherpaOnnxOfflineTtsModelConfig) + 2 * 4,
"");
void MyPrint(SherpaOnnxOfflineTtsConfig *tts_config) {
auto tts_model_config = &tts_config->model;
auto vits_model_config = &tts_model_config->vits;
fprintf(stdout, "----------vits model config----------\n");
fprintf(stdout, "model: %s\n", vits_model_config->model);
fprintf(stdout, "lexicon: %s\n", vits_model_config->lexicon);
fprintf(stdout, "tokens: %s\n", vits_model_config->tokens);
fprintf(stdout, "data_dir: %s\n", vits_model_config->data_dir);
fprintf(stdout, "noise scale: %.3f\n", vits_model_config->noise_scale);
fprintf(stdout, "noise scale w: %.3f\n", vits_model_config->noise_scale_w);
fprintf(stdout, "length scale: %.3f\n", vits_model_config->length_scale);
fprintf(stdout, "----------tts model config----------\n");
fprintf(stdout, "num threads: %d\n", tts_model_config->num_threads);
fprintf(stdout, "debug: %d\n", tts_model_config->debug);
fprintf(stdout, "provider: %s\n", tts_model_config->provider);
fprintf(stdout, "----------tts config----------\n");
fprintf(stdout, "rule_fsts: %s\n", tts_config->rule_fsts);
fprintf(stdout, "max num sentences: %d\n", tts_config->max_num_sentences);
}
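// CopyHeap is exported to JavaScript (see exported_functions in wasm/CMakeLists.txt);
// sherpa-onnx.js uses it to copy a nested config struct into its parent struct's memory on the heap.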
void CopyHeap(const char *src, int32_t num_bytes, char *dst) {
std::copy(src, src + num_bytes, dst);
}
}
... ...
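// Helpers for marshalling the TTS configuration from JavaScript objects into C structs on the
// WASM heap. freeConfig() recursively releases the string buffer and any nested config allocated
// by the init* functions below.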
function freeConfig(config) {
if ('buffer' in config) {
_free(config.buffer);
}
if ('config' in config) {
freeConfig(config.config)
}
_free(config.ptr);
}
// The user should free the returned pointers
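// The layout written here mirrors SherpaOnnxOfflineTtsVitsModelConfig: four char* pointers
// (model, lexicon, tokens, data_dir) followed by three floats (noise_scale, noise_scale_w,
// length_scale), i.e. 7 * 4 bytes, matching the static_assert in sherpa-onnx-wasm-main.cc.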
function initSherpaOnnxOfflineTtsVitsModelConfig(config) {
let modelLen = lengthBytesUTF8(config.model) + 1;
let lexiconLen = lengthBytesUTF8(config.lexicon) + 1;
let tokensLen = lengthBytesUTF8(config.tokens) + 1;
let dataDirLen = lengthBytesUTF8(config.dataDir) + 1;
let n = modelLen + lexiconLen + tokensLen + dataDirLen;
let buffer = _malloc(n);
let len = 7 * 4;
let ptr = _malloc(len);
let offset = 0;
stringToUTF8(config.model, buffer + offset, modelLen);
offset += modelLen;
stringToUTF8(config.lexicon, buffer + offset, lexiconLen);
offset += lexiconLen;
stringToUTF8(config.tokens, buffer + offset, tokensLen);
offset += tokensLen;
stringToUTF8(config.dataDir, buffer + offset, dataDirLen);
offset += dataDirLen;
offset = 0;
setValue(ptr, buffer + offset, 'i8*');
offset += modelLen;
setValue(ptr + 4, buffer + offset, 'i8*');
offset += lexiconLen;
setValue(ptr + 8, buffer + offset, 'i8*');
offset += tokensLen;
setValue(ptr + 12, buffer + offset, 'i8*');
offset += dataDirLen;
setValue(ptr + 16, config.noiseScale, 'float');
setValue(ptr + 20, config.noiseScaleW, 'float');
setValue(ptr + 24, config.lengthScale, 'float');
return {
buffer: buffer, ptr: ptr, len: len,
}
}
function initSherpaOnnxOfflineTtsModelConfig(config) {
let vitsModelConfig =
initSherpaOnnxOfflineTtsVitsModelConfig(config.offlineTtsVitsModelConfig);
let len = vitsModelConfig.len + 3 * 4;
let ptr = _malloc(len);
let offset = 0;
_CopyHeap(vitsModelConfig.ptr, vitsModelConfig.len, ptr + offset);
offset += vitsModelConfig.len;
setValue(ptr + offset, config.numThreads, 'i32');
offset += 4;
setValue(ptr + offset, config.debug, 'i32');
offset += 4;
let providerLen = lengthBytesUTF8(config.provider) + 1;
let buffer = _malloc(providerLen);
stringToUTF8(config.provider, buffer, providerLen);
setValue(ptr + offset, buffer, 'i8*');
return {
buffer: buffer, ptr: ptr, len: len, config: vitsModelConfig,
}
}
function initSherpaOnnxOfflineTtsConfig(config) {
let modelConfig =
initSherpaOnnxOfflineTtsModelConfig(config.offlineTtsModelConfig);
let len = modelConfig.len + 2 * 4;
let ptr = _malloc(len);
let offset = 0;
_CopyHeap(modelConfig.ptr, modelConfig.len, ptr + offset);
offset += modelConfig.len;
let ruleFstsLen = lengthBytesUTF8(config.ruleFsts) + 1;
let buffer = _malloc(ruleFstsLen);
stringToUTF8(config.ruleFsts, buffer, ruleFstsLen);
setValue(ptr + offset, buffer, 'i8*');
offset += 4;
setValue(ptr + offset, config.maxNumSentences, 'i32');
return {
buffer: buffer, ptr: ptr, len: len, config: modelConfig,
}
}
class OfflineTts {
constructor(configObj) {
let config = initSherpaOnnxOfflineTtsConfig(configObj)
let handle = _SherpaOnnxCreateOfflineTts(config.ptr);
freeConfig(config);
this.handle = handle;
this.sampleRate = _SherpaOnnxOfflineTtsSampleRate(this.handle);
this.numSpeakers = _SherpaOnnxOfflineTtsNumSpeakers(this.handle);
}
free() {
_SherpaOnnxDestroyOfflineTts(this.handle);
this.handle = 0
}
// {
// text: "hello",
// sid: 1,
// speed: 1.0
// }
generate(config) {
let textLen = lengthBytesUTF8(config.text) + 1;
let textPtr = _malloc(textLen);
stringToUTF8(config.text, textPtr, textLen);
let h = _SherpaOnnxOfflineTtsGenerate(
this.handle, textPtr, config.sid, config.speed);
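// h points to a SherpaOnnxGeneratedAudio struct; the reads below assume the layout
// {const float *samples; int32_t n; int32_t sample_rate} from the C API.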
let numSamples = HEAP32[h / 4 + 1];
let sampleRate = HEAP32[h / 4 + 2];
let samplesPtr = HEAP32[h / 4] / 4;
let samples = new Float32Array(numSamples);
for (let i = 0; i < numSamples; i++) {
samples[i] = HEAPF32[samplesPtr + i];
}
_SherpaOnnxDestroyOfflineTtsGeneratedAudio(h);
return {samples: samples, sampleRate: sampleRate};
}
}
function initSherpaOnnxOfflineTts() {
let offlineTtsVitsModelConfig = {
model: './model.onnx',
lexicon: '',
tokens: './tokens.txt',
dataDir: './espeak-ng-data',
noiseScale: 0.667,
noiseScaleW: 0.8,
lengthScale: 1.0,
};
let offlineTtsModelConfig = {
offlineTtsVitsModelConfig: offlineTtsVitsModelConfig,
numThreads: 1,
debug: 1,
provider: 'cpu',
};
let offlineTtsConfig = {
offlineTtsModelConfig: offlineTtsModelConfig,
ruleFsts: '',
maxNumSentences: 1,
}
return new OfflineTts(offlineTtsConfig);
}
... ...