Fangjun Kuang
Committed by GitHub

Add WebAssembly (WASM) for speech enhancement GTCRN models (#2002)

... ... @@ -144,8 +144,7 @@ jobs:
git clone https://huggingface.co/spaces/k2-fsa/web-assembly-asr-sherpa-onnx-en huggingface
cd huggingface
rm -fv *.js
rm -fv *.data
rm -rf ./*
git fetch
git pull
git merge -m "merge remote" --ff origin main
... ...
name: wasm-simd-hf-space-speech-enhancement-gtcrn
on:
push:
branches:
- wasm
- wasm-gtcrn
tags:
- 'v[0-9]+.[0-9]+.[0-9]+*'
workflow_dispatch:
concurrency:
group: wasm-simd-hf-space-speech-enhancement-gtcrn-${{ github.ref }}
cancel-in-progress: true
jobs:
wasm-simd-hf-space-speech-enhancement-gtcrn:
name: wasm gtcrn
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
shell: bash
run: |
emcc -v
echo "--------------------"
emcc --check
- name: Download model
shell: bash
run: |
cd wasm/speech-enhancement/assets
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/speech-enhancement-models/gtcrn_simple.onnx
mv gtcrn_simple.onnx gtcrn.onnx
- name: build
shell: bash
run: |
./build-wasm-simd-speech-enhancement.sh
- name: collect files
shell: bash
run: |
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
d=sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-speech-enhancement-gtcrn
mv build-wasm-simd-speech-enhancement/install/bin/wasm/speech-enhancement $d
ls -lh $d
tar cjfv $d.tar.bz2 $d
echo "---"
ls -lh *.tar.bz2
- uses: actions/upload-artifact@v4
with:
name: wasm-speech-enhancement-gtcrn
path: ./*.tar.bz2
- name: Release
# if: github.repository_owner == 'csukuangfj' && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
uses: svenstaro/upload-release-action@v2
with:
file_glob: true
overwrite: true
file: ./*.tar.bz2
repo_name: k2-fsa/sherpa-onnx
repo_token: ${{ secrets.UPLOAD_GH_SHERPA_ONNX_TOKEN }}
tag: v1.10.46
- name: Release
if: github.repository_owner == 'k2-fsa' && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
uses: svenstaro/upload-release-action@v2
with:
file_glob: true
overwrite: true
file: ./*.tar.bz2
- name: Publish to ModelScope
# if: false
env:
MS_TOKEN: ${{ secrets.MODEL_SCOPE_GIT_TOKEN }}
uses: nick-fields/retry@v2
with:
max_attempts: 20
timeout_seconds: 200
shell: bash
command: |
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
git config --global user.email "csukuangfj@gmail.com"
git config --global user.name "Fangjun Kuang"
rm -rf ms
export GIT_LFS_SKIP_SMUDGE=1
export GIT_CLONE_PROTECTION_ACTIVE=false
git clone http://www.modelscope.cn/studios/csukuangfj/wasm-speech-enhancement-gtcrn.git ms
cd ms
rm -fv *.js
rm -fv *.data
git fetch
git pull
git merge -m "merge remote" --ff origin main
cp -v ../sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-*/* .
git status
git lfs track "*.data"
git lfs track "*.wasm"
ls -lh
git add .
git commit -m "update model"
git push http://oauth2:${MS_TOKEN}@www.modelscope.cn/studios/csukuangfj/wasm-speech-enhancement-gtcrn.git
- name: Publish to huggingface
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
uses: nick-fields/retry@v2
with:
max_attempts: 20
timeout_seconds: 200
shell: bash
command: |
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
git config --global user.email "csukuangfj@gmail.com"
git config --global user.name "Fangjun Kuang"
rm -rf huggingface
export GIT_LFS_SKIP_SMUDGE=1
export GIT_CLONE_PROTECTION_ACTIVE=false
git clone https://huggingface.co/spaces/k2-fsa/wasm-speech-enhancement-gtcrn huggingface
cd huggingface
rm -fv *.js
rm -fv *.data
git fetch
git pull
git merge -m "merge remote" --ff origin main
cp -v ../sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-*/* .
git status
git lfs track "*.data"
git lfs track "*.wasm"
ls -lh
git add .
git commit -m "update model"
git push https://csukuangfj:$HF_TOKEN@huggingface.co/spaces/k2-fsa/wasm-speech-enhancement-gtcrn main
... ...
... ... @@ -38,6 +38,7 @@ option(SHERPA_ONNX_ENABLE_WASM_KWS "Whether to enable WASM for KWS" OFF)
option(SHERPA_ONNX_ENABLE_WASM_VAD "Whether to enable WASM for VAD" OFF)
option(SHERPA_ONNX_ENABLE_WASM_VAD_ASR "Whether to enable WASM for VAD+ASR" OFF)
option(SHERPA_ONNX_ENABLE_WASM_NODEJS "Whether to enable WASM for NodeJS" OFF)
option(SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT "Whether to enable WASM for speech enhancement" OFF)
option(SHERPA_ONNX_ENABLE_BINARY "Whether to build binaries" ON)
option(SHERPA_ONNX_ENABLE_TTS "Whether to build TTS related code" ON)
option(SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION "Whether to build speaker diarization related code" ON)
... ... @@ -149,6 +150,7 @@ message(STATUS "SHERPA_ONNX_ENABLE_WASM_KWS ${SHERPA_ONNX_ENABLE_WASM_KWS}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_VAD ${SHERPA_ONNX_ENABLE_WASM_VAD}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_VAD_ASR ${SHERPA_ONNX_ENABLE_WASM_VAD_ASR}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_NODEJS ${SHERPA_ONNX_ENABLE_WASM_NODEJS}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT ${SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT}")
message(STATUS "SHERPA_ONNX_ENABLE_BINARY ${SHERPA_ONNX_ENABLE_BINARY}")
message(STATUS "SHERPA_ONNX_ENABLE_TTS ${SHERPA_ONNX_ENABLE_TTS}")
message(STATUS "SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION ${SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION}")
... ... @@ -261,6 +263,12 @@ if(SHERPA_ONNX_ENABLE_WASM_VAD_ASR)
endif()
endif()
if(SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT)
if(NOT SHERPA_ONNX_ENABLE_WASM)
message(FATAL_ERROR "Please set SHERPA_ONNX_ENABLE_WASM to ON if you enable WASM for speech enhancement")
endif()
endif()
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ version to be used.")
endif()
... ...
... ... @@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
... ...
... ... @@ -24,6 +24,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
... ...
... ... @@ -31,6 +31,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
... ...
... ... @@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
... ...
#!/usr/bin/env bash
# Copyright (c) 2025 Xiaomi Corporation
#
# This script is to build sherpa-onnx for WebAssembly (Speech Enhancement)
set -ex
if [ x"$EMSCRIPTEN" == x"" ]; then
if ! command -v emcc &> /dev/null; then
echo "Please install emscripten first"
echo ""
echo "You can use the following commands to install it:"
echo ""
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
EMSCRIPTEN=$(dirname $(realpath $(which emcc)))
emcc --version
fi
fi
export EMSCRIPTEN=$EMSCRIPTEN
echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
mkdir -p build-wasm-simd-speech-enhancement
pushd build-wasm-simd-speech-enhancement
export SHERPA_ONNX_IS_USING_BUILD_WASM_SH=ON
cmake \
-DCMAKE_INSTALL_PREFIX=./install \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE=$EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake \
\
-DSHERPA_ONNX_ENABLE_PYTHON=OFF \
-DSHERPA_ONNX_ENABLE_TESTS=OFF \
-DSHERPA_ONNX_ENABLE_CHECK=OFF \
-DBUILD_SHARED_LIBS=OFF \
-DSHERPA_ONNX_ENABLE_PORTAUDIO=OFF \
-DSHERPA_ONNX_ENABLE_JNI=OFF \
-DSHERPA_ONNX_ENABLE_C_API=ON \
-DSHERPA_ONNX_ENABLE_WEBSOCKET=OFF \
-DSHERPA_ONNX_ENABLE_GPU=OFF \
-DSHERPA_ONNX_ENABLE_WASM=ON \
-DSHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT=ON \
-DSHERPA_ONNX_ENABLE_BINARY=OFF \
-DSHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY=OFF \
..
make -j2
make install
ls -lh install/bin/wasm/speech-enhancement
... ...
... ... @@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
... ...
... ... @@ -30,6 +30,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
... ...
... ... @@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
... ...
... ... @@ -18,6 +18,10 @@ if(SHERPA_ONNX_ENABLE_WASM_VAD_ASR)
add_subdirectory(vad-asr)
endif()
if(SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT)
add_subdirectory(speech-enhancement)
endif()
if(SHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION)
add_subdirectory(speaker-diarization)
endif()
... ...
... ... @@ -5,7 +5,6 @@
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const clearBtn = document.getElementById('clearBtn');
const hint = document.getElementById('hint');
const soundClips = document.getElementById('sound-clips');
let textArea = document.getElementById('results');
... ... @@ -16,7 +15,7 @@ let resultList = [];
clearBtn.onclick = function() {
resultList = [];
textArea.value = getDisplayResult();
textArea.scrollTop = textArea.scrollHeight;  // auto scroll
};
function getDisplayResult() {
... ... @@ -37,11 +36,39 @@ function getDisplayResult() {
return ans;
}
Module = {};
// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
Module.locateFile = function(path, scriptDirectory = '') {
console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
return scriptDirectory + path;
};
// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
Module.setStatus = function(status) {
console.log(`status ${status}`);
const statusElement = document.getElementById('status');
if (status == "Running...") {
status = 'Model downloaded. Initializing recognizer...';
}
statusElement.textContent = status;
if (status === '') {
statusElement.style.display = 'none';
// statusElement.parentNode.removeChild(statusElement);
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.remove('loading');
});
} else {
statusElement.style.display = 'block';
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.add('loading');
});
}
};
Module.onRuntimeInitialized = function() {
console.log('inited!');
hint.innerText = 'Model loaded! Please click start';
startBtn.disabled = false;
... ... @@ -53,11 +80,11 @@ let audioCtx;
let mediaStream;
let expectedSampleRate = 16000;
let recordSampleRate;     // the sampleRate of the microphone
let recorder = null;      // the microphone
let leftchannel = [];     // TODO: Use a single channel
let recordingLength = 0;  // number of samples so far
let recognizer = null;
let recognizer_stream = null;
... ... @@ -66,11 +93,11 @@ if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
// see https://w3c.github.io/mediacapture-main/#dom-mediadevices-getusermedia
const constraints = {audio: true};
let onSuccess = function(stream) {
if (!audioCtx) {
audioCtx = new AudioContext({sampleRate: 16000});
}
console.log(audioCtx);
recordSampleRate = audioCtx.sampleRate;
... ... @@ -120,7 +147,6 @@ if (navigator.mediaDevices.getUserMedia) {
result = recognizer.getResult(recognizer_stream).text;
}
if (result.length > 0 && lastResult != result) {
lastResult = result;
}
... ... @@ -134,7 +160,7 @@ if (navigator.mediaDevices.getUserMedia) {
}
textArea.value = getDisplayResult();
textArea.scrollTop = textArea.scrollHeight;  // auto scroll
let buf = new Int16Array(samples.length);
for (var i = 0; i < samples.length; ++i) {
... ... @@ -221,9 +247,8 @@ if (navigator.mediaDevices.getUserMedia) {
};
};
let onError = function(err) {
  console.log('The following error occurred: ' + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
... ... @@ -231,7 +256,6 @@ if (navigator.mediaDevices.getUserMedia) {
alert('getUserMedia not supported on your browser!');
}
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
function flatten(listOfSamples) {
... ... @@ -257,22 +281,22 @@ function toWav(samples) {
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
view.setUint32(0, 0x46464952, true);               // chunkID
view.setUint32(4, 36 + samples.length * 2, true);  // chunkSize
// E V A W
view.setUint32(8, 0x45564157, true);  // format
//
// t m f
view.setUint32(12, 0x20746d66, true);          // subchunk1ID
view.setUint32(16, 16, true);                  // subchunk1Size, 16 for PCM
view.setUint32(20, 1, true);                   // audioFormat, 1 for PCM
view.setUint16(22, 1, true);                   // numChannels: 1 channel
view.setUint32(24, expectedSampleRate, true);      // sampleRate
view.setUint32(28, expectedSampleRate * 2, true);  // byteRate
view.setUint16(32, 2, true);                   // blockAlign
view.setUint16(34, 16, true);                  // bitsPerSample
view.setUint32(36, 0x61746164, true);          // Subchunk2ID
view.setUint32(40, samples.length * 2, true);  // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
... ... @@ -280,7 +304,7 @@ function toWav(samples) {
offset += 2;
}
return new Blob([view], {type: 'audio/wav'});
}
// this function is copied from
... ...
... ... @@ -11,30 +11,70 @@
textarea {
width:100%;
}
.loading {
display: none !important;
}
</style>
</head>
<body>
<body style="font-family: 'Source Sans Pro', sans-serif; background-color: #f9fafb; color: #333; display: flex; flex-direction: column; align-items: center; height: 100vh; margin: 0;">
<h1>
Next-gen Kaldi + WebAssembly<br/>
ASR Demo with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a><br/>
(with Zipformer)
</h1>
<div>
<span id="hint">Loading model ... ...</span>
<br/>
<br/>
<button id="startBtn" disabled>Start</button>
<button id="stopBtn" disabled>Stop</button>
<button id="clearBtn">Clear</button>
<br/>
<br/>
<textarea id="results" rows="10" readonly></textarea>
<div style="width: 100%; max-width: 900px; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); flex: 1;">
<div id="status">Loading...</div>
<div id="singleAudioContent" class="tab-content loading">
<div style="display: flex; gap: 1.5rem;">
<div style="flex: 1; display: flex; flex-direction: row; align-items: center; gap: 1rem;">
<button id="startBtn" disabled>Start</button>
<button id="stopBtn" disabled>Stop</button>
<button id="clearBtn">Clear</button>
</div>
</div>
<div style="flex: 1; display: flex; flex-direction: column; gap: 1rem;">
<div style="font-size: 1rem; font-weight: bold; padding: 0.5rem 1rem; background-color: #f8f9fa; border-radius: 8px; color: #6c757d;">Transcript</div>
<textarea id="results" rows="10" placeholder="Output will appear here..." readonly style="flex: 1; padding: 0.75rem; font-size: 1rem; border: 1px solid #ced4da; border-radius: 8px; resize: none; background-color: #f8f9fa;"></textarea>
</div>
</div>
<section flex="1" overflow="auto" id="sound-clips">
</section>
</div>
<section flex="1" overflow="auto" id="sound-clips">
</section>
<!-- Footer Section -->
<div style="width: 100%; max-width: 900px; margin-top: 1.5rem; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); text-align: left; font-size: 0.9rem; color: #6c757d;">
<h3>Description</h3>
<ul>
<li>Everything is <strong>open-sourced.</strong> <a href="https://github.com/k2-fsa/sherpa-onnx">code</a></li>
<li>If you have any issues, please either <a href="https://github.com/k2-fsa/sherpa-onnx/issues">file a ticket</a> or contact us via</li>
<ul>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#wechat">WeChat group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#qq">QQ group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#bilibili-b">Bilibili</a></li>
</ul>
</ul>
<h3>About This Demo</h3>
<ul>
<li><strong>Private and Secure:</strong> All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.</li>
<li><strong>Efficient Resource Usage:</strong> No GPU is required, leaving system resources available for webLLM analysis.</li>
</ul>
<h3>Latest Update</h3>
<ul>
<li>Update UI.</li>
<li>First working version.</li>
</ul>
<h3>Acknowledgement</h3>
<ul>
<li>We refer to <a href="https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm">https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm</a> for the UI part.</li>
</ul>
</div>
<script src="sherpa-onnx-asr.js"></script>
<script src="app-asr.js"></script>
... ...
... ... @@ -84,6 +84,7 @@ set(exported_functions
#
SherpaOnnxFileExists
SherpaOnnxReadWave
SherpaOnnxReadWaveFromBinaryData
SherpaOnnxFreeWave
SherpaOnnxWriteWave
)
... ...
... ... @@ -23,6 +23,36 @@ function readWave(filename, Module) {
Module._SherpaOnnxFreeWave(w);
return {samples: samples, sampleRate: sampleRate};
}
function readWaveFromBinaryData(uint8Array) {
  const numBytes = uint8Array.length * uint8Array.BYTES_PER_ELEMENT;
  const pointer = Module._malloc(numBytes);

  const dataOnHeap = new Uint8Array(Module.HEAPU8.buffer, pointer, numBytes);
  dataOnHeap.set(uint8Array);

  const w = Module._SherpaOnnxReadWaveFromBinaryData(
      dataOnHeap.byteOffset, numBytes);

  // the input buffer is no longer needed once the C side has parsed it
  Module._free(pointer);

  if (w == 0) {
    console.log('Failed to read wave from binary data');
    return null;
  }

  const samplesPtr = Module.HEAP32[w / 4] / 4;
  const sampleRate = Module.HEAP32[w / 4 + 1];
  const numSamples = Module.HEAP32[w / 4 + 2];

  const samples = new Float32Array(numSamples);
  for (let i = 0; i < numSamples; i++) {
    samples[i] = Module.HEAPF32[samplesPtr + i];
  }

  Module._SherpaOnnxFreeWave(w);
  return {samples: samples, sampleRate: sampleRate};
}
... ... @@ -53,5 +83,6 @@ if (typeof process == 'object' && typeof process.versions == 'object' &&
module.exports = {
readWave,
writeWave,
readWaveFromBinaryData,
};
}
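A short Node.js sketch of the new helper (an untested sketch: it assumes the Emscripten-generated `sherpa-onnx-wasm-main-*.js` has already been loaded so that the global `Module` exists and its runtime has initialized; `./test.wav` is a hypothetical input file):

```js
const fs = require('fs');
const {readWaveFromBinaryData} = require('./sherpa-onnx-wave.js');

// read the whole file into memory and let the C side parse the header
const data = fs.readFileSync('./test.wav');  // hypothetical file name
const wave = readWaveFromBinaryData(new Uint8Array(data));
if (wave != null) {
  console.log(`sample rate: ${wave.sampleRate}, samples: ${wave.samples.length}`);
}
```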
... ...
if(NOT $ENV{SHERPA_ONNX_IS_USING_BUILD_WASM_SH})
message(FATAL_ERROR "Please use ./build-wasm-simd-speech-enhancement.sh to build for wasm speech enhancement")
endif()
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/assets/gtcrn.onnx")
message(FATAL_ERROR "Please read ${CMAKE_CURRENT_SOURCE_DIR}/assets/README.md before you continue")
endif()
set(exported_functions
MyPrint
SherpaOnnxCreateOfflineSpeechDenoiser
SherpaOnnxDestroyOfflineSpeechDenoiser
SherpaOnnxOfflineSpeechDenoiserGetSampleRate
SherpaOnnxOfflineSpeechDenoiserRun
SherpaOnnxDestroyDenoisedAudio
SherpaOnnxWriteWave
SherpaOnnxReadWave
SherpaOnnxReadWaveFromBinaryData
SherpaOnnxFreeWave
)
set(mangled_exported_functions)
foreach(x IN LISTS exported_functions)
list(APPEND mangled_exported_functions "_${x}")
endforeach()
list(JOIN mangled_exported_functions "," all_exported_functions)
include_directories(${CMAKE_SOURCE_DIR})
set(MY_FLAGS " -s FORCE_FILESYSTEM=1 -s INITIAL_MEMORY=128MB -s ALLOW_MEMORY_GROWTH=1")
string(APPEND MY_FLAGS " -sSTACK_SIZE=10485760 ") # 10MB
string(APPEND MY_FLAGS " -sEXPORTED_FUNCTIONS=[_CopyHeap,_malloc,_free,${all_exported_functions}] ")
string(APPEND MY_FLAGS "--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/assets@. ")
string(APPEND MY_FLAGS " -sEXPORTED_RUNTIME_METHODS=['ccall','stringToUTF8','setValue','getValue','lengthBytesUTF8','UTF8ToString'] ")
message(STATUS "MY_FLAGS: ${MY_FLAGS}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${MY_FLAGS}")
if (NOT CMAKE_EXECUTABLE_SUFFIX STREQUAL ".js")
message(FATAL_ERROR "The default suffix for building executables should be .js!")
endif()
# set(CMAKE_EXECUTABLE_SUFFIX ".html")
add_executable(sherpa-onnx-wasm-main-speech-enhancement sherpa-onnx-wasm-main-speech-enhancement.cc)
target_link_libraries(sherpa-onnx-wasm-main-speech-enhancement sherpa-onnx-c-api)
install(TARGETS sherpa-onnx-wasm-main-speech-enhancement DESTINATION bin/wasm/speech-enhancement)
install(
FILES
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.js"
"index.html"
"sherpa-onnx-speech-enhancement.js"
"../nodejs/sherpa-onnx-wave.js"
"app-speech-enhancement.js"
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.wasm"
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.data"
DESTINATION
bin/wasm/speech-enhancement
)
... ...
const fileInput = document.getElementById('fileInput');
let speech_denoiser = null;
const inAudioPlayback = document.getElementById('inAudioPlayback');
const outAudioPlayback = document.getElementById('outAudioPlayback');
Module = {};
// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
Module.locateFile = function(path, scriptDirectory = '') {
console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
return scriptDirectory + path;
};
// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
Module.setStatus = function(status) {
console.log(`status ${status}`);
const statusElement = document.getElementById('status');
statusElement.textContent = status;
if (status === '') {
statusElement.style.display = 'none';
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.remove('loading');
});
} else {
statusElement.style.display = 'block';
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.add('loading');
});
}
};
Module.onRuntimeInitialized = function() {
console.log('Model files downloaded!');
console.log('Initializing speech denoiser ......');
speech_denoiser = createOfflineSpeechDenoiser(Module)
};
async function process(wave) {
let denoised = speech_denoiser.run(wave.samples, wave.sampleRate);
console.log(denoised);
let int16Samples = new Int16Array(denoised.samples.length);
for (var i = 0; i < denoised.samples.length; ++i) {
let s = denoised.samples[i];
if (s >= 1)
s = 1;
else if (s <= -1)
s = -1;
int16Samples[i] = s * 32767;
}
let blob = toWav(int16Samples, denoised.sampleRate);
const objectUrl = URL.createObjectURL(blob);
console.log(objectUrl);
outAudioPlayback.src = objectUrl;
outAudioPlayback.controls = true;
outAudioPlayback.style.display = 'block';
}
fileInput.addEventListener('change', function(event) {
if (!event.target.files || !event.target.files[0]) {
console.log('No file selected.');
return;
}
const file = event.target.files[0];
console.log('Selected file:', file.name, file.type, file.size, 'bytes');
const reader = new FileReader();
reader.onload = function(ev) {
console.log('FileReader onload called.');
const arrayBuffer = ev.target.result;
console.log('ArrayBuffer length:', arrayBuffer.byteLength);
const uint8Array = new Uint8Array(arrayBuffer);
const wave = readWaveFromBinaryData(uint8Array);
if (wave == null) {
alert(
`${file.name} is not a valid .wav file. Please select a *.wav file`);
return;
}
var url = URL.createObjectURL(file);
console.log(`url: ${url}`);
inAudioPlayback.src = url;
inAudioPlayback.style.display = 'block';
process(wave).then(() => console.log('process done'));
};
reader.onerror = function(err) {
console.error('FileReader error:', err);
};
console.log('Starting FileReader.readAsArrayBuffer...');
reader.readAsArrayBuffer(file);
});
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
function toWav(samples, sampleRate) {
let buf = new ArrayBuffer(44 + samples.length * 2);
var view = new DataView(buf);
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
view.setUint32(0, 0x46464952, true); // chunkID
view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
// E V A W
view.setUint32(8, 0x45564157, true); // format
//
// t m f
view.setUint32(12, 0x20746d66, true); // subchunk1ID
view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
view.setUint32(20, 1, true); // audioFormat, 1 for PCM
view.setUint16(22, 1, true); // numChannels: 1 channel
view.setUint32(24, sampleRate, true); // sampleRate
view.setUint32(28, sampleRate * 2, true); // byteRate
view.setUint16(32, 2, true); // blockAlign
view.setUint16(34, 16, true); // bitsPerSample
view.setUint32(36, 0x61746164, true); // Subchunk2ID
view.setUint32(40, samples.length * 2, true); // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
view.setInt16(offset, samples[i], true);
offset += 2;
}
return new Blob([view], {type: 'audio/wav'});
}
... ...
# Introduction
## Huggingface space
You can visit https://huggingface.co/spaces/k2-fsa/wasm-speech-enhancement-gtcrn
to try it in your browser without building or installing anything.
The same demo is also published to ModelScope at
https://modelscope.cn/studios/csukuangfj/wasm-speech-enhancement-gtcrn
## Usage
Please refer to
https://github.com/k2-fsa/sherpa-onnx/releases/tag/speech-enhancement-models
to download a model.
The following is an example:
```bash
cd sherpa-onnx/wasm/speech-enhancement/assets
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/speech-enhancement-models/gtcrn_simple.onnx
mv gtcrn_simple.onnx gtcrn.onnx
```
You should have the following files in `assets` before you can run
`build-wasm-simd-speech-enhancement.sh`
```
(py38) fangjuns-MacBook-Pro:assets fangjun$ tree .
.
├── README.md
└── gtcrn.onnx
0 directories, 2 files
(py38) fangjuns-MacBook-Pro:assets fangjun$ ls -lh
total 1056
-rw-r--r-- 1 fangjun staff 466B Mar 12 16:13 README.md
-rw-r--r-- 1 fangjun staff 523K Mar 12 16:14 gtcrn.onnx
```
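With the assets in place, a typical build and quick local test could look like the following (a sketch; the install path comes from `build-wasm-simd-speech-enhancement.sh`, and `python3 -m http.server` is just one convenient way to serve the generated files):

```bash
cd sherpa-onnx
./build-wasm-simd-speech-enhancement.sh

# the generated files end up here
cd build-wasm-simd-speech-enhancement/install/bin/wasm/speech-enhancement

# serve them locally and open http://localhost:8000 in a browser
python3 -m http.server 8000
```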
... ...
<html lang="en">
<!--
The UI code is modified from
https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm
-->
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width" />
<title>Next-gen Kaldi WebAssembly with sherpa-onnx for speech enhancement</title>
<style>
h1,div {
text-align: center;
}
textarea {
width:100%;
}
.loading {
display: none !important;
}
</style>
</head>
<body>
<h1>
Next-gen Kaldi + WebAssembly<br/>
Speech Enhancement with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a><br/>
using <a href="https://github.com/Xiaobin-Rong/gtcrn">GTCRN</a>
</h1>
<div id="status">Loading...</div>
<div id="singleAudioContent" class="tab-content loading">
<div style="display: flex; gap: 1.5rem;">
<!-- Input Section -->
<div style="flex: 1; display: flex; flex-direction: column; gap: 1rem;">
<div style="font-size: 1rem; font-weight: bold; padding: 0.5rem 1rem; background-color: #f8f9fa; border-radius: 8px; display: flex; align-items: center; gap: 0.5rem; color: #6c757d;">
<span style="line-height: 1;">🎵</span> Input
</div>
<!-- Drag and Drop / File Upload -->
<div id="dropzone" style="border: 2px dashed #ced4da; border-radius: 8px; padding: 2rem; text-align: center; color: #6c757d; cursor: pointer; background-color: #f8f9fa; transition: background-color 0.3s, border-color 0.3s; position: relative;">
<input type="file" id="fileInput" accept=".wav" style="position: absolute; top: 0; left: 0; opacity: 0; width: 100%; height: 100%; cursor: pointer;" />
<p style="margin: 0;">Drop Audio Here (*.wav)<br>- or -<br>Click to Upload</p>
</div>
<audio id="inAudioPlayback" controls style="display: none; margin-top: 1rem; width: 100%;"></audio>
</div>
</div>
<div style="display: flex; gap: 1.5rem;">
<!-- Output Section -->
<div style="flex: 1; display: flex; flex-direction: column; gap: 1rem;">
<div style="font-size: 1rem; font-weight: bold; padding: 0.5rem 1rem; background-color: #f8f9fa; border-radius: 8px; display: flex; align-items: center; gap: 0.5rem; color: #6c757d;">
<span style="line-height: 1;">🎵</span> Output
</div>
<audio id="outAudioPlayback" controls style="display: none; margin-top: 1rem; width: 100%;"></audio>
</div>
</div>
<!-- Footer Section -->
<div style="width: 100%; max-width: 900px; margin-top: 1.5rem; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); text-align: left; font-size: 0.9rem; color: #6c757d;">
<h3>Description</h3>
<ul>
<li>Everything is <strong>open-sourced.</strong> <a href="https://github.com/k2-fsa/sherpa-onnx">code</a></li>
<li>The model is from <a href="https://github.com/Xiaobin-Rong/gtcrn">GTCRN</a></li>
<li>Please upload .wav files</li>
<ul>
<li>You can download noisy test wave files from <a href="https://htmlpreview.github.io/?https://github.com/Xiaobin-Rong/gtcrn_demo/blob/main/index.html">https://htmlpreview.github.io/?https://github.com/Xiaobin-Rong/gtcrn_demo/blob/main/index.html</a></li>
</ul>
<li>If you have any issues, please either <a href="https://github.com/k2-fsa/sherpa-onnx/issues">file a ticket</a> or contact us via</li>
<ul>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#wechat">WeChat group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#qq">QQ group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#bilibili-b">Bilibili</a></li>
</ul>
</ul>
<h3>About This Demo</h3>
<ul>
<li><strong>Private and Secure:</strong> All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.</li>
<li><strong>Efficient Resource Usage:</strong> No GPU is required, leaving system resources available for webLLM analysis.</li>
</ul>
<h3>Latest Update</h3>
<ul>
<li>First working version.</li>
</ul>
<h3>Acknowledgement</h3>
<ul>
<li>We refer to <a href="https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm">https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm</a> for the UI part.</li>
</ul>
</div>
<script src="app-speech-enhancement.js"></script>
<script src="sherpa-onnx-wave.js"></script>
<script src="sherpa-onnx-speech-enhancement.js"></script>
<script src="sherpa-onnx-wasm-main-speech-enhancement.js"></script>
</body>
... ...
function freeConfig(config, Module) {
if ('buffer' in config) {
Module._free(config.buffer);
}
if ('config' in config) {
freeConfig(config.config, Module)
}
if ('gtcrn' in config) {
freeConfig(config.gtcrn, Module)
}
Module._free(config.ptr);
}
function initSherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig(config, Module) {
if (!('model' in config)) {
config.model = '';
}
const modelLen = Module.lengthBytesUTF8(config.model) + 1;
const n = modelLen;
const buffer = Module._malloc(n);
const len = 1 * 4;
const ptr = Module._malloc(len);
let offset = 0;
Module.stringToUTF8(config.model, buffer + offset, modelLen);
offset += modelLen;
offset = 0;
Module.setValue(ptr, buffer + offset, 'i8*');
offset += modelLen;
return {
buffer: buffer, ptr: ptr, len: len,
}
}
function initSherpaOnnxOfflineSpeechDenoiserModelConfig(config, Module) {
if (!('gtcrn' in config)) {
config.gtcrn = {model: ''};
}
const gtcrn =
initSherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig(config.gtcrn, Module);
const len = gtcrn.len + 3 * 4;
const ptr = Module._malloc(len);
let offset = 0;
Module._CopyHeap(gtcrn.ptr, gtcrn.len, ptr + offset);
offset += gtcrn.len;
Module.setValue(ptr + offset, config.numThreads || 1, 'i32');
offset += 4;
Module.setValue(ptr + offset, config.debug || 0, 'i32');
offset += 4;
const providerLen = Module.lengthBytesUTF8(config.provider || 'cpu') + 1;
const buffer = Module._malloc(providerLen);
Module.stringToUTF8(config.provider || 'cpu', buffer, providerLen);
Module.setValue(ptr + offset, buffer, 'i8*');
offset += 4;
return {buffer: buffer, ptr: ptr, len: len, gtcrn: gtcrn};
}
function initSherpaOnnxOfflineSpeechDenoiserConfig(config, Module) {
if (!('model' in config)) {
config.model = {
gtcrn: {model: ''},
provider: 'cpu',
debug: 1,
numThreads: 1,
};
}
const modelConfig =
initSherpaOnnxOfflineSpeechDenoiserModelConfig(config.model, Module);
const len = modelConfig.len;
const ptr = Module._malloc(len);
let offset = 0;
Module._CopyHeap(modelConfig.ptr, modelConfig.len, ptr + offset);
offset += modelConfig.len;
return {
ptr: ptr, len: len, config: modelConfig,
}
}
class OfflineSpeechDenoiser {
constructor(configObj, Module) {
console.log(configObj)
const config = initSherpaOnnxOfflineSpeechDenoiserConfig(configObj, Module)
// Module._MyPrint(config.ptr);
const handle = Module._SherpaOnnxCreateOfflineSpeechDenoiser(config.ptr);
freeConfig(config, Module);
this.handle = handle;
this.sampleRate =
Module._SherpaOnnxOfflineSpeechDenoiserGetSampleRate(this.handle);
this.Module = Module
}
free() {
this.Module._SherpaOnnxDestroyOfflineSpeechDenoiser(this.handle);
this.handle = 0
}
/**
* @param samples {Float32Array} Containing samples in the range [-1, 1]
* @param sampleRate {Number}
*/
run(samples, sampleRate) {
const pointer =
this.Module._malloc(samples.length * samples.BYTES_PER_ELEMENT);
this.Module.HEAPF32.set(samples, pointer / samples.BYTES_PER_ELEMENT);
const h = this.Module._SherpaOnnxOfflineSpeechDenoiserRun(
this.handle, pointer, samples.length, sampleRate);
this.Module._free(pointer);
const numSamples = this.Module.HEAP32[h / 4 + 1];
const denoisedSampleRate = this.Module.HEAP32[h / 4 + 2];
const samplesPtr = this.Module.HEAP32[h / 4] / 4;
const denoisedSamples = new Float32Array(numSamples);
for (let i = 0; i < numSamples; i++) {
denoisedSamples[i] = this.Module.HEAPF32[samplesPtr + i];
}
this.Module._SherpaOnnxDestroyDenoisedAudio(h);
return {samples: denoisedSamples, sampleRate: denoisedSampleRate};
}
save(filename, audio) {
const samples = audio.samples;
const sampleRate = audio.sampleRate;
const ptr = this.Module._malloc(samples.length * 4);
for (let i = 0; i < samples.length; i++) {
this.Module.HEAPF32[ptr / 4 + i] = samples[i];
}
const filenameLen = this.Module.lengthBytesUTF8(filename) + 1;
const buffer = this.Module._malloc(filenameLen);
this.Module.stringToUTF8(filename, buffer, filenameLen);
this.Module._SherpaOnnxWriteWave(ptr, samples.length, sampleRate, buffer);
this.Module._free(buffer);
this.Module._free(ptr);
}
}
function createOfflineSpeechDenoiser(Module, myConfig) {
let config = {
model: {
gtcrn: {model: './gtcrn.onnx'},
debug: 0,
},
};
if (myConfig) {
config = myConfig;
}
return new OfflineSpeechDenoiser(config, Module);
}
if (typeof process == 'object' && typeof process.versions == 'object' &&
typeof process.versions.node == 'string') {
module.exports = {
createOfflineSpeechDenoiser,
};
}
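For reference, a minimal usage sketch of the API above (assuming the WASM runtime has finished initializing and `gtcrn.onnx` was preloaded into the virtual filesystem; `noisy` stands for a hypothetical Float32Array of 16 kHz samples in the range [-1, 1]):

```js
// create a denoiser with the default config (./gtcrn.onnx from assets)
const denoiser = createOfflineSpeechDenoiser(Module);
console.log('model sample rate:', denoiser.sampleRate);

// run it; returns {samples: Float32Array, sampleRate: Number}
const denoised = denoiser.run(noisy, 16000);

// optionally write the result to the Emscripten virtual filesystem
denoiser.save('./denoised.wav', denoised);

denoiser.free();
```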
... ...
// wasm/sherpa-onnx-wasm-main-speech-enhancement.cc
//
// Copyright (c) 2025 Xiaomi Corporation
#include <stdio.h>
#include <algorithm>
#include <memory>
#include "sherpa-onnx/c-api/c-api.h"
// see also
// https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html
extern "C" {
static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig) == 1 * 4,
"");
static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserModelConfig) ==
sizeof(SherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig) +
3 * 4,
"");
static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserConfig) ==
sizeof(SherpaOnnxOfflineSpeechDenoiserModelConfig),
"");
void MyPrint(SherpaOnnxOfflineSpeechDenoiserConfig *config) {
auto model = &config->model;
auto gtcrn = &model->gtcrn;
fprintf(stdout, "----------offline speech denoiser model config----------\n");
fprintf(stdout, "gtcrn: %s\n", gtcrn->model);
fprintf(stdout, "num threads: %d\n", model->num_threads);
fprintf(stdout, "debug: %d\n", model->debug);
fprintf(stdout, "provider: %s\n", model->provider);
}
void CopyHeap(const char *src, int32_t num_bytes, char *dst) {
std::copy(src, src + num_bytes, dst);
}
}
... ...
../nodejs/sherpa-onnx-wave.js
\ No newline at end of file
... ...
const generateBtn = document.getElementById('generateBtn');
const hint = document.getElementById('hint');
const speakerIdLabel = document.getElementById('speakerIdLabel');
const speakerIdInput = document.getElementById('speakerId');
const speedInput = document.getElementById('speed');
... ... @@ -11,13 +10,41 @@ speedValue.innerHTML = speedInput.value;
let index = 0;
let tts = null;
let audioCtx = null;
Module = {};
// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
Module.locateFile = function(path, scriptDirectory = '') {
console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
return scriptDirectory + path;
};
// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
Module.setStatus = function(status) {
console.log(`status ${status}`);
const statusElement = document.getElementById('status');
if (status == "Running...") {
status = 'Model downloaded. Initializing text to speech model...'
}
statusElement.textContent = status;
if (status === '') {
statusElement.style.display = 'none';
// statusElement.parentNode.removeChild(statusElement);
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.remove('loading');
});
} else {
statusElement.style.display = 'block';
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.add('loading');
});
}
};
Module.onRuntimeInitialized = function() {
console.log('Model files downloaded!');
... ... @@ -27,17 +54,10 @@ Module.onRuntimeInitialized = function() {
speakerIdLabel.innerHTML = `Speaker ID (0 - ${tts.numSpeakers - 1}):`;
}
hint.innerText =
'Initialized! Please enter text and click the Generate button.';
generateBtn.disabled = false;
};
speedInput.oninput = function() {
  speedValue.innerHTML = this.value;
};
generateBtn.onclick = function() {
let speakerId = speakerIdInput.value;
... ... @@ -69,12 +89,12 @@ generateBtn.onclick = function() {
console.log('text', text);
let audio =
tts.generate({text: text, sid: speakerId, speed: speedInput.value});
console.log(audio.samples.length, audio.sampleRate);
if (!audioCtx) {
audioCtx = new AudioContext({sampleRate: tts.sampleRate});
}
const buffer = audioCtx.createBuffer(1, audio.samples.length, tts.sampleRate);
... ... @@ -155,22 +175,22 @@ function toWav(floatSamples, sampleRate) {
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
view.setUint32(0, 0x46464952, true);               // chunkID
view.setUint32(4, 36 + samples.length * 2, true);  // chunkSize
// E V A W
view.setUint32(8, 0x45564157, true);  // format
//
// t m f
view.setUint32(12, 0x20746d66, true);          // subchunk1ID
view.setUint32(16, 16, true);                  // subchunk1Size, 16 for PCM
view.setUint32(20, 1, true);                   // audioFormat, 1 for PCM
view.setUint16(22, 1, true);                   // numChannels: 1 channel
view.setUint32(24, sampleRate, true);          // sampleRate
view.setUint32(28, sampleRate * 2, true);      // byteRate
view.setUint16(32, 2, true);                   // blockAlign
view.setUint16(34, 16, true);                  // bitsPerSample
view.setUint32(36, 0x61746164, true);          // Subchunk2ID
view.setUint32(40, samples.length * 2, true);  // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
... ... @@ -178,5 +198,5 @@ function toWav(floatSamples, sampleRate) {
offset += 2;
}
return new Blob([view], {type: 'audio/wav'});
}
... ...
... ... @@ -5,7 +5,7 @@ https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
to download a model.
The following is an example:
```
```bash
cd sherpa-onnx/wasm/tts/assets
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-libritts_r-medium.tar.bz2
... ...
... ... @@ -11,34 +11,70 @@
textarea {
width:100%;
}
.loading {
display: none !important;
}
</style>
</head>
<body>
<body style="font-family: 'Source Sans Pro', sans-serif; background-color: #f9fafb; color: #333; display: flex; flex-direction: column; align-items: center; height: 100vh; margin: 0;">
<h1>
Next-gen Kaldi + WebAssembly<br/>
Text-to-speech Demo with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a>
</h1>
<div>
<span id="hint">Loading model ... ...</span>
<br/>
<br/>
<label for="speakerId" id="speakerIdLabel">Speaker ID: </label>
<input type="text" id="speakerId" name="speakerId" value="0" />
<br/>
<br/>
<label for="speed" id="speedLabel">Speed: </label>
<input type="range" id="speed" name="speed" min="0.4" max="3.5" step="0.1" value="1.0" />
<span id="speedValue"></span>
<br/>
<br/>
<textarea id="text" rows="10" placeholder="Please enter your text here and click the Generate button"></textarea>
<br/>
<br/>
<button id="generateBtn" disabled>Generate</button>
<div style="width: 100%; max-width: 900px; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); flex: 1;">
<div id="status">Loading...</div>
<div id="singleAudioContent" class="tab-content loading">
<label for="speakerId" id="speakerIdLabel">Speaker ID: </label>
<input type="text" id="speakerId" name="speakerId" value="0" />
<br/>
<br/>
<label for="speed" id="speedLabel">Speed: </label>
<input type="range" id="speed" name="speed" min="0.4" max="3.5" step="0.1" value="1.0" />
<span id="speedValue"></span>
<br/>
<br/>
<textarea id="text" rows="10" placeholder="Please enter your text here and click the Generate button"></textarea>
<br/>
<br/>
<button id="generateBtn" disabled>Generate</button>
</div>
<section flex="1" overflow="auto" id="sound-clips">
</section>
</div>
<section flex="1" overflow="auto" id="sound-clips">
</section>
<!-- Footer Section -->
<div style="width: 100%; max-width: 900px; margin-top: 1.5rem; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); text-align: left; font-size: 0.9rem; color: #6c757d;">
<h3>Description</h3>
<ul>
<li>Everything is <strong>open-sourced.</strong> <a href="https://github.com/k2-fsa/sherpa-onnx">code</a></li>
<li>If you have any issues, please either <a href="https://github.com/k2-fsa/sherpa-onnx/issues">file a ticket</a> or contact us via</li>
<ul>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#wechat">WeChat group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#qq">QQ group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#bilibili-b">Bilibili</a></li>
</ul>
</ul>
<h3>About This Demo</h3>
<ul>
<li><strong>Private and Secure:</strong> All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.</li>
<li><strong>Efficient Resource Usage:</strong> No GPU is required, leaving system resources available for webLLM analysis.</li>
</ul>
<h3>Latest Update</h3>
<ul>
<li>Update UI.</li>
<li>First working version.</li>
</ul>
<h3>Acknowledgement</h3>
<ul>
<li>We refer to <a href="https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm">https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm</a> for the UI part.</li>
</ul>
</div>
<script src="app-tts.js"></script>
<script src="sherpa-onnx-tts.js"></script>
... ...
... ... @@ -263,7 +263,7 @@ function initSherpaOnnxOfflineTtsModelConfig(config, Module) {
const providerLen = Module.lengthBytesUTF8(config.provider || 'cpu') + 1;
const buffer = Module._malloc(providerLen);
Module.stringToUTF8(config.provider, buffer, providerLen);
Module.stringToUTF8(config.provider || 'cpu', buffer, providerLen);
Module.setValue(ptr + offset, buffer, 'i8*');
offset += 4;
... ...
... ... @@ -5,7 +5,6 @@
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const clearBtn = document.getElementById('clearBtn');
const hint = document.getElementById('hint');
const soundClips = document.getElementById('sound-clips');
let textArea = document.getElementById('results');
... ... @@ -16,7 +15,7 @@ let resultList = [];
clearBtn.onclick = function() {
resultList = [];
textArea.value = getDisplayResult();
textArea.scrollTop = textArea.scrollHeight;  // auto scroll
};
function getDisplayResult() {
... ... @@ -41,19 +40,17 @@ function getDisplayResult() {
return ans;
}
Module = {};
let audioCtx;
let mediaStream;
let expectedSampleRate = 16000;
let recordSampleRate;     // the sampleRate of the microphone
let recorder = null;      // the microphone
let leftchannel = [];     // TODO: Use a single channel
let recordingLength = 0;  // number of samples so far
let vad = null;
let buffer = null;
... ... @@ -76,47 +73,47 @@ function createOfflineRecognizerSenseVoice() {}
function initOfflineRecognizer() {
let config = {
modelConfig: {
  debug: 1,
  tokens: './tokens.txt',
},
};
if (fileExists('sense-voice.onnx') == 1) {
config.modelConfig.senseVoice = {
model: './sense-voice.onnx',
useInverseTextNormalization: 1,
};
} else if (fileExists('whisper-encoder.onnx')) {
config.modelConfig.whisper = {
encoder: './whisper-encoder.onnx',
decoder: './whisper-decoder.onnx',
};
} else if (fileExists('transducer-encoder.onnx')) {
config.modelConfig.transducer = {
encoder: './transducer-encoder.onnx',
decoder: './transducer-decoder.onnx',
joiner: './transducer-joiner.onnx',
};
config.modelConfig.modelType = 'transducer';
} else if (fileExists('nemo-transducer-encoder.onnx')) {
config.modelConfig.transducer = {
encoder: './nemo-transducer-encoder.onnx',
decoder: './nemo-transducer-decoder.onnx',
joiner: './nemo-transducer-joiner.onnx',
};
config.modelConfig.modelType = 'nemo_transducer';
} else if (fileExists('paraformer.onnx')) {
config.modelConfig.paraformer = {
model: './paraformer.onnx',
};
} else if (fileExists('telespeech.onnx')) {
config.modelConfig.telespeechCtc = './telespeech.onnx';
} else if (fileExists('moonshine-preprocessor.onnx')) {
config.modelConfig.moonshine = {
preprocessor: './moonshine-preprocessor.onnx',
encoder: './moonshine-encoder.onnx',
uncachedDecoder: './moonshine-uncached-decoder.onnx',
cachedDecoder: './moonshine-cached-decoder.onnx'
};
} else {
console.log('Please specify a model.');
... ... @@ -126,9 +123,37 @@ function initOfflineRecognizer() {
recognizer = new OfflineRecognizer(config, Module);
}
// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
Module.locateFile = function(path, scriptDirectory = '') {
console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
return scriptDirectory + path;
};
// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
Module.setStatus = function(status) {
console.log(`status ${status}`);
const statusElement = document.getElementById('status');
if (status == "Running...") {
status = 'Model downloaded. Initializing recognizer...';
}
statusElement.textContent = status;
if (status === '') {
statusElement.style.display = 'none';
// statusElement.parentNode.removeChild(statusElement);
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.remove('loading');
});
} else {
statusElement.style.display = 'block';
document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
tabContentElement.classList.add('loading');
});
}
};
Module.onRuntimeInitialized = function() {
console.log('inited!');
hint.innerText = 'Model loaded! Please click start';
startBtn.disabled = false;
... ... @@ -141,17 +166,15 @@ Module.onRuntimeInitialized = function() {
initOfflineRecognizer();
};
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
// see https://w3c.github.io/mediacapture-main/#dom-mediadevices-getusermedia
const constraints = {audio: true};
let onSuccess = function(stream) {
if (!audioCtx) {
audioCtx = new AudioContext({sampleRate: expectedSampleRate});
}
console.log(audioCtx);
recordSampleRate = audioCtx.sampleRate;
... ... @@ -219,7 +242,6 @@ if (navigator.mediaDevices.getUserMedia) {
resultList.push(durationStr);
// now save the segment to a wav file
let buf = new Int16Array(segment.samples.length);
for (var i = 0; i < segment.samples.length; ++i) {
... ... @@ -277,7 +299,7 @@ if (navigator.mediaDevices.getUserMedia) {
}
textArea.value = getDisplayResult();
textArea.scrollTop = textArea.scrollHeight;  // auto scroll
};
startBtn.onclick = function() {
... ... @@ -308,9 +330,8 @@ if (navigator.mediaDevices.getUserMedia) {
};
};
let onError = function(err) {
  console.log('The following error occurred: ' + err);
};
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
... ... @@ -318,7 +339,6 @@ if (navigator.mediaDevices.getUserMedia) {
alert('getUserMedia not supported on your browser!');
}
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
function flatten(listOfSamples) {
... ... @@ -344,22 +364,22 @@ function toWav(samples) {
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
view.setUint32(0, 0x46464952, true);               // chunkID
view.setUint32(4, 36 + samples.length * 2, true);  // chunkSize
// E V A W
view.setUint32(8, 0x45564157, true);  // format
//
// t m f
view.setUint32(12, 0x20746d66, true);          // subchunk1ID
view.setUint32(16, 16, true);                  // subchunk1Size, 16 for PCM
view.setUint32(20, 1, true);                   // audioFormat, 1 for PCM
view.setUint16(22, 1, true);                   // numChannels: 1 channel
view.setUint32(24, expectedSampleRate, true);      // sampleRate
view.setUint32(28, expectedSampleRate * 2, true);  // byteRate
view.setUint16(32, 2, true);                   // blockAlign
view.setUint16(34, 16, true);                  // bitsPerSample
view.setUint32(36, 0x61746164, true);          // Subchunk2ID
view.setUint32(40, samples.length * 2, true);  // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
... ... @@ -367,7 +387,7 @@ function toWav(samples) {
offset += 2;
}
return new Blob([view], {type: 'audio/wav'});
}
// this function is copied from
... ...
... ... @@ -11,30 +11,68 @@
textarea {
width:100%;
}
.loading {
display: none !important;
}
</style>
</head>
<body>
<body style="font-family: 'Source Sans Pro', sans-serif; background-color: #f9fafb; color: #333; display: flex; flex-direction: column; align-items: center; height: 100vh; margin: 0;">
<h1>
Next-gen Kaldi + WebAssembly<br/>
VAD+ASR Demo with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a><br/>
(with Zipformer)
</h1>
<div>
<span id="hint">Loading model ... ...</span>
<br/>
<br/>
<button id="startBtn" disabled>Start</button>
<button id="stopBtn" disabled>Stop</button>
<button id="clearBtn">Clear</button>
<br/>
<br/>
<textarea id="results" rows="10" readonly></textarea>
<div style="width: 100%; max-width: 900px; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); flex: 1;">
<div id="status">Loading...</div>
<div id="singleAudioContent" class="tab-content loading">
<div style="display: flex; gap: 1.5rem;">
<div style="flex: 1; display: flex; flex-direction: row; align-items: center; gap: 1rem;">
<button id="startBtn" disabled>Start</button>
<button id="stopBtn" disabled>Stop</button>
<button id="clearBtn">Clear</button>
</div>
</div>
<div style="flex: 1; display: flex; flex-direction: column; gap: 1rem;">
<div style="font-size: 1rem; font-weight: bold; padding: 0.5rem 1rem; background-color: #f8f9fa; border-radius: 8px; color: #6c757d;">Transcript</div>
<textarea id="results" rows="10" placeholder="Output will appear here..." readonly style="flex: 1; padding: 0.75rem; font-size: 1rem; border: 1px solid #ced4da; border-radius: 8px; resize: none; background-color: #f8f9fa;"></textarea>
</div>
<section flex="1" overflow="auto" id="sound-clips">
</section>
</div>
<section flex="1" overflow="auto" id="sound-clips">
</section>
<!-- Footer Section -->
<div style="width: 100%; max-width: 900px; margin-top: 1.5rem; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); text-align: left; font-size: 0.9rem; color: #6c757d;">
<h3>Description</h3>
<ul>
<li>Everything is <strong>open-sourced.</strong> <a href="https://github.com/k2-fsa/sherpa-onnx">code</a></li>
<li>If you have any issues, please either <a href="https://github.com/k2-fsa/sherpa-onnx/issues">file a ticket</a> or contact us via</li>
<ul>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#wechat">WeChat group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#qq">QQ group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#bilibili-b">Bilibili</a></li>
</ul>
</ul>
<h3>About This Demo</h3>
<ul>
<li><strong>Private and Secure:</strong> All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.</li>
<li><strong>Efficient Resource Usage:</strong> No GPU is required, leaving system resources available for webLLM analysis.</li>
</ul>
<h3>Latest Update</h3>
<ul>
<li>Update UI.</li>
<li>First working version.</li>
</ul>
<h3>Acknowledgement</h3>
<ul>
<li>We refer to <a href="https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm">https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm</a> for the UI part.</li>
</ul>
</div>
<script src="sherpa-onnx-asr.js"></script>
<script src="sherpa-onnx-vad.js"></script>
... ...