Fangjun Kuang
Committed by GitHub

WebAssembly example for speaker diarization (#1411)

Showing 37 changed files with 1,008 additions and 24 deletions
... ... @@ -29,7 +29,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
... ... @@ -28,7 +28,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
... ... @@ -29,7 +29,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
... ... @@ -29,7 +29,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
name: wasm-simd-hf-space-speaker-diarization
on:
push:
branches:
- wasm
- wasm-speaker-diarization
tags:
- 'v[0-9]+.[0-9]+.[0-9]+*'
workflow_dispatch:
concurrency:
group: wasm-simd-hf-space-speaker-diarization-${{ github.ref }}
cancel-in-progress: true
jobs:
wasm-simd-hf-space-speaker-diarization:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
shell: bash
run: |
emcc -v
echo "--------------------"
emcc --check
- name: Download model files
shell: bash
run: |
cd wasm/speaker-diarization/assets/
ls -lh
echo "----------"
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/speaker-segmentation-models/sherpa-onnx-pyannote-segmentation-3-0.tar.bz2
tar xvf sherpa-onnx-pyannote-segmentation-3-0.tar.bz2
rm sherpa-onnx-pyannote-segmentation-3-0.tar.bz2
mv sherpa-onnx-pyannote-segmentation-3-0/model.onnx ./segmentation.onnx
rm -rf sherpa-onnx-pyannote-segmentation-3-0
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/speaker-recongition-models/3dspeaker_speech_eres2net_base_sv_zh-cn_3dspeaker_16k.onnx
mv 3dspeaker_speech_eres2net_base_sv_zh-cn_3dspeaker_16k.onnx ./embedding.onnx
echo "----------"
ls -lh
- name: Build sherpa-onnx for WebAssembly
shell: bash
run: |
./build-wasm-simd-speaker-diarization.sh
- name: Collect files
shell: bash
run: |
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
dst=sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-speaker-diarization
mv build-wasm-simd-speaker-diarization/install/bin/wasm/speaker-diarization $dst
ls -lh $dst
tar cjfv $dst.tar.bz2 ./$dst
- name: Upload wasm files
uses: actions/upload-artifact@v4
with:
name: sherpa-onnx-wasm-simd-speaker-diarization
path: ./sherpa-onnx-wasm-simd-*.tar.bz2
- name: Release
if: (github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa') && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
uses: svenstaro/upload-release-action@v2
with:
file_glob: true
overwrite: true
file: ./*.tar.bz2
- name: Publish to ModelScope
# if: false
env:
MS_TOKEN: ${{ secrets.MODEL_SCOPE_GIT_TOKEN }}
uses: nick-fields/retry@v2
with:
max_attempts: 20
timeout_seconds: 200
shell: bash
command: |
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
git config --global user.email "csukuangfj@gmail.com"
git config --global user.name "Fangjun Kuang"
rm -rf ms
export GIT_LFS_SKIP_SMUDGE=1
export GIT_CLONE_PROTECTION_ACTIVE=false
git clone https://www.modelscope.cn/studios/csukuangfj/web-assembly-speaker-diarization-sherpa-onnx.git ms
cd ms
rm -fv *.js
rm -fv *.data
git fetch
git pull
git merge -m "merge remote" --ff origin main
cp -v ../sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-*/* .
git status
git lfs track "*.data"
git lfs track "*.wasm"
ls -lh
git add .
git commit -m "update model"
git push https://oauth2:${MS_TOKEN}@www.modelscope.cn/studios/csukuangfj/web-assembly-speaker-diarization-sherpa-onnx.git
- name: Publish to huggingface
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
uses: nick-fields/retry@v2
with:
max_attempts: 20
timeout_seconds: 200
shell: bash
command: |
SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
git config --global user.email "csukuangfj@gmail.com"
git config --global user.name "Fangjun Kuang"
rm -rf huggingface
export GIT_LFS_SKIP_SMUDGE=1
export GIT_CLONE_PROTECTION_ACTIVE=false
git clone https://csukuangfj:$HF_TOKEN@huggingface.co/spaces/k2-fsa/web-assembly-speaker-diarization-sherpa-onnx huggingface
ls -lh
cd huggingface
rm -fv *.js
rm -fv *.data
git fetch
git pull
git merge -m "merge remote" --ff origin main
cp -v ../sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-*/* .
git status
git lfs track "*.data"
git lfs track "*.wasm"
ls -lh
git add .
git commit -m "update model"
git push https://csukuangfj:$HF_TOKEN@huggingface.co/spaces/k2-fsa/web-assembly-speaker-diarization-sherpa-onnx main
... ...
... ... @@ -37,7 +37,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
... ... @@ -29,7 +29,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
... ... @@ -29,7 +29,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
... ... @@ -29,7 +29,7 @@ jobs:
- name: Install emsdk
uses: mymindstorm/setup-emsdk@v14
with:
version: 3.1.51
version: 3.1.53
actions-cache-folder: 'emsdk-cache'
- name: View emsdk version
... ...
... ... @@ -32,6 +32,7 @@ option(SHERPA_ONNX_ENABLE_WEBSOCKET "Whether to build websocket server/client" O
option(SHERPA_ONNX_ENABLE_GPU "Enable ONNX Runtime GPU support" OFF)
option(SHERPA_ONNX_ENABLE_DIRECTML "Enable ONNX Runtime DirectML support" OFF)
option(SHERPA_ONNX_ENABLE_WASM "Whether to enable WASM" OFF)
option(SHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION "Whether to enable WASM for speaker diarization" OFF)
option(SHERPA_ONNX_ENABLE_WASM_TTS "Whether to enable WASM for TTS" OFF)
option(SHERPA_ONNX_ENABLE_WASM_ASR "Whether to enable WASM for ASR" OFF)
option(SHERPA_ONNX_ENABLE_WASM_KWS "Whether to enable WASM for KWS" OFF)
... ... @@ -135,6 +136,7 @@ message(STATUS "SHERPA_ONNX_ENABLE_C_API ${SHERPA_ONNX_ENABLE_C_API}")
message(STATUS "SHERPA_ONNX_ENABLE_WEBSOCKET ${SHERPA_ONNX_ENABLE_WEBSOCKET}")
message(STATUS "SHERPA_ONNX_ENABLE_GPU ${SHERPA_ONNX_ENABLE_GPU}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM ${SHERPA_ONNX_ENABLE_WASM}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION ${SHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_TTS ${SHERPA_ONNX_ENABLE_WASM_TTS}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_ASR ${SHERPA_ONNX_ENABLE_WASM_ASR}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_KWS ${SHERPA_ONNX_ENABLE_WASM_KWS}")
... ... @@ -196,9 +198,19 @@ else()
add_definitions(-DSHERPA_ONNX_ENABLE_DIRECTML=0)
endif()
if(SHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION)
if(NOT SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION)
message(FATAL_ERROR "Please set SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION to ON if you want to build WASM for speaker diarization")
endif()
if(NOT SHERPA_ONNX_ENABLE_WASM)
message(FATAL_ERROR "Please set SHERPA_ONNX_ENABLE_WASM to ON if you enable WASM for speaker diarization")
endif()
endif()
if(SHERPA_ONNX_ENABLE_WASM_TTS)
if(NOT SHERPA_ONNX_ENABLE_TTS)
message(FATAL_ERROR "Please set SHERPA_ONNX_ENABLE_TTS to ON if you want to build wasm TTS")
message(FATAL_ERROR "Please set SHERPA_ONNX_ENABLE_TTS to ON if you want to build WASM for TTS")
endif()
if(NOT SHERPA_ONNX_ENABLE_WASM)
... ...
... ... @@ -116,6 +116,7 @@ We also have spaces built using WebAssembly. They are listed below:
|VAD + speech recognition (English + Chinese, and various Chinese dialects) with Paraformer-small |[Click me][wasm-hf-vad-asr-zh-en-paraformer-small]| [Address][wasm-ms-vad-asr-zh-en-paraformer-small]|
|Speech synthesis (English) |[Click me][wasm-hf-tts-piper-en]| [Address][wasm-ms-tts-piper-en]|
|Speech synthesis (German) |[Click me][wasm-hf-tts-piper-de]| [Address][wasm-ms-tts-piper-de]|
|Speaker diarization |[Click me][wasm-hf-speaker-diarization]|[Address][wasm-ms-speaker-diarization]|
### Links for pre-built Android APKs
... ... @@ -173,6 +174,7 @@ We also have spaces built using WebAssembly. They are listed below:
| Speaker identification (Speaker ID) | [Address][sid-models] |
| Spoken language identification (Language ID)| See multi-lingual [Whisper][Whisper] ASR models from [Speech recognition][asr-models]|
| Punctuation | [Address][punct-models] |
| Speaker segmentation | [Address][speaker-segmentation-models] |
### Useful links
... ... @@ -261,6 +263,8 @@ Video demo in Chinese:
[wasm-ms-tts-piper-en]: https://modelscope.cn/studios/k2-fsa/web-assembly-tts-sherpa-onnx-en
[wasm-hf-tts-piper-de]: https://huggingface.co/spaces/k2-fsa/web-assembly-tts-sherpa-onnx-de
[wasm-ms-tts-piper-de]: https://modelscope.cn/studios/k2-fsa/web-assembly-tts-sherpa-onnx-de
[wasm-hf-speaker-diarization]: https://huggingface.co/spaces/k2-fsa/web-assembly-speaker-diarization-sherpa-onnx
[wasm-ms-speaker-diarization]: https://www.modelscope.cn/studios/csukuangfj/web-assembly-speaker-diarization-sherpa-onnx
[apk-streaming-asr]: https://k2-fsa.github.io/sherpa/onnx/android/apk.html
[apk-streaming-asr-cn]: https://k2-fsa.github.io/sherpa/onnx/android/apk-cn.html
[apk-tts]: https://k2-fsa.github.io/sherpa/onnx/tts/apk-engine.html
... ... @@ -303,5 +307,6 @@ Video demo in Chinese:
[sid-models]: https://github.com/k2-fsa/sherpa-onnx/releases/tag/speaker-recongition-models
[slid-models]: https://github.com/k2-fsa/sherpa-onnx/releases/tag/speaker-recongition-models
[punct-models]: https://github.com/k2-fsa/sherpa-onnx/releases/tag/punctuation-models
[speaker-segmentation-models]: https://github.com/k2-fsa/sherpa-onnx/releases/tag/speaker-segmentation-models
[GigaSpeech]: https://github.com/SpeechColab/GigaSpeech
[WenetSpeech]: https://github.com/wenet-e2e/WenetSpeech
... ...
... ... @@ -14,8 +14,8 @@ if [ x"$EMSCRIPTEN" == x"" ]; then
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install latest"
echo "./emsdk activate latest"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
... ...
... ... @@ -9,8 +9,8 @@ if [ x"$EMSCRIPTEN" == x"" ]; then
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install latest"
echo "./emsdk activate latest"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
... ...
... ... @@ -16,8 +16,8 @@ if [ x"$EMSCRIPTEN" == x"" ]; then
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install latest"
echo "./emsdk activate latest"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
... ...
#!/usr/bin/env bash
# Copyright (c) 2024 Xiaomi Corporation
#
# This script builds sherpa-onnx for WebAssembly (speaker diarization)
set -ex
if [ x"$EMSCRIPTEN" == x"" ]; then
if ! command -v emcc &> /dev/null; then
echo "Please install emscripten first"
echo ""
echo "You can use the following commands to install it:"
echo ""
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
EMSCRIPTEN=$(dirname $(realpath $(which emcc)))
fi
fi
export EMSCRIPTEN=$EMSCRIPTEN
echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
exit 1
fi
mkdir -p build-wasm-simd-speaker-diarization
pushd build-wasm-simd-speaker-diarization
export SHERPA_ONNX_IS_USING_BUILD_WASM_SH=ON
cmake \
-DCMAKE_INSTALL_PREFIX=./install \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE=$EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake \
\
-DSHERPA_ONNX_ENABLE_PYTHON=OFF \
-DSHERPA_ONNX_ENABLE_TESTS=OFF \
-DSHERPA_ONNX_ENABLE_CHECK=OFF \
-DBUILD_SHARED_LIBS=OFF \
-DSHERPA_ONNX_ENABLE_PORTAUDIO=OFF \
-DSHERPA_ONNX_ENABLE_JNI=OFF \
-DSHERPA_ONNX_ENABLE_C_API=ON \
-DSHERPA_ONNX_ENABLE_WEBSOCKET=OFF \
-DSHERPA_ONNX_ENABLE_GPU=OFF \
-DSHERPA_ONNX_ENABLE_WASM=ON \
-DSHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION=ON \
-DSHERPA_ONNX_ENABLE_BINARY=OFF \
-DSHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY=OFF \
..
make -j2
make install
ls -lh install/bin/wasm/speaker-diarization
... ...
... ... @@ -14,8 +14,8 @@ if [ x"$EMSCRIPTEN" == x"" ]; then
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install latest"
echo "./emsdk activate latest"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
... ...
... ... @@ -15,8 +15,8 @@ if [ x"$EMSCRIPTEN" == x"" ]; then
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install latest"
echo "./emsdk activate latest"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
... ...
... ... @@ -14,8 +14,8 @@ if [ x"$EMSCRIPTEN" == x"" ]; then
echo "git clone https://github.com/emscripten-core/emsdk.git"
echo "cd emsdk"
echo "git pull"
echo "./emsdk install latest"
echo "./emsdk activate latest"
echo "./emsdk install 3.1.53"
echo "./emsdk activate 3.1.53"
echo "source ./emsdk_env.sh"
exit 1
else
... ...
... ... @@ -16,6 +16,11 @@ namespace SherpaOnnx
_handle = new HandleRef(this, h);
}
public void SetConfig(OfflineSpeakerDiarizationConfig config)
{
SherpaOnnxOfflineSpeakerDiarizationSetConfig(_handle.Handle, ref config);
}
public OfflineSpeakerDiarizationSegment[] Process(float[] samples)
{
IntPtr result = SherpaOnnxOfflineSpeakerDiarizationProcess(_handle.Handle, samples, samples.Length);
... ... @@ -117,6 +122,9 @@ namespace SherpaOnnx
[DllImport(Dll.Filename)]
private static extern void SherpaOnnxOfflineSpeakerDiarizationDestroySegment(IntPtr handle);
[DllImport(Dll.Filename)]
private static extern void SherpaOnnxOfflineSpeakerDiarizationSetConfig(IntPtr handle, ref OfflineSpeakerDiarizationConfig config);
}
}
... ...
... ... @@ -1276,6 +1276,16 @@ func (sd *OfflineSpeakerDiarization) SampleRate() int {
return int(C.SherpaOnnxOfflineSpeakerDiarizationGetSampleRate(sd.impl))
}
// only config.Clustering is used. All other fields are ignored
func (sd *OfflineSpeakerDiarization) SetConfig(config *OfflineSpeakerDiarizationConfig) {
c := C.struct_SherpaOnnxOfflineSpeakerDiarizationConfig{}
c.clustering.num_clusters = C.int(config.Clustering.NumClusters)
c.clustering.threshold = C.float(config.Clustering.Threshold)
C.SherpaOnnxOfflineSpeakerDiarizationSetConfig(sd.impl, &c)
}
type OfflineSpeakerDiarizationSegment struct {
Start float32
End float32
... ...
... ... @@ -25,6 +25,11 @@ class OfflineSpeakerDiarization {
process(samples) {
return addon.offlineSpeakerDiarizationProcess(this.handle, samples);
}
setConfig(config) {
addon.offlineSpeakerDiarizationSetConfig(this.handle, config);
this.config.clustering = config.clustering;
}
}
module.exports = {
... ...
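For reference, a minimal usage sketch of the new setConfig method in this Node.js addon wrapper. It assumes sd is an OfflineSpeakerDiarization instance already created through this module (its constructor is outside this hunk), that samples is a Float32Array of mono audio at the expected sample rate, that the clustering field names follow the addon's camelCase convention, and that process() returns segments with start/end/speaker fields as in the WASM binding later in this diff:
```js
// Hypothetical sketch; only the clustering field is honored by the native SetConfig.
sd.setConfig({clustering: {numClusters: 2, threshold: 0.5}});
const segments = sd.process(samples);  // re-run diarization with the new clustering
for (const seg of segments) {
  console.log(seg.start, seg.end, seg.speaker);
}
```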
... ... @@ -251,6 +251,46 @@ static Napi::Array OfflineSpeakerDiarizationProcessWrapper(
return ans;
}
static void OfflineSpeakerDiarizationSetConfigWrapper(
const Napi::CallbackInfo &info) {
Napi::Env env = info.Env();
if (info.Length() != 2) {
std::ostringstream os;
os << "Expect only 2 arguments. Given: " << info.Length();
Napi::TypeError::New(env, os.str()).ThrowAsJavaScriptException();
return;
}
if (!info[0].IsExternal()) {
Napi::TypeError::New(
env, "Argument 0 should be an offline speaker diarization pointer.")
.ThrowAsJavaScriptException();
return;
}
const SherpaOnnxOfflineSpeakerDiarization *sd =
info[0].As<Napi::External<SherpaOnnxOfflineSpeakerDiarization>>().Data();
if (!info[1].IsObject()) {
Napi::TypeError::New(env, "Expect an object as the argument")
.ThrowAsJavaScriptException();
return;
}
Napi::Object o = info[1].As<Napi::Object>();
SherpaOnnxOfflineSpeakerDiarizationConfig c;
memset(&c, 0, sizeof(c));
c.clustering = GetFastClusteringConfig(o);
SherpaOnnxOfflineSpeakerDiarizationSetConfig(sd, &c);
}
void InitNonStreamingSpeakerDiarization(Napi::Env env, Napi::Object exports) {
exports.Set(Napi::String::New(env, "createOfflineSpeakerDiarization"),
Napi::Function::New(env, CreateOfflineSpeakerDiarizationWrapper));
... ... @@ -262,4 +302,8 @@ void InitNonStreamingSpeakerDiarization(Napi::Env env, Napi::Object exports) {
exports.Set(
Napi::String::New(env, "offlineSpeakerDiarizationProcess"),
Napi::Function::New(env, OfflineSpeakerDiarizationProcessWrapper));
exports.Set(
Napi::String::New(env, "offlineSpeakerDiarizationSetConfig"),
Napi::Function::New(env, OfflineSpeakerDiarizationSetConfigWrapper));
}
... ...
... ... @@ -1749,6 +1749,20 @@ int32_t SherpaOnnxOfflineSpeakerDiarizationGetSampleRate(
return sd->impl->SampleRate();
}
void SherpaOnnxOfflineSpeakerDiarizationSetConfig(
const SherpaOnnxOfflineSpeakerDiarization *sd,
const SherpaOnnxOfflineSpeakerDiarizationConfig *config) {
sherpa_onnx::OfflineSpeakerDiarizationConfig sd_config;
sd_config.clustering.num_clusters =
SHERPA_ONNX_OR(config->clustering.num_clusters, -1);
sd_config.clustering.threshold =
SHERPA_ONNX_OR(config->clustering.threshold, 0.5);
sd->impl->SetConfig(sd_config);
}
int32_t SherpaOnnxOfflineSpeakerDiarizationResultGetNumSpeakers(
const SherpaOnnxOfflineSpeakerDiarizationResult *r) {
return r->impl.NumSpeakers();
... ...
... ... @@ -1449,6 +1449,11 @@ SHERPA_ONNX_API void SherpaOnnxDestroyOfflineSpeakerDiarization(
SHERPA_ONNX_API int32_t SherpaOnnxOfflineSpeakerDiarizationGetSampleRate(
const SherpaOnnxOfflineSpeakerDiarization *sd);
// Only config->clustering is used. All other fields are ignored
SHERPA_ONNX_API void SherpaOnnxOfflineSpeakerDiarizationSetConfig(
const SherpaOnnxOfflineSpeakerDiarization *sd,
const SherpaOnnxOfflineSpeakerDiarizationConfig *config);
SHERPA_ONNX_API typedef struct SherpaOnnxOfflineSpeakerDiarizationResult
SherpaOnnxOfflineSpeakerDiarizationResult;
... ...
... ... @@ -20,6 +20,10 @@ class OfflineSpeakerDiarizationImpl {
virtual int32_t SampleRate() const = 0;
// Note: Only config.clustering is used. All other fields in config are
// ignored
virtual void SetConfig(const OfflineSpeakerDiarizationConfig &config) = 0;
virtual OfflineSpeakerDiarizationResult Process(
const float *audio, int32_t n,
OfflineSpeakerDiarizationProgressCallback callback = nullptr,
... ...
... ... @@ -60,7 +60,7 @@ class OfflineSpeakerDiarizationPyannoteImpl
: config_(config),
segmentation_model_(config_.segmentation),
embedding_extractor_(config_.embedding),
clustering_(config_.clustering) {
clustering_(std::make_unique<FastClustering>(config_.clustering)) {
Init();
}
... ... @@ -70,6 +70,15 @@ class OfflineSpeakerDiarizationPyannoteImpl
return meta_data.sample_rate;
}
void SetConfig(const OfflineSpeakerDiarizationConfig &config) override {
if (!config.clustering.Validate()) {
SHERPA_ONNX_LOGE("Invalid clustering config. Skip it");
return;
}
clustering_ = std::make_unique<FastClustering>(config.clustering);
config_.clustering = config.clustering;
}
OfflineSpeakerDiarizationResult Process(
const float *audio, int32_t n,
OfflineSpeakerDiarizationProgressCallback callback = nullptr,
... ... @@ -105,7 +114,7 @@ class OfflineSpeakerDiarizationPyannoteImpl
ComputeEmbeddings(audio, n, chunk_speaker_samples_list_pair.second,
std::move(callback), callback_arg);
std::vector<int32_t> cluster_labels = clustering_.Cluster(
std::vector<int32_t> cluster_labels = clustering_->Cluster(
&embeddings(0, 0), embeddings.rows(), embeddings.cols());
int32_t max_cluster_index =
... ... @@ -636,7 +645,7 @@ class OfflineSpeakerDiarizationPyannoteImpl
OfflineSpeakerDiarizationConfig config_;
OfflineSpeakerSegmentationPyannoteModel segmentation_model_;
SpeakerEmbeddingExtractor embedding_extractor_;
FastClustering clustering_;
std::unique_ptr<FastClustering> clustering_;
Matrix2DInt32 powerset_mapping_;
};
... ...
... ... @@ -79,6 +79,11 @@ int32_t OfflineSpeakerDiarization::SampleRate() const {
return impl_->SampleRate();
}
void OfflineSpeakerDiarization::SetConfig(
const OfflineSpeakerDiarizationConfig &config) {
impl_->SetConfig(config);
}
OfflineSpeakerDiarizationResult OfflineSpeakerDiarization::Process(
const float *audio, int32_t n,
OfflineSpeakerDiarizationProgressCallback callback /*= nullptr*/,
... ...
... ... @@ -62,6 +62,10 @@ class OfflineSpeakerDiarization {
// Expected sample rate of the input audio samples
int32_t SampleRate() const;
// Note: Only config.clustering is used. All other fields in config are
// ignored
void SetConfig(const OfflineSpeakerDiarizationConfig &config);
OfflineSpeakerDiarizationResult Process(
const float *audio, int32_t n,
OfflineSpeakerDiarizationProgressCallback callback = nullptr,
... ...
... ... @@ -68,6 +68,7 @@ void PybindOfflineSpeakerDiarization(py::module *m) {
.def(py::init<const OfflineSpeakerDiarizationConfig &>(),
py::arg("config"))
.def_property_readonly("sample_rate", &PyClass::SampleRate)
.def("set_config", &PyClass::SetConfig, py::arg("config"))
.def(
"process",
[](const PyClass &self, const std::vector<float> samples,
... ...
... ... @@ -1161,6 +1161,11 @@ class SherpaOnnxOfflineSpeakerDiarizationWrapper {
return Int(SherpaOnnxOfflineSpeakerDiarizationGetSampleRate(impl))
}
// only config.clustering is used. All other fields are ignored
func setConfig(config: UnsafePointer<SherpaOnnxOfflineSpeakerDiarizationConfig>!) {
SherpaOnnxOfflineSpeakerDiarizationSetConfig(impl, config)
}
func process(samples: [Float]) -> [SherpaOnnxOfflineSpeakerDiarizationSegmentWrapper] {
let result = SherpaOnnxOfflineSpeakerDiarizationProcess(
impl, samples, Int32(samples.count))
... ...
... ... @@ -18,6 +18,10 @@ if(SHERPA_ONNX_ENABLE_WASM_VAD_ASR)
add_subdirectory(vad-asr)
endif()
if(SHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION)
add_subdirectory(speaker-diarization)
endif()
if(SHERPA_ONNX_ENABLE_WASM_NODEJS)
add_subdirectory(nodejs)
endif()
... ...
if(NOT $ENV{SHERPA_ONNX_IS_USING_BUILD_WASM_SH})
message(FATAL_ERROR "Please use ./build-wasm-simd-speaker-diarization.sh to build for WASM for speaker diarization")
endif()
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/assets/segmentation.onnx" OR NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/assets/embedding.onnx")
message(FATAL_ERROR "Please read ${CMAKE_CURRENT_SOURCE_DIR}/assets/README.md before you continue")
endif()
set(exported_functions
MyPrint
SherpaOnnxCreateOfflineSpeakerDiarization
SherpaOnnxDestroyOfflineSpeakerDiarization
SherpaOnnxOfflineSpeakerDiarizationDestroyResult
SherpaOnnxOfflineSpeakerDiarizationDestroySegment
SherpaOnnxOfflineSpeakerDiarizationGetSampleRate
SherpaOnnxOfflineSpeakerDiarizationProcess
SherpaOnnxOfflineSpeakerDiarizationProcessWithCallback
SherpaOnnxOfflineSpeakerDiarizationResultGetNumSegments
SherpaOnnxOfflineSpeakerDiarizationResultSortByStartTime
SherpaOnnxOfflineSpeakerDiarizationSetConfig
)
set(mangled_exported_functions)
foreach(x IN LISTS exported_functions)
list(APPEND mangled_exported_functions "_${x}")
endforeach()
list(JOIN mangled_exported_functions "," all_exported_functions)
include_directories(${CMAKE_SOURCE_DIR})
set(MY_FLAGS " -s FORCE_FILESYSTEM=1 -s INITIAL_MEMORY=512MB -s ALLOW_MEMORY_GROWTH=1")
string(APPEND MY_FLAGS " -sSTACK_SIZE=10485760 ") # 10MB
string(APPEND MY_FLAGS " -sEXPORTED_FUNCTIONS=[_CopyHeap,_malloc,_free,${all_exported_functions}] ")
string(APPEND MY_FLAGS "--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/assets@. ")
string(APPEND MY_FLAGS " -sEXPORTED_RUNTIME_METHODS=['ccall','stringToUTF8','setValue','getValue','lengthBytesUTF8','UTF8ToString'] ")
message(STATUS "MY_FLAGS: ${MY_FLAGS}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${MY_FLAGS}")
if (NOT CMAKE_EXECUTABLE_SUFFIX STREQUAL ".js")
message(FATAL_ERROR "The default suffix for building executables should be .js!")
endif()
# set(CMAKE_EXECUTABLE_SUFFIX ".html")
add_executable(sherpa-onnx-wasm-main-speaker-diarization sherpa-onnx-wasm-main-speaker-diarization.cc)
target_link_libraries(sherpa-onnx-wasm-main-speaker-diarization sherpa-onnx-c-api)
install(TARGETS sherpa-onnx-wasm-main-speaker-diarization DESTINATION bin/wasm/speaker-diarization)
install(
FILES
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speaker-diarization>/sherpa-onnx-wasm-main-speaker-diarization.js"
"index.html"
"sherpa-onnx-speaker-diarization.js"
"app-speaker-diarization.js"
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speaker-diarization>/sherpa-onnx-wasm-main-speaker-diarization.wasm"
"$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speaker-diarization>/sherpa-onnx-wasm-main-speaker-diarization.data"
DESTINATION
bin/wasm/speaker-diarization
)
... ...
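The foreach loop above prepends an underscore to each exported symbol because Emscripten publishes exported C functions on the Module object under their underscore-mangled names. A hedged sketch of what that means on the JavaScript side (handle is a placeholder for a pointer obtained from SherpaOnnxCreateOfflineSpeakerDiarization):
```js
// Every entry in exported_functions above becomes Module._<name> at runtime, e.g.:
const sampleRate = Module._SherpaOnnxOfflineSpeakerDiarizationGetSampleRate(handle);
// Strings and structs are marshalled with Module._malloc, setValue and stringToUTF8,
// which is why those helpers are listed in EXPORTED_RUNTIME_METHODS above.
```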
const startBtn = document.getElementById('startBtn');
const hint = document.getElementById('hint');
const numClustersInput = document.getElementById('numClustersInputID');
const thresholdInput = document.getElementById('thresholdInputID');
const textArea = document.getElementById('text');
const fileSelectCtrl = document.getElementById('file');
let sd = null;
let float32Samples = null;
Module = {};
Module.onRuntimeInitialized = function() {
console.log('Model files downloaded!');
console.log('Initializing speaker diarization ......');
sd = createOfflineSpeakerDiarization(Module)
console.log('sampleRate', sd.sampleRate);
hint.innerText =
'Initialized! Please select a wave file and click the Start button.';
fileSelectCtrl.disabled = false;
};
function onFileChange() {
var files = document.getElementById('file').files;
if (files.length == 0) {
console.log('No file selected');
float32Samples = null;
startBtn.disabled = true;
return;
}
textArea.value = '';
console.log('files: ' + files);
const file = files[0];
console.log(file);
console.log('file.name ' + file.name);
console.log('file.type ' + file.type);
console.log('file.size ' + file.size);
let audioCtx = new AudioContext({sampleRate: sd.sampleRate});
let reader = new FileReader();
reader.onload = function() {
console.log('reading file!');
audioCtx.decodeAudioData(reader.result, decodedDone);
};
function decodedDone(decoded) {
let typedArray = new Float32Array(decoded.length);
float32Samples = decoded.getChannelData(0);
startBtn.disabled = false;
}
reader.readAsArrayBuffer(file);
}
startBtn.onclick = function() {
textArea.value = '';
if (float32Samples == null) {
alert('Empty audio samples!');
startBtn.disabled = true;
return;
}
let numClusters = numClustersInput.value;
if (numClusters.trim().length == 0) {
alert(
'Please provide numClusters. Use -1 if you are not sure how many speakers there are');
return;
}
if (!numClusters.match(/^-?\d+$/)) {
alert(`Number of clusters ${
numClusters} is not an integer.\nPlease enter an integer`);
return;
}
numClusters = parseInt(numClusters, 10);
if (numClusters < -1) {
alert(`Number of clusters should be >= -1`);
return;
}
let threshold = 0.5;
if (numClusters <= 0) {
threshold = thresholdInput.value;
if (threshold.trim().length == 0) {
alert('Please provide a threshold.');
return;
}
threshold = parseFloat(threshold);
if (threshold < 0) {
alert(`Please enter a positive threshold`);
return;
}
}
let config = sd.config
config.clustering = {numClusters: numClusters, threshold: threshold};
sd.setConfig(config);
let segments = sd.process(float32Samples);
if (segments == null) {
textArea.value = 'No speakers detected';
return
}
let s = '';
let sep = '';
for (const seg of segments) {
// clang-format off
s += sep + `${seg.start.toFixed(2)} -- ${seg.end.toFixed(2)} speaker_${seg.speaker}`
// clang-format on
sep = '\n';
}
textArea.value = s;
}
... ...
# Introduction
Please refer to
https://github.com/k2-fsa/sherpa-onnx/releases/tag/speaker-segmentation-models
to download a speaker segmentation model
and
refer to
https://github.com/k2-fsa/sherpa-onnx/releases/tag/speaker-recongition-models
to download a speaker embedding extraction model.
Remember to rename the downloaded files.
The following is an example.
```bash
cd wasm/speaker-diarization/assets/
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/speaker-segmentation-models/sherpa-onnx-pyannote-segmentation-3-0.tar.bz2
tar xvf sherpa-onnx-pyannote-segmentation-3-0.tar.bz2
rm sherpa-onnx-pyannote-segmentation-3-0.tar.bz2
cp sherpa-onnx-pyannote-segmentation-3-0/model.onnx ./segmentation.onnx
rm -rf sherpa-onnx-pyannote-segmentation-3-0
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/speaker-recongition-models/3dspeaker_speech_eres2net_base_sv_zh-cn_3dspeaker_16k.onnx
mv 3dspeaker_speech_eres2net_base_sv_zh-cn_3dspeaker_16k.onnx ./embedding.onnx
```
... ...
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width" />
<title>Next-gen Kaldi WebAssembly with sherpa-onnx for Speaker Diarization</title>
<style>
h1,div {
text-align: center;
}
textarea {
width:100%;
}
</style>
</head>
<body>
<h1>
Next-gen Kaldi + WebAssembly<br/>
Speaker Diarization <br> with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a>
</h1>
<div>
<span id="hint">Loading model ... ...</span>
<br/>
<br/>
<label for="avatar">Choose a wav file:</label>
<input type="file" id="file" accept=".wav" onchange="onFileChange()" disabled></input>
<br/>
<br/>
<label for="numClusters" id="numClustersID">Number of speakers: </label>
<input type="text" id="numClustersInputID" name="numClusters" value="-1" />
<br/>
<br/>
<label for="clusteringThreshold" id="thresholdID">Clustering threshold: </label>
<input type="text" id="thresholdInputID" name="clusteringThreshold" value="0.5" />
<br/>
<br/>
<textarea id="text" rows="10" placeholder="If you know the actual number of speakers in the input wave file, please provide it via Number of speakers. Otherwise, please leave Number of speakers to -1 and provide Clustering threshold instead. A larger threshold leads to fewer clusters, i.e., fewer speakers; a smaller threshold leads to more clusters, i.e., more speakers."></textarea>
<br/>
<br/>
<button id="startBtn" disabled>Start</button>
</div>
<script src="app-speaker-diarization.js"></script>
<script src="sherpa-onnx-speaker-diarization.js"></script>
<script src="sherpa-onnx-wasm-main-speaker-diarization.js"></script>
</body>
... ...
function freeConfig(config, Module) {
if ('buffer' in config) {
Module._free(config.buffer);
}
if ('config' in config) {
freeConfig(config.config, Module)
}
if ('segmentation' in config) {
freeConfig(config.segmentation, Module)
}
if ('embedding' in config) {
freeConfig(config.embedding, Module)
}
if ('clustering' in config) {
freeConfig(config.clustering, Module)
}
Module._free(config.ptr);
}
function initSherpaOnnxOfflineSpeakerSegmentationPyannoteModelConfig(
config, Module) {
const modelLen = Module.lengthBytesUTF8(config.model || '') + 1;
const n = modelLen;
const buffer = Module._malloc(n);
const len = 1 * 4;
const ptr = Module._malloc(len);
let offset = 0;
Module.stringToUTF8(config.model || '', buffer + offset, modelLen);
offset += modelLen;
offset = 0;
Module.setValue(ptr, buffer + offset, 'i8*');
return {
buffer: buffer, ptr: ptr, len: len,
}
}
function initSherpaOnnxOfflineSpeakerSegmentationModelConfig(config, Module) {
if (!('pyannote' in config)) {
config.pyannote = {
model: '',
};
}
const pyannote = initSherpaOnnxOfflineSpeakerSegmentationPyannoteModelConfig(
config.pyannote, Module);
const len = pyannote.len + 3 * 4;
const ptr = Module._malloc(len);
let offset = 0;
Module._CopyHeap(pyannote.ptr, pyannote.len, ptr + offset);
offset += pyannote.len;
Module.setValue(ptr + offset, config.numThreads || 1, 'i32');
offset += 4;
Module.setValue(ptr + offset, config.debug || 1, 'i32');
offset += 4;
const providerLen = Module.lengthBytesUTF8(config.provider || 'cpu') + 1;
const buffer = Module._malloc(providerLen);
Module.stringToUTF8(config.provider || 'cpu', buffer, providerLen);
Module.setValue(ptr + offset, buffer, 'i8*');
return {
buffer: buffer,
ptr: ptr,
len: len,
config: pyannote,
};
}
function initSherpaOnnxSpeakerEmbeddingExtractorConfig(config, Module) {
const modelLen = Module.lengthBytesUTF8(config.model || '') + 1;
const providerLen = Module.lengthBytesUTF8(config.provider || 'cpu') + 1;
const n = modelLen + providerLen;
const buffer = Module._malloc(n);
const len = 4 * 4;
const ptr = Module._malloc(len);
let offset = 0;
Module.stringToUTF8(config.model || '', buffer + offset, modelLen);
offset += modelLen;
Module.stringToUTF8(config.provider || 'cpu', buffer + offset, providerLen);
offset += providerLen;
offset = 0
Module.setValue(ptr + offset, buffer, 'i8*');
offset += 4;
Module.setValue(ptr + offset, config.numThreads || 1, 'i32');
offset += 4;
Module.setValue(ptr + offset, config.debug || 1, 'i32');
offset += 4;
Module.setValue(ptr + offset, buffer + modelLen, 'i8*');
offset += 4;
return {
buffer: buffer,
ptr: ptr,
len: len,
};
}
function initSherpaOnnxFastClusteringConfig(config, Module) {
const len = 2 * 4;
const ptr = Module._malloc(len);
let offset = 0;
Module.setValue(ptr + offset, config.numClusters || -1, 'i32');
offset += 4;
Module.setValue(ptr + offset, config.threshold || 0.5, 'float');
offset += 4;
return {
ptr: ptr,
len: len,
};
}
function initSherpaOnnxOfflineSpeakerDiarizationConfig(config, Module) {
if (!('segmentation' in config)) {
config.segmentation = {
pyannote: {model: ''},
numThreads: 1,
debug: 0,
provider: 'cpu',
};
}
if (!('embedding' in config)) {
config.embedding = {
model: '',
numThreads: 1,
debug: 0,
provider: 'cpu',
};
}
if (!('clustering' in config)) {
config.clustering = {
numClusters: -1,
threshold: 0.5,
};
}
const segmentation = initSherpaOnnxOfflineSpeakerSegmentationModelConfig(
config.segmentation, Module);
const embedding =
initSherpaOnnxSpeakerEmbeddingExtractorConfig(config.embedding, Module);
const clustering =
initSherpaOnnxFastClusteringConfig(config.clustering, Module);
const len = segmentation.len + embedding.len + clustering.len + 2 * 4;
const ptr = Module._malloc(len);
let offset = 0;
Module._CopyHeap(segmentation.ptr, segmentation.len, ptr + offset);
offset += segmentation.len;
Module._CopyHeap(embedding.ptr, embedding.len, ptr + offset);
offset += embedding.len;
Module._CopyHeap(clustering.ptr, clustering.len, ptr + offset);
offset += clustering.len;
Module.setValue(ptr + offset, config.minDurationOn || 0.2, 'float');
offset += 4;
Module.setValue(ptr + offset, config.minDurationOff || 0.5, 'float');
offset += 4;
return {
ptr: ptr, len: len, segmentation: segmentation, embedding: embedding,
clustering: clustering,
}
}
class OfflineSpeakerDiarization {
constructor(configObj, Module) {
const config =
initSherpaOnnxOfflineSpeakerDiarizationConfig(configObj, Module)
// Module._MyPrint(config.ptr);
const handle =
Module._SherpaOnnxCreateOfflineSpeakerDiarization(config.ptr);
freeConfig(config, Module);
this.handle = handle;
this.sampleRate =
Module._SherpaOnnxOfflineSpeakerDiarizationGetSampleRate(this.handle);
this.Module = Module
this.config = configObj;
}
free() {
this.Module._SherpaOnnxDestroyOfflineSpeakerDiarization(this.handle);
this.handle = 0
}
setConfig(configObj) {
if (!('clustering' in configObj)) {
return;
}
const config =
initSherpaOnnxOfflineSpeakerDiarizationConfig(configObj, this.Module);
this.Module._SherpaOnnxOfflineSpeakerDiarizationSetConfig(
this.handle, config.ptr);
freeConfig(config, this.Module);
this.config.clustering = configObj.clustering;
}
process(samples) {
const pointer =
this.Module._malloc(samples.length * samples.BYTES_PER_ELEMENT);
this.Module.HEAPF32.set(samples, pointer / samples.BYTES_PER_ELEMENT);
let r = this.Module._SherpaOnnxOfflineSpeakerDiarizationProcess(
this.handle, pointer, samples.length);
this.Module._free(pointer);
let numSegments =
this.Module._SherpaOnnxOfflineSpeakerDiarizationResultGetNumSegments(r);
let segments =
this.Module._SherpaOnnxOfflineSpeakerDiarizationResultSortByStartTime(
r);
let ans = [];
let sizeOfSegment = 3 * 4;
for (let i = 0; i < numSegments; ++i) {
let p = segments + i * sizeOfSegment
let start = this.Module.HEAPF32[p / 4 + 0];
let end = this.Module.HEAPF32[p / 4 + 1];
let speaker = this.Module.HEAP32[p / 4 + 2];
ans.push({start: start, end: end, speaker: speaker});
}
this.Module._SherpaOnnxOfflineSpeakerDiarizationDestroySegment(segments);
this.Module._SherpaOnnxOfflineSpeakerDiarizationDestroyResult(r);
return ans;
}
}
function createOfflineSpeakerDiarization(Module, myConfig) {
let config = {
segmentation: {
pyannote: {model: './segmentation.onnx'},
},
embedding: {model: './embedding.onnx'},
clustering: {numClusters: -1, threshold: 0.5},
minDurationOn: 0.3,
minDurationOff: 0.5,
};
if (myConfig) {
config = myConfig;
}
return new OfflineSpeakerDiarization(config, Module);
}
if (typeof process == 'object' && typeof process.versions == 'object' &&
typeof process.versions.node == 'string') {
module.exports = {
createOfflineSpeakerDiarization,
};
}
... ...
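For completeness, a short lifecycle sketch for this binding in the browser, assuming the Emscripten-generated script has already fired onRuntimeInitialized (as in app-speaker-diarization.js earlier in this commit) and that float32Samples holds mono audio at sd.sampleRate; the explicit free() call is the one step the demo page does not exercise:
```js
// Hypothetical sketch; with no myConfig argument, createOfflineSpeakerDiarization
// falls back to ./segmentation.onnx and ./embedding.onnx bundled via --preload-file.
const sd = createOfflineSpeakerDiarization(Module);
sd.setConfig({clustering: {numClusters: -1, threshold: 0.6}});  // only clustering is used
const segments = sd.process(float32Samples);  // [{start, end, speaker}, ...]
console.log(segments);
sd.free();  // releases the native handle created in the constructor
```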
// wasm/sherpa-onnx-wasm-main-speaker-diarization.cc
//
// Copyright (c) 2024 Xiaomi Corporation
#include <stdio.h>
#include <algorithm>
#include <memory>
#include "sherpa-onnx/c-api/c-api.h"
// see also
// https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html
extern "C" {
static_assert(sizeof(SherpaOnnxOfflineSpeakerSegmentationPyannoteModelConfig) ==
1 * 4,
"");
static_assert(
sizeof(SherpaOnnxOfflineSpeakerSegmentationModelConfig) ==
sizeof(SherpaOnnxOfflineSpeakerSegmentationPyannoteModelConfig) + 3 * 4,
"");
static_assert(sizeof(SherpaOnnxFastClusteringConfig) == 2 * 4, "");
static_assert(sizeof(SherpaOnnxSpeakerEmbeddingExtractorConfig) == 4 * 4, "");
static_assert(sizeof(SherpaOnnxOfflineSpeakerDiarizationConfig) ==
sizeof(SherpaOnnxOfflineSpeakerSegmentationModelConfig) +
sizeof(SherpaOnnxSpeakerEmbeddingExtractorConfig) +
sizeof(SherpaOnnxFastClusteringConfig) + 2 * 4,
"");
void MyPrint(const SherpaOnnxOfflineSpeakerDiarizationConfig *sd_config) {
const auto &segmentation = sd_config->segmentation;
const auto &embedding = sd_config->embedding;
const auto &clustering = sd_config->clustering;
fprintf(stdout, "----------segmentation config----------\n");
fprintf(stdout, "pyannote model: %s\n", segmentation.pyannote.model);
fprintf(stdout, "num threads: %d\n", segmentation.num_threads);
fprintf(stdout, "debug: %d\n", segmentation.debug);
fprintf(stdout, "provider: %s\n", segmentation.provider);
fprintf(stdout, "----------embedding config----------\n");
fprintf(stdout, "model: %s\n", embedding.model);
fprintf(stdout, "num threads: %d\n", embedding.num_threads);
fprintf(stdout, "debug: %d\n", embedding.debug);
fprintf(stdout, "provider: %s\n", embedding.provider);
fprintf(stdout, "----------clustering config----------\n");
fprintf(stdout, "num_clusters: %d\n", clustering.num_clusters);
fprintf(stdout, "threshold: %.3f\n", clustering.threshold);
fprintf(stdout, "min_duration_on: %.3f\n", sd_config->min_duration_on);
fprintf(stdout, "min_duration_off: %.3f\n", sd_config->min_duration_off);
}
void CopyHeap(const char *src, int32_t num_bytes, char *dst) {
std::copy(src, src + num_bytes, dst);
}
}
... ...
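The static_asserts above pin down the byte layout of the C structs so that the hand-written _malloc/setValue code in sherpa-onnx-speaker-diarization.js stays in sync with the C API. A hedged sketch of the sizes those asserts imply (every field is 4 bytes on wasm32), matching the len values used by the init*Config helpers:
```js
// Sizes implied by the static_asserts in sherpa-onnx-wasm-main-speaker-diarization.cc.
const pyannoteLen = 1 * 4;                    // one char* (model path)
const segmentationLen = pyannoteLen + 3 * 4;  // + num_threads, debug, provider
const embeddingLen = 4 * 4;                   // model, num_threads, debug, provider
const clusteringLen = 2 * 4;                  // num_clusters, threshold
const diarizationLen =
    segmentationLen + embeddingLen + clusteringLen + 2 * 4;  // + min_duration_on/off
console.log(diarizationLen);  // 48 bytes, the layout written by the JS binding
```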