Fangjun Kuang
Committed by GitHub

Use static libraries for MFC examples (#210)

name: mfc

on:
  push:
    branches:
      - master
    tags:
      - '*'
    paths:
      - '.github/workflows/mfc.yaml'
      - 'CMakeLists.txt'
      - 'cmake/**'
      - 'mfc-examples/**'
      - 'sherpa-onnx/csrc/*'
  pull_request:
    branches:
      - master
    paths:
      - '.github/workflows/mfc.yaml'
      - 'CMakeLists.txt'
      - 'cmake/**'
      - 'mfc-examples/**'
      - 'sherpa-onnx/csrc/*'
  release:
    types:
      - published
  workflow_dispatch:
    inputs:
      release:
        description: "Whether to release"
        type: boolean

env:
  RELEASE:
    |- # Release if there is a release tag name or a release flag in workflow_dispatch
    ${{ github.event.release.tag_name != '' || github.event.inputs.release == 'true' }}
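  # Note: the block scalar above evaluates to the string 'true' or 'false';
  # later steps gate on it with `if: env.RELEASE == 'true'`, so a release is
  # made for published releases/tag pushes and for manual runs that set the
  # release input.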

concurrency:
  group: mfc-${{ github.ref }}
  cancel-in-progress: true

jobs:
  mfc:
    name: MFC for ${{ matrix.arch }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [windows-latest]
        # arch: [x64, Win32]
        arch: [x64]
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Display MSBuild info
        shell: cmd
        run: |
          set path="C:\Program Files\Microsoft Visual Studio\2022\Enterprise\MSBuild\Current\Bin"
          msbuild -help
      - name: Configure CMake
        shell: bash
        run: |
          mkdir build
          cd build
          cmake -A ${{ matrix.arch }} -D CMAKE_BUILD_TYPE=Release -D BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=./install ..
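          # -A selects the MSVC target platform (x64 or Win32);
          # BUILD_SHARED_LIBS=OFF makes CMake build and install static .lib
          # files, which is what the MFC projects now link against.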
      - name: Build sherpa-onnx for windows
        shell: bash
        run: |
          cd build
          cmake --build . --config Release -- -m:2
          cmake --build . --config Release --target install -- -m:2

          ls -lh install/*
          ls -lh install/lib
          ls -lh install/bin
      - name: Build MFC
        shell: cmd
        run: |
          set path="C:\Program Files\Microsoft Visual Studio\2022\Enterprise\MSBuild\Current\Bin"

          cd mfc-examples
          msbuild .\mfc-examples.sln /property:Configuration=Release /property:Platform=${{ matrix.arch }}

      - name: Copy files
        shell: bash
        run: |
          SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
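          # The pipeline above extracts the version (e.g. 1.5.1) from the
          # line `set(SHERPA_ONNX_VERSION "1.5.1")` in CMakeLists.txt and
          # prefixes it with "v".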
          arch=${{ matrix.arch }}

          cd mfc-examples/$arch/Release
          cp StreamingSpeechRecognition.exe sherpa-onnx-streaming-${SHERPA_ONNX_VERSION}.exe
          ls -lh

      - name: Upload artifact
        uses: actions/upload-artifact@v2
        with:
          name: streaming-speech-recognition-${{ matrix.arch }}
          path: ./mfc-examples/${{ matrix.arch }}/Release/StreamingSpeechRecognition.exe

      - name: Release pre-compiled binaries and libs for Windows
        if: env.RELEASE == 'true'
        uses: svenstaro/upload-release-action@v2
        with:
          file_glob: true
          overwrite: true
          file: ./mfc-examples/${{ matrix.arch }}/Release/sherpa-onnx*.exe
... ...
... ... @@ -46,27 +46,13 @@ concurrency:
 jobs:
   windows_x64:
-    name: ${{ matrix.vs-version }}
+    name: Windows x64
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        include:
-          - vs-version: vs2015
-            toolset-version: v140
-            os: windows-2019
-          - vs-version: vs2017
-            toolset-version: v141
-            os: windows-2019
-          - vs-version: vs2019
-            toolset-version: v142
-            os: windows-2022
-          - vs-version: vs2022
-            toolset-version: v143
-            os: windows-2022
+        os: [windows-latest]
+        shared_lib: [ON, OFF]
     steps:
       - uses: actions/checkout@v2
... ... @@ -78,7 +64,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake -T ${{ matrix.toolset-version}},host=x64 -A x64 -D CMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=./install ..
+          cmake -A x64 -D CMAKE_BUILD_TYPE=Release -D BUILD_SHARED_LIBS=${{ matrix.shared_lib }} -DCMAKE_INSTALL_PREFIX=./install ..

       - name: Build sherpa-onnx for windows
         shell: bash
... ... @@ -122,7 +108,7 @@ jobs:
           .github/scripts/test-online-transducer.sh

       - name: Copy files
-        if: env.RELEASE == 'true' && matrix.vs-version == 'vs2015'
+        if: env.RELEASE == 'true'
         shell: bash
         run: |
           SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
... ... @@ -137,7 +123,7 @@ jobs:
           tar cjvf ${dst}.tar.bz2 $dst

       - name: Release pre-compiled binaries and libs for Windows
-        if: env.RELEASE == 'true' && matrix.vs-version == 'vs2015'
+        if: env.RELEASE == 'true'
         uses: svenstaro/upload-release-action@v2
         with:
           file_glob: true
... ...
... ... @@ -44,28 +44,13 @@ concurrency:
 jobs:
   windows_x86:
     if: true # disable windows x86 CI for now
-    name: ${{ matrix.vs-version }}
+    name: Windows x86
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        include:
-          - vs-version: vs2015
-            toolset-version: v140
-            os: windows-2019
-          - vs-version: vs2017
-            toolset-version: v141
-            os: windows-2019
-          - vs-version: vs2019
-            toolset-version: v142
-            os: windows-2022
-          - vs-version: vs2022
-            toolset-version: v143
-            os: windows-2022
+        os: [windows-latest]
+        shared_lib: [ON, OFF]
     steps:
       - uses: actions/checkout@v2
... ... @@ -77,7 +62,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake -T ${{ matrix.toolset-version}},host=x64 -A Win32 -D CMAKE_BUILD_TYPE=Release -D CMAKE_INSTALL_PREFIX=./install ..
+          cmake -A Win32 -D CMAKE_BUILD_TYPE=Release -D BUILD_SHARED_LIBS=${{ matrix.shared_lib }} -D CMAKE_INSTALL_PREFIX=./install ..

       - name: Build sherpa-onnx for windows
         shell: bash
... ...
 cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
 project(sherpa-onnx)

-set(SHERPA_ONNX_VERSION "1.5.0")
+set(SHERPA_ONNX_VERSION "1.5.1")

 # Disable warning about
 #
... ... @@ -71,6 +71,18 @@ if(BUILD_SHARED_LIBS AND MSVC)
   set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 endif()

+if(NOT BUILD_SHARED_LIBS AND MSVC)
+  # see https://cmake.org/cmake/help/latest/prop_tgt/MSVC_RUNTIME_LIBRARY.html
+  # https://stackoverflow.com/questions/14172856/compile-with-mt-instead-of-md-using-cmake
+  if(MSVC)
+    add_compile_options(
+      $<$<CONFIG:>:/MT> #---------|
+      $<$<CONFIG:Debug>:/MTd> #---|-- Statically link the runtime libraries
+      $<$<CONFIG:Release>:/MT> #--|
+    )
+  endif()
+endif()
+
 message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
 message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
 message(STATUS "BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS}")
... ... @@ -154,3 +166,4 @@ add_subdirectory(sherpa-onnx)
 if(SHERPA_ONNX_ENABLE_C_API)
   add_subdirectory(c-api-examples)
 endif()
+message(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
... ...
filter=-./mfc-examples
... ...
 function(download_kaldi_native_fbank)
   include(FetchContent)

-  set(kaldi_native_fbank_URL  "https://github.com/csukuangfj/kaldi-native-fbank/archive/refs/tags/v1.14.tar.gz")
-  set(kaldi_native_fbank_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/kaldi-native-fbank-1.14.tar.gz")
-  set(kaldi_native_fbank_HASH "SHA256=6a66638a111d3ce21fe6f29cbf9ab3dbcae2331c77391bf825927df5cbf2babe")
+  set(kaldi_native_fbank_URL  "https://github.com/csukuangfj/kaldi-native-fbank/archive/refs/tags/v1.17.tar.gz")
+  set(kaldi_native_fbank_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/kaldi-native-fbank-1.17.tar.gz")
+  set(kaldi_native_fbank_HASH "SHA256=300dc282d51d738e70f194ef13a50bf4cf8d54a3b2686d75f7fc2fb821f8c1e6")

   set(KALDI_NATIVE_FBANK_BUILD_TESTS OFF CACHE BOOL "" FORCE)
   set(KALDI_NATIVE_FBANK_BUILD_PYTHON OFF CACHE BOOL "" FORCE)
... ... @@ -12,11 +12,11 @@ function(download_kaldi_native_fbank)
   # If you don't have access to the Internet,
   # please pre-download kaldi-native-fbank
   set(possible_file_locations
-    $ENV{HOME}/Downloads/kaldi-native-fbank-1.14.tar.gz
-    ${PROJECT_SOURCE_DIR}/kaldi-native-fbank-1.14.tar.gz
-    ${PROJECT_BINARY_DIR}/kaldi-native-fbank-1.14.tar.gz
-    /tmp/kaldi-native-fbank-1.14.tar.gz
-    /star-fj/fangjun/download/github/kaldi-native-fbank-1.14.tar.gz
+    $ENV{HOME}/Downloads/kaldi-native-fbank-1.17.tar.gz
+    ${PROJECT_SOURCE_DIR}/kaldi-native-fbank-1.17.tar.gz
+    ${PROJECT_BINARY_DIR}/kaldi-native-fbank-1.17.tar.gz
+    /tmp/kaldi-native-fbank-1.17.tar.gz
+    /star-fj/fangjun/download/github/kaldi-native-fbank-1.17.tar.gz
   )

   foreach(f IN LISTS possible_file_locations)
... ... @@ -88,50 +88,80 @@ function(download_onnxruntime)
   message(STATUS "CMAKE_VS_PLATFORM_NAME: ${CMAKE_VS_PLATFORM_NAME}")
   if(CMAKE_VS_PLATFORM_NAME STREQUAL Win32 OR CMAKE_VS_PLATFORM_NAME STREQUAL win32)
-    # If you don't have access to the Internet,
-    # please pre-download onnxruntime
-    #
-    # for 32-bit windows
-    set(possible_file_locations
-      $ENV{HOME}/Downloads/onnxruntime-win-x86-1.15.1.zip
-      ${PROJECT_SOURCE_DIR}/onnxruntime-win-x86-1.15.1.zip
-      ${PROJECT_BINARY_DIR}/onnxruntime-win-x86-1.15.1.zip
-      /tmp/onnxruntime-win-x86-1.15.1.zip
-    )
-
-    set(onnxruntime_URL  "https://github.com/microsoft/onnxruntime/releases/download/v1.15.1/onnxruntime-win-x86-1.15.1.zip")
-    set(onnxruntime_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/onnxruntime-win-x86-1.15.1.zip")
-    set(onnxruntime_HASH "SHA256=8de18fdf274a8adcd95272fcf58beda0fe2fb37f0cd62c02bc4bb6200429e4e2")
+    if(BUILD_SHARED_LIBS)
+      # If you don't have access to the Internet,
+      # please pre-download onnxruntime
+      #
+      # for 32-bit windows
+      set(possible_file_locations
+        $ENV{HOME}/Downloads/onnxruntime-win-x86-1.15.1.zip
+        ${PROJECT_SOURCE_DIR}/onnxruntime-win-x86-1.15.1.zip
+        ${PROJECT_BINARY_DIR}/onnxruntime-win-x86-1.15.1.zip
+        /tmp/onnxruntime-win-x86-1.15.1.zip
+      )
+
+      set(onnxruntime_URL  "https://github.com/microsoft/onnxruntime/releases/download/v1.15.1/onnxruntime-win-x86-1.15.1.zip")
+      set(onnxruntime_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/onnxruntime-win-x86-1.15.1.zip")
+      set(onnxruntime_HASH "SHA256=8de18fdf274a8adcd95272fcf58beda0fe2fb37f0cd62c02bc4bb6200429e4e2")
+    else()
+      set(possible_file_locations
+        $ENV{HOME}/Downloads/onnxruntime-win-x86-static-1.15.1.tar.bz2
+        ${PROJECT_SOURCE_DIR}/onnxruntime-win-x86-static-1.15.1.tar.bz2
+        ${PROJECT_BINARY_DIR}/onnxruntime-win-x86-static-1.15.1.tar.bz2
+        /tmp/onnxruntime-win-x86-static-1.15.1.tar.bz2
+      )
+
+      set(onnxruntime_URL  "https://huggingface.co/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-win-x86-static-1.15.1.tar.bz2")
+      set(onnxruntime_URL2 "")
+      set(onnxruntime_HASH "SHA256=a2b33a3e8a1f89cddf303f0a97a5a88f4202579c653cfb29158c8cf7da3734eb")
+    endif()

     if(SHERPA_ONNX_ENABLE_GPU)
       message(FATAL_ERROR "GPU support for Win32 is not supported!")
     endif()
   else()
-    # If you don't have access to the Internet,
-    # please pre-download onnxruntime
-    #
-    # for 64-bit windows
-    set(possible_file_locations
-      $ENV{HOME}/Downloads/onnxruntime-win-x64-1.15.1.zip
-      ${PROJECT_SOURCE_DIR}/onnxruntime-win-x64-1.15.1.zip
-      ${PROJECT_BINARY_DIR}/onnxruntime-win-x64-1.15.1.zip
-      /tmp/onnxruntime-win-x64-1.15.1.zip
-    )
-
-    set(onnxruntime_URL  "https://github.com/microsoft/onnxruntime/releases/download/v1.15.1/onnxruntime-win-x64-1.15.1.zip")
-    set(onnxruntime_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/onnxruntime-win-x64-1.15.1.zip")
-    set(onnxruntime_HASH "SHA256=261308ee5526dfd3f405ce8863e43d624a2e0bcd16b2d33cdea8c120ab3534d3")
-
-    if(SHERPA_ONNX_ENABLE_GPU)
-      set(onnxruntime_URL  "https://github.com/microsoft/onnxruntime/releases/download/v1.15.1/onnxruntime-win-x64-gpu-1.15.1.zip")
-      set(onnxruntime_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/onnxruntime-win-x64-gpu-1.15.1.zip")
-      set(onnxruntime_HASH "SHA256=dcc3a385b415dd2e4a813018b71da5085d9b97774552edf17947826a255a3732")
-      set(possible_file_locations
-        $ENV{HOME}/Downloads/onnxruntime-win-x64-gpu-1.15.1.zip
-        ${PROJECT_SOURCE_DIR}/onnxruntime-win-x64-gpu-1.15.1.zip
-        ${PROJECT_BINARY_DIR}/onnxruntime-win-x64-gpu-1.15.1.zip
-        /tmp/onnxruntime-win-x64-gpu-1.15.1.zip
-      )
-    endif()
+    if(BUILD_SHARED_LIBS)
+      # If you don't have access to the Internet,
+      # please pre-download onnxruntime
+      set(possible_file_locations
+        $ENV{HOME}/Downloads/onnxruntime-win-x64-1.15.1.zip
+        ${PROJECT_SOURCE_DIR}/onnxruntime-win-x64-1.15.1.zip
+        ${PROJECT_BINARY_DIR}/onnxruntime-win-x64-1.15.1.zip
+        /tmp/onnxruntime-win-x64-1.15.1.zip
+      )
+
+      set(onnxruntime_URL  "https://github.com/microsoft/onnxruntime/releases/download/v1.15.1/onnxruntime-win-x64-1.15.1.zip")
+      set(onnxruntime_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/onnxruntime-win-x64-1.15.1.zip")
+      set(onnxruntime_HASH "SHA256=261308ee5526dfd3f405ce8863e43d624a2e0bcd16b2d33cdea8c120ab3534d3")
+
+      if(SHERPA_ONNX_ENABLE_GPU)
+        set(onnxruntime_URL  "https://github.com/microsoft/onnxruntime/releases/download/v1.15.1/onnxruntime-win-x64-gpu-1.15.1.zip")
+        set(onnxruntime_URL2 "https://huggingface.co/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/onnxruntime-win-x64-gpu-1.15.1.zip")
+        set(onnxruntime_HASH "SHA256=dcc3a385b415dd2e4a813018b71da5085d9b97774552edf17947826a255a3732")
+        set(possible_file_locations
+          $ENV{HOME}/Downloads/onnxruntime-win-x64-gpu-1.15.1.zip
+          ${PROJECT_SOURCE_DIR}/onnxruntime-win-x64-gpu-1.15.1.zip
+          ${PROJECT_BINARY_DIR}/onnxruntime-win-x64-gpu-1.15.1.zip
+          /tmp/onnxruntime-win-x64-gpu-1.15.1.zip
+        )
+      endif()
+    else()
+      # static libraries for windows x64
+      message(STATUS "Use static onnxruntime libraries")
+      # If you don't have access to the Internet,
+      # please pre-download onnxruntime
+      set(possible_file_locations
+        $ENV{HOME}/Downloads/onnxruntime-win-x64-static-1.15.1.tar.bz2
+        ${PROJECT_SOURCE_DIR}/onnxruntime-win-x64-static-1.15.1.tar.bz2
+        ${PROJECT_BINARY_DIR}/onnxruntime-win-x64-static-1.15.1.tar.bz2
+        /tmp/onnxruntime-win-x64-static-1.15.1.tar.bz2
+      )
+
+      set(onnxruntime_URL  "https://huggingface.co/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-win-x64-static-1.15.1.tar.bz2")
+      set(onnxruntime_URL2 "")
+      set(onnxruntime_HASH "SHA256=f5c19ac1fc6a61c78a231a41df10aede2586665ab397bdc3f007eb8d2c8d4a19")
+    endif()
   endif()
   # After downloading, it contains:
... ... @@ -170,20 +200,22 @@ function(download_onnxruntime)
   endif()

   message(STATUS "onnxruntime is downloaded to ${onnxruntime_SOURCE_DIR}")

-  find_library(location_onnxruntime onnxruntime
-    PATHS
-    "${onnxruntime_SOURCE_DIR}/lib"
-    NO_CMAKE_SYSTEM_PATH
-  )
-
-  message(STATUS "location_onnxruntime: ${location_onnxruntime}")
-
-  add_library(onnxruntime SHARED IMPORTED)
-
-  set_target_properties(onnxruntime PROPERTIES
-    IMPORTED_LOCATION ${location_onnxruntime}
-    INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
-  )
+  if(BUILD_SHARED_LIBS OR NOT WIN32)
+    find_library(location_onnxruntime onnxruntime
+      PATHS
+      "${onnxruntime_SOURCE_DIR}/lib"
+      NO_CMAKE_SYSTEM_PATH
+    )
+
+    message(STATUS "location_onnxruntime: ${location_onnxruntime}")
+
+    add_library(onnxruntime SHARED IMPORTED)
+
+    set_target_properties(onnxruntime PROPERTIES
+      IMPORTED_LOCATION ${location_onnxruntime}
+      INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
+    )
+  endif()

   if(SHERPA_ONNX_ENABLE_GPU AND NOT WIN32)
     find_library(location_onnxruntime_cuda_lib onnxruntime_providers_cuda
... ... @@ -198,32 +230,37 @@ function(download_onnxruntime)
   endif()

   if(WIN32)
-    set_property(TARGET onnxruntime
-      PROPERTY
-        IMPORTED_IMPLIB "${onnxruntime_SOURCE_DIR}/lib/onnxruntime.lib"
-    )
-
-    file(COPY ${onnxruntime_SOURCE_DIR}/lib/onnxruntime.dll
-      DESTINATION
-        ${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}
-    )
-
-    if(SHERPA_ONNX_ENABLE_GPU)
-      add_library(onnxruntime_providers_cuda SHARED IMPORTED)
-      set_target_properties(onnxruntime_providers_cuda PROPERTIES
-        IMPORTED_LOCATION ${location_onnxruntime}
-        INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
-      )
-      set_property(TARGET onnxruntime_providers_cuda
-        PROPERTY
-          IMPORTED_IMPLIB "${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_cuda.lib"
-      )
-      file(COPY ${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_cuda.dll
-        DESTINATION
-          ${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}
-      )
-    endif()
+    if(BUILD_SHARED_LIBS)
+      set_property(TARGET onnxruntime
+        PROPERTY
+          IMPORTED_IMPLIB "${onnxruntime_SOURCE_DIR}/lib/onnxruntime.lib"
+      )
+
+      file(COPY ${onnxruntime_SOURCE_DIR}/lib/onnxruntime.dll
+        DESTINATION
+          ${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}
+      )
+
+      if(SHERPA_ONNX_ENABLE_GPU)
+        add_library(onnxruntime_providers_cuda SHARED IMPORTED)
+        set_target_properties(onnxruntime_providers_cuda PROPERTIES
+          IMPORTED_LOCATION ${location_onnxruntime}
+          INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
+        )
+        set_property(TARGET onnxruntime_providers_cuda
+          PROPERTY
+            IMPORTED_IMPLIB "${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_cuda.lib"
+        )
+        file(COPY ${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_cuda.dll
+          DESTINATION
+            ${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}
+        )
+      endif()
+    else()
+      # for static libraries, we use onnxruntime_lib_files directly below
+      include_directories(${onnxruntime_SOURCE_DIR}/include)
+    endif()
   endif()
... ... @@ -232,7 +269,12 @@ function(download_onnxruntime)
   elseif(APPLE)
     file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/libonnxruntime.*.*dylib")
   elseif(WIN32)
-    file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/*.dll")
+    if(BUILD_SHARED_LIBS)
+      file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/*.dll")
+    else()
+      file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/*.lib")
+      set(onnxruntime_lib_files ${onnxruntime_lib_files} PARENT_SCOPE)
+    endif()
   endif()

   message(STATUS "onnxruntime lib files: ${onnxruntime_lib_files}")
... ...
... ... @@ -17,13 +17,12 @@ git clone https://github.com/k2-fsa/sherpa-onnx
 cd sherpa-onnx
 mkdir build
-cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=ON -DCMAKE_INSTALL_PREFIX=./install ..
+cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=./install ..
 cmake --build . --config Release --target install
 cd ../mfc-examples
 msbuild ./mfc-examples.sln /property:Configuration=Release /property:Platform=x64
-cp ../build/install/lib/*.dll ./x64/Release/

 # now run the program
... ...
... ... @@ -3,11 +3,12 @@
 // application.
 //

-#include "StreamingSpeechRecognition.h"
+#include "pch.h"
+
+#include "framework.h"
+#include "StreamingSpeechRecognition.h"
 #include "StreamingSpeechRecognitionDlg.h"
-#include "framework.h"
-#include "pch.h"

 #ifdef _DEBUG
 #define new DEBUG_NEW
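
A note on the reordering above (and the matching one in StreamingSpeechRecognitionDlg.cpp further down): with MSVC precompiled headers (/Yu"pch.h"), the compiler skips everything in the file up to the pch include, so "pch.h" must be the very first include; alphabetical include sorting breaks the build. A minimal sketch of the required shape:

    // Assumption: the file is compiled with /Yu"pch.h" (Use Precompiled Header).
    #include "pch.h"        // must come first; MSVC ignores anything above it

    #include "framework.h"  // MFC core; relies on macros pch.h sets up

    // Remaining headers may be sorted freely below this point.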
... ...
// Microsoft Visual C++ generated resource script.
... ...
... ... @@ -51,7 +51,7 @@
   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
     <ConfigurationType>Application</ConfigurationType>
     <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v142</PlatformToolset>
+    <PlatformToolset>v143</PlatformToolset>
     <WholeProgramOptimization>true</WholeProgramOptimization>
     <CharacterSet>Unicode</CharacterSet>
     <UseOfMfc>Static</UseOfMfc>
... ...
 // StreamingSpeechRecognitionDlg.cpp : implementation file
 //

+#include "pch.h"
+
+#include "framework.h"
+#include "afxdialogex.h"
 #include "StreamingSpeechRecognitionDlg.h"
... ... @@ -10,9 +14,7 @@
 #include <vector>

 #include "StreamingSpeechRecognition.h"
-#include "afxdialogex.h"
-#include "framework.h"
-#include "pch.h"

 #ifdef _DEBUG
 #define new DEBUG_NEW
... ... @@ -131,6 +133,7 @@ static int32_t RecordCallback(const void *input_buffer,
 void CStreamingSpeechRecognitionDlg::OnBnClickedOk() {
   if (!recognizer_) {
     AppendLineToMultilineEditCtrl("Creating recognizer...");
+    AppendLineToMultilineEditCtrl("It will take several seconds. Please wait");
     InitRecognizer();
     if (!recognizer_) {
       // failed to create the recognizer
... ... @@ -264,20 +267,56 @@ void CStreamingSpeechRecognitionDlg::InitRecognizer() {
   if (!is_ok) {
     my_btn_.EnableWindow(FALSE);
     std::string msg =
-        "\r\nPlease go to "
+        "\r\nPlease go to\r\n"
         "https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html "
         "\r\n";
-    msg += "to download an offline model.";
-    msg +=
-        " You need to rename them to encoder.onnx, decoder.onnx, and "
-        "joiner.onnx correspoondingly";
+    msg += "to download a streaming model, i.e., an online model.\r\n";
+    msg +=
+        "You need to rename them to encoder.onnx, decoder.onnx, and "
+        "joiner.onnx correspondingly.\r\n\r\n";
+    msg +=
+        "We use the following model as an example to show you how to do "
+        "that.\r\n";
+    msg +=
+        "https://huggingface.co/pkufool/"
+        "icefall-asr-zipformer-streaming-wenetspeech-20230615";
+    msg += "\r\n\r\n";
+    msg +=
+        "wget "
+        "https://huggingface.co/pkufool/"
+        "icefall-asr-zipformer-streaming-wenetspeech-20230615/resolve/main/exp/"
+        "encoder-epoch-12-avg-4-chunk-16-left-128.onnx\r\n";
+    msg +=
+        "wget "
+        "https://huggingface.co/pkufool/"
+        "icefall-asr-zipformer-streaming-wenetspeech-20230615/resolve/main/exp/"
+        "decoder-epoch-12-avg-4-chunk-16-left-128.onnx\r\n";
+    msg +=
+        "wget "
+        "https://huggingface.co/pkufool/"
+        "icefall-asr-zipformer-streaming-wenetspeech-20230615/resolve/main/exp/"
+        "joiner-epoch-12-avg-4-chunk-16-left-128.onnx\r\n";
+    msg +=
+        "wget "
+        "https://huggingface.co/pkufool/"
+        "icefall-asr-zipformer-streaming-wenetspeech-20230615/resolve/main/"
+        "data/lang_char/tokens.txt\r\n";
+    msg += "\r\nNow rename them.\r\n";
+    msg += "mv encoder-epoch-12-avg-4-chunk-16-left-128.onnx encoder.onnx\r\n";
+    msg += "mv decoder-epoch-12-avg-4-chunk-16-left-128.onnx decoder.onnx\r\n";
+    msg += "mv joiner-epoch-12-avg-4-chunk-16-left-128.onnx joiner.onnx\r\n";
+    msg += "\r\n";
+    msg += "That's it!\r\n";

     AppendLineToMultilineEditCtrl(msg);
     return;
   }
   SherpaOnnxOnlineRecognizerConfig config;
   config.model_config.debug = 0;
-  config.model_config.num_threads = 2;
+  config.model_config.num_threads = 1;
   config.model_config.provider = "cpu";
   config.decoding_method = "greedy_search";
... ... @@ -301,7 +340,7 @@ void CStreamingSpeechRecognitionDlg::InitRecognizer() {
 // see
 // https://stackoverflow.com/questions/7153935/how-to-convert-utf-8-stdstring-to-utf-16-stdwstring
-std::wstring Utf8ToUtf16(const std::string &utf8) {
+static std::wstring Utf8ToUtf16(const std::string &utf8) {
   std::vector<unsigned long> unicode;
   size_t i = 0;
   while (i < utf8.size()) {
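
The only change in this hunk is the added `static`: it gives the helper internal linkage, so the definition stays private to StreamingSpeechRecognitionDlg.cpp and cannot collide at link time with a same-named symbol in another translation unit. A tiny self-contained illustration (placeholder conversion, not real UTF-8 handling):

    #include <string>

    // Internal linkage: this definition is visible only inside the current
    // .cpp file, so another file may define its own Utf8ToUtf16 without a
    // multiply-defined-symbol error.
    static std::wstring Utf8ToUtf16(const std::string &utf8) {
      return std::wstring(utf8.begin(), utf8.end());  // placeholder, ASCII only
    }

    int main() { return Utf8ToUtf16("ok").size() == 2 ? 0 : 1; }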
... ... @@ -392,6 +431,7 @@ static std::string Cat(const std::vector<std::string> &results,
 int CStreamingSpeechRecognitionDlg::RunThread() {
   std::vector<std::string> results;
+  std::string last_text;

   while (started_) {
     while (IsOnlineStreamReady(recognizer_, stream_)) {
... ... @@ -406,6 +446,8 @@ int CStreamingSpeechRecognitionDlg::RunThread() {
       // str.Format(_T("%s"), Cat(results, text).c_str());
       auto str = Utf8ToUtf16(Cat(results, text).c_str());
       my_text_.SetWindowText(str.c_str());
+      my_text_.SetFocus();
+      my_text_.SetSel(-1);
       last_text = text;
     }

     int is_endpoint = IsEndpoint(recognizer_, stream_);
... ...
... ... @@ -48,7 +48,7 @@ class CStreamingSpeechRecognitionDlg : public CDialogEx {
   SherpaOnnxOnlineRecognizer *recognizer_ = nullptr;
   PaStream *pa_stream_ = nullptr;
-  RecognizerThread *thread_;
+  RecognizerThread *thread_ = nullptr;

   CButton my_btn_;
   CEdit my_text_;
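
The `= nullptr` added above is a default member initializer: without it, `thread_` in a freshly constructed dialog holds an indeterminate value, and any code that checks it before the thread starts reads uninitialized memory. A minimal illustration (Worker stands in for RecognizerThread):

    #include <cstdio>

    struct Worker {};  // stand-in for RecognizerThread

    struct Dlg {
      Worker *thread_ = nullptr;  // default member initializer: always starts null
    };

    int main() {
      Dlg d;
      if (d.thread_ == nullptr) std::puts("recognizer thread not started yet");
      return 0;
    }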
... ...
... ... @@ -5,17 +5,46 @@
   <PropertyGroup>
     <SherpaOnnxBuildDirectory>..\..\build</SherpaOnnxBuildDirectory>
     <SherpaOnnxInstallDirectory>..\..\build\install</SherpaOnnxInstallDirectory>
-    <SherpaOnnxLibraries>sherpa-onnx-portaudio.lib;sherpa-onnx-c-api.lib;sherpa-onnx-core.lib</SherpaOnnxLibraries>
+    <SherpaOnnxLibraries>
+      sherpa-onnx-portaudio_static.lib;
+      sherpa-onnx-c-api.lib;
+      sherpa-onnx-core.lib;
+      kaldi-native-fbank-core.lib;
+      absl_base.lib;
+      absl_city.lib;
+      absl_hash.lib;
+      absl_low_level_hash.lib;
+      absl_raw_hash_set.lib;
+      absl_raw_logging_internal.lib;
+      absl_throw_delegate.lib;
+      clog.lib;
+      cpuinfo.lib;
+      flatbuffers.lib;
+      libprotobuf-lite.lib;
+      onnx.lib;
+      onnx_proto.lib;
+      onnxruntime_common.lib;
+      onnxruntime_flatbuffers.lib;
+      onnxruntime_framework.lib;
+      onnxruntime_graph.lib;
+      onnxruntime_mlas.lib;
+      onnxruntime_optimizer.lib;
+      onnxruntime_providers.lib;
+      onnxruntime_session.lib;
+      onnxruntime_util.lib;
+      re2.lib;
+    </SherpaOnnxLibraries>
   </PropertyGroup>
   <ItemDefinitionGroup>
     <ClCompile>
       <AdditionalIncludeDirectories>
-        $(SherpaOnnxBuildDirectory)\_deps\portaudio-src\include;$(SherpaOnnxInstallDirectory)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+        $(SherpaOnnxBuildDirectory)\_deps\portaudio-src\include;
+        $(SherpaOnnxInstallDirectory)\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
     </ClCompile>
     <Link>
-      <AdditionalLibraryDirectories>$(SherpaOnnxInstallDirectory)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>$(SherpaOnnxLibraries)</AdditionalDependencies>
-    </Link>
+      <AdditionalLibraryDirectories>$(SherpaOnnxInstallDirectory)\lib;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>$(SherpaOnnxLibraries);</AdditionalDependencies>
+    </Link>
   </ItemDefinitionGroup>
   <ItemGroup />
 </Project>
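
A hedged note on the long list above: unlike DLLs, static libraries do not record their own dependencies, so the final link of the MFC executable must name every transitive dependency explicitly — sherpa-onnx's own libraries, kaldi-native-fbank, and all of onnxruntime's internal static libraries (abseil, protobuf-lite, flatbuffers, cpuinfo, onnx, re2, ...). This is also why the static PortAudio library (sherpa-onnx-portaudio_static.lib) replaces the import library used by the DLL build.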
... ...
... ... @@ -75,10 +75,13 @@ if(ANDROID_NDK)
   target_link_libraries(sherpa-onnx-core android log)
 endif()

-target_link_libraries(sherpa-onnx-core
-  onnxruntime
-  kaldi-native-fbank-core
-)
+target_link_libraries(sherpa-onnx-core kaldi-native-fbank-core)
+
+if(BUILD_SHARED_LIBS OR NOT WIN32)
+  target_link_libraries(sherpa-onnx-core onnxruntime)
+else()
+  target_link_libraries(sherpa-onnx-core ${onnxruntime_lib_files})
+endif()

 if(SHERPA_ONNX_ENABLE_GPU)
   target_link_libraries(sherpa-onnx-core
... ...
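This is the consumer side of the PARENT_SCOPE export noted earlier: with static onnxruntime there is no single import library to wrap in an IMPORTED target, so sherpa-onnx-core links the full ${onnxruntime_lib_files} list collected by cmake/onnxruntime.cmake instead of the `onnxruntime` target.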
... ... @@ -31,7 +31,7 @@ enum class ModelType {
   kUnkown,
 };

-}
+}  // namespace

 namespace sherpa_onnx {
... ...
... ... @@ -9,11 +9,11 @@
 #include <algorithm>
 #include <memory>
+#include <numeric>
 #include <sstream>
 #include <string>
 #include <utility>
 #include <vector>
-#include <numeric>

 #if __ANDROID_API__ >= 9
 #include "android/asset_manager.h"
... ... @@ -78,7 +78,7 @@ OnlineZipformer2TransducerModel::OnlineZipformer2TransducerModel(
 #endif

 void OnlineZipformer2TransducerModel::InitEncoder(void *model_data,
-                                                 size_t model_data_length) {
+                                                  size_t model_data_length) {
   encoder_sess_ = std::make_unique<Ort::Session>(env_, model_data,
                                                  model_data_length, sess_opts_);
... ... @@ -130,7 +130,7 @@ void OnlineZipformer2TransducerModel::InitEncoder(void *model_data,
 }

 void OnlineZipformer2TransducerModel::InitDecoder(void *model_data,
-                                                 size_t model_data_length) {
+                                                  size_t model_data_length) {
   decoder_sess_ = std::make_unique<Ort::Session>(env_, model_data,
                                                  model_data_length, sess_opts_);
... ... @@ -155,7 +155,7 @@ void OnlineZipformer2TransducerModel::InitDecoder(void *model_data,
 }

 void OnlineZipformer2TransducerModel::InitJoiner(void *model_data,
-                                                size_t model_data_length) {
+                                                 size_t model_data_length) {
   joiner_sess_ = std::make_unique<Ort::Session>(env_, model_data,
                                                 model_data_length, sess_opts_);
... ... @@ -252,7 +252,8 @@ std::vector<Ort::Value> OnlineZipformer2TransducerModel::StackStates(
 std::vector<std::vector<Ort::Value>>
 OnlineZipformer2TransducerModel::UnStackStates(
     const std::vector<Ort::Value> &states) const {
-  int32_t m = std::accumulate(num_encoder_layers_.begin(), num_encoder_layers_.end(), 0);
+  int32_t m = std::accumulate(num_encoder_layers_.begin(),
+                              num_encoder_layers_.end(), 0);
   assert(states.size() == m * 6 + 2);

   int32_t batch_size = states[0].GetTensorTypeAndShapeInfo().GetShape()[1];
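
For the assert above: in this model every encoder layer keeps six cached tensors, and two further tensors shared across layers complete the list, hence m * 6 + 2, where m sums num_encoder_layers_ over all encoder stacks. A small sketch of the arithmetic (the {2, 2, 3, 4, 3, 2} layer counts are just illustrative values):

    #include <cassert>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    int main() {
      // Hypothetical per-stack layer counts for a zipformer2 encoder.
      std::vector<int32_t> num_encoder_layers = {2, 2, 3, 4, 3, 2};
      int32_t m = std::accumulate(num_encoder_layers.begin(),
                                  num_encoder_layers.end(), 0);  // 16
      // 6 cached tensors per layer + 2 tensors shared across layers:
      assert(m * 6 + 2 == 98);
      return 0;
    }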
... ... @@ -332,10 +333,12 @@ OnlineZipformer2TransducerModel::UnStackStates(
   return ans;
 }

-std::vector<Ort::Value> OnlineZipformer2TransducerModel::GetEncoderInitStates() {
+std::vector<Ort::Value>
+OnlineZipformer2TransducerModel::GetEncoderInitStates() {
   std::vector<Ort::Value> ans;
   int32_t n = static_cast<int32_t>(encoder_dims_.size());
-  int32_t m = std::accumulate(num_encoder_layers_.begin(), num_encoder_layers_.end(), 0);
+  int32_t m = std::accumulate(num_encoder_layers_.begin(),
+                              num_encoder_layers_.end(), 0);
   ans.reserve(m * 6 + 2);

   for (int32_t i = 0; i != n; ++i) {
... ... @@ -354,7 +357,8 @@ std::vector<Ort::Value> OnlineZipformer2TransducerModel::GetEncoderInitStates()
     }

     {
-      std::array<int64_t, 4> s{1, 1, left_context_len_[i], nonlin_attn_head_dim};
+      std::array<int64_t, 4> s{1, 1, left_context_len_[i],
+                               nonlin_attn_head_dim};
       auto v =
           Ort::Value::CreateTensor<float>(allocator_, s.data(), s.size());
       Fill(&v, 0);
... ... @@ -378,7 +382,8 @@ std::vector<Ort::Value> OnlineZipformer2TransducerModel::GetEncoderInitStates()
     }

     {
-      std::array<int64_t, 3> s{1, encoder_dims_[i], cnn_module_kernels_[i] / 2};
+      std::array<int64_t, 3> s{1, encoder_dims_[i],
+                               cnn_module_kernels_[i] / 2};
       auto v =
           Ort::Value::CreateTensor<float>(allocator_, s.data(), s.size());
       Fill(&v, 0);
... ... @@ -386,7 +391,8 @@ std::vector<Ort::Value> OnlineZipformer2TransducerModel::GetEncoderInitStates()
     }

     {
-      std::array<int64_t, 3> s{1, encoder_dims_[i], cnn_module_kernels_[i] / 2};
+      std::array<int64_t, 3> s{1, encoder_dims_[i],
+                               cnn_module_kernels_[i] / 2};
       auto v =
           Ort::Value::CreateTensor<float>(allocator_, s.data(), s.size());
       Fill(&v, 0);
... ... @@ -413,8 +419,8 @@ std::vector<Ort::Value> OnlineZipformer2TransducerModel::GetEncoderInitStates()
 std::pair<Ort::Value, std::vector<Ort::Value>>
 OnlineZipformer2TransducerModel::RunEncoder(Ort::Value features,
-                                           std::vector<Ort::Value> states,
-                                           Ort::Value /* processed_frames */) {
+                                            std::vector<Ort::Value> states,
+                                            Ort::Value /* processed_frames */) {
   std::vector<Ort::Value> encoder_inputs;
   encoder_inputs.reserve(1 + states.size());
... ... @@ -446,7 +452,7 @@ Ort::Value OnlineZipformer2TransducerModel::RunDecoder(
 }

 Ort::Value OnlineZipformer2TransducerModel::RunJoiner(Ort::Value encoder_out,
-                                                     Ort::Value decoder_out) {
+                                                      Ort::Value decoder_out) {
   std::array<Ort::Value, 2> joiner_input = {std::move(encoder_out),
                                             std::move(decoder_out)};
   auto logit =
... ...
... ... @@ -17,9 +17,11 @@ static void PybindOnlineRecognizerResult(py::module *m) {
       .def_property_readonly(
           "text", [](PyClass &self) -> std::string { return self.text; })
       .def_property_readonly(
-          "tokens", [](PyClass &self) -> std::vector<std::string> { return self.tokens; })
+          "tokens",
+          [](PyClass &self) -> std::vector<std::string> { return self.tokens; })
       .def_property_readonly(
-          "timestamps", [](PyClass &self) -> std::vector<float> { return self.timestamps; });
+          "timestamps",
+          [](PyClass &self) -> std::vector<float> { return self.timestamps; });
 }

 static void PybindOnlineRecognizerConfig(py::module *m) {
... ...
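
The pybind11 hunk above is a formatting-only rewrap. For readers unfamiliar with the pattern, a self-contained sketch of def_property_readonly with lambdas (the Result struct is a stand-in for OnlineRecognizerResult):

    #include <pybind11/pybind11.h>
    #include <pybind11/stl.h>  // converts std::vector to a Python list
    #include <string>
    #include <vector>

    namespace py = pybind11;

    struct Result {  // stand-in for OnlineRecognizerResult
      std::string text;
      std::vector<std::string> tokens;
      std::vector<float> timestamps;
    };

    PYBIND11_MODULE(demo, m) {
      py::class_<Result>(m, "Result")
          .def(py::init<>())
          .def_property_readonly("text",
                                 [](const Result &self) { return self.text; })
          .def_property_readonly("tokens",
                                 [](const Result &self) { return self.tokens; })
          .def_property_readonly(
              "timestamps", [](const Result &self) { return self.timestamps; });
    }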