Authored by Yuekai Zhang, 2023-05-12 22:30:47 +0800
Committed by GitHub, 2023-05-12 22:30:47 +0800
Commit b8fbf8e5ce414c0ec7de232e7d6328af12c99329 (b8fbf8e5), 1 parent cea718e3
Add onnxruntime gpu for cmake (#153)
* add onnxruntime gpu for cmake
* fix clang
* fix typo
* cpplint
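This commit introduces a new CMake option, SHERPA_ONNX_ENABLE_GPU (OFF by default), that switches the build to a GPU-enabled ONNX Runtime and wires the CUDA execution provider into sherpa-onnx-core and session.cc. As a rough usage sketch (not part of the commit), a superproject could turn the option on before pulling in sherpa-onnx; the project name gpu-demo and the vendored ./sherpa-onnx directory are assumptions:

cmake_minimum_required(VERSION 3.13)
project(gpu-demo)

# Set the cache variable before sherpa-onnx's own option() is processed,
# so the GPU build is selected without editing sherpa-onnx itself.
set(SHERPA_ONNX_ENABLE_GPU ON CACHE BOOL "Enable ONNX Runtime GPU support" FORCE)

# Assumes the sherpa-onnx sources are vendored in ./sherpa-onnx.
add_subdirectory(sherpa-onnx)

The same effect is usually achieved by passing -DSHERPA_ONNX_ENABLE_GPU=ON on the cmake command line, which is what the log message added in session.cc refers to.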
Showing 4 changed files with 72 additions and 0 deletions
CMakeLists.txt
cmake/onnxruntime.cmake
sherpa-onnx/csrc/CMakeLists.txt
sherpa-onnx/csrc/session.cc
CMakeLists.txt
@@ -19,6 +19,7 @@ option(SHERPA_ONNX_ENABLE_PORTAUDIO "Whether to build with portaudio" ON)
option(SHERPA_ONNX_ENABLE_JNI "Whether to build JNI interface" OFF)
option(SHERPA_ONNX_ENABLE_C_API "Whether to build C API" ON)
option(SHERPA_ONNX_ENABLE_WEBSOCKET "Whether to build websocket server/client" ON)
option(SHERPA_ONNX_ENABLE_GPU "Enable ONNX Runtime GPU support" OFF)

set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")

...
@@ -71,6 +72,7 @@ message(STATUS "SHERPA_ONNX_ENABLE_PORTAUDIO ${SHERPA_ONNX_ENABLE_PORTAUDIO}")
message(STATUS "SHERPA_ONNX_ENABLE_JNI ${SHERPA_ONNX_ENABLE_JNI}")
message(STATUS "SHERPA_ONNX_ENABLE_C_API ${SHERPA_ONNX_ENABLE_C_API}")
message(STATUS "SHERPA_ONNX_ENABLE_WEBSOCKET ${SHERPA_ONNX_ENABLE_WEBSOCKET}")
message(STATUS "SHERPA_ONNX_ENABLE_GPU ${SHERPA_ONNX_ENABLE_GPU}")

set(CMAKE_CXX_STANDARD 14 CACHE STRING "The C++ version to be used.")
set(CMAKE_CXX_EXTENSIONS OFF)

...
cmake/onnxruntime.cmake
@@ -33,6 +33,14 @@ function(download_onnxruntime)
#
# ./include
#   It contains all the needed header files
if(SHERPA_ONNX_ENABLE_GPU)
  set(onnxruntime_URL "https://github.com/microsoft/onnxruntime/releases/download/v1.14.1/onnxruntime-linux-x64-gpu-1.14.1.tgz")
endif()
# After downloading, it contains:
#   ./lib/libonnxruntime.so.1.14.1
#   ./lib/libonnxruntime.so, which is a symlink to lib/libonnxruntime.so.1.14.1
#   ./lib/libonnxruntime_providers_cuda.so
#   ./include, which contains all the needed header files
elseif(APPLE)
  # If you don't have access to the Internet,
  # please pre-download onnxruntime

...
@@ -97,6 +105,7 @@ function(download_onnxruntime)
  message(FATAL_ERROR "Only support Linux, macOS, and Windows at present. Will support other OSes later")
endif()

if(NOT SHERPA_ONNX_ENABLE_GPU)
  foreach(f IN LISTS possible_file_locations)
    if(EXISTS ${f})
      set(onnxruntime_URL "${f}")

...
@@ -112,6 +121,12 @@ function(download_onnxruntime)
    ${onnxruntime_URL2}
    URL_HASH ${onnxruntime_HASH}
  )
else()
  FetchContent_Declare(onnxruntime
    URL ${onnxruntime_URL}
  )
endif()

FetchContent_GetProperties(onnxruntime)
if(NOT onnxruntime_POPULATED)

...
@@ -134,6 +149,19 @@ function(download_onnxruntime)
  IMPORTED_LOCATION ${location_onnxruntime}
  INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
)

if(SHERPA_ONNX_ENABLE_GPU)
  find_library(location_onnxruntime_cuda_lib onnxruntime_providers_cuda
    PATHS "${onnxruntime_SOURCE_DIR}/lib"
    NO_CMAKE_SYSTEM_PATH
  )
  add_library(onnxruntime_providers_cuda SHARED IMPORTED)
  set_target_properties(onnxruntime_providers_cuda PROPERTIES
    IMPORTED_LOCATION ${location_onnxruntime_cuda_lib}
  )
endif()

if(WIN32)
  set_property(TARGET onnxruntime
    PROPERTY

...
@@ -185,6 +213,12 @@ if(DEFINED ENV{SHERPA_ONNXRUNTIME_LIB_DIR})
  if(NOT EXISTS ${location_onnxruntime_lib})
    set(location_onnxruntime_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/libonnxruntime.a)
  endif()

  if(SHERPA_ONNX_ENABLE_GPU)
    set(location_onnxruntime_cuda_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/libonnxruntime_providers_cuda.so)
    if(NOT EXISTS ${location_onnxruntime_cuda_lib})
      set(location_onnxruntime_cuda_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/libonnxruntime_providers_cuda.a)
    endif()
  endif()
else()
  find_library(location_onnxruntime_lib onnxruntime
    PATHS

...
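This hunk extends the pre-installed-onnxruntime path: when the environment variable SHERPA_ONNXRUNTIME_LIB_DIR is set and the GPU build is enabled, the build now also expects libonnxruntime_providers_cuda.so (or .a) in that directory. Below is a small, hypothetical sanity-check script one might run before configuring; the path /opt/onnxruntime-linux-x64-gpu-1.14.1/lib is an assumption, not part of the commit:

# check-ort-gpu.cmake -- run with: cmake -P check-ort-gpu.cmake
# Assumed location of a pre-downloaded GPU onnxruntime; adjust as needed.
set(ort_lib_dir "/opt/onnxruntime-linux-x64-gpu-1.14.1/lib")

if(EXISTS "${ort_lib_dir}/libonnxruntime_providers_cuda.so")
  message(STATUS "Found the CUDA provider; export SHERPA_ONNXRUNTIME_LIB_DIR=${ort_lib_dir} before configuring.")
else()
  message(FATAL_ERROR "libonnxruntime_providers_cuda.so not found in ${ort_lib_dir}")
endif()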
@@ -192,9 +226,21 @@ else()
    /usr/lib
    /usr/local/lib
  )

  if(SHERPA_ONNX_ENABLE_GPU)
    find_library(location_onnxruntime_cuda_lib onnxruntime_providers_cuda
      PATHS
      /lib
      /usr/lib
      /usr/local/lib
    )
  endif()
endif()

message(STATUS "location_onnxruntime_lib: ${location_onnxruntime_lib}")
if(SHERPA_ONNX_ENABLE_GPU)
  message(STATUS "location_onnxruntime_cuda_lib: ${location_onnxruntime_cuda_lib}")
endif()

if(location_onnxruntime_header_dir AND location_onnxruntime_lib)
  add_library(onnxruntime SHARED IMPORTED)

...
@@ -202,6 +248,12 @@ if(location_onnxruntime_header_dir AND location_onnxruntime_lib)
    IMPORTED_LOCATION ${location_onnxruntime_lib}
    INTERFACE_INCLUDE_DIRECTORIES "${location_onnxruntime_header_dir}"
  )

  if(SHERPA_ONNX_ENABLE_GPU AND location_onnxruntime_cuda_lib)
    add_library(onnxruntime_providers_cuda SHARED IMPORTED)
    set_target_properties(onnxruntime_providers_cuda PROPERTIES
      IMPORTED_LOCATION ${location_onnxruntime_cuda_lib}
    )
  endif()
else()
  message(STATUS "Could not find a pre-installed onnxruntime. Downloading pre-compiled onnxruntime")
  download_onnxruntime()

...
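Taken together, the onnxruntime.cmake changes expose the CUDA provider library as an imported target named onnxruntime_providers_cuda whenever SHERPA_ONNX_ENABLE_GPU is ON and the library can be located. A minimal sketch of how a consumer would link against the imported targets declared above; the executable name demo and source file demo.cc are hypothetical, and the real wiring for sherpa-onnx-core follows in the next file:

add_executable(demo demo.cc)
target_link_libraries(demo onnxruntime)

if(SHERPA_ONNX_ENABLE_GPU)
  # Also link the CUDA execution provider shared library exposed above.
  target_link_libraries(demo onnxruntime_providers_cuda)
endif()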
sherpa-onnx/csrc/CMakeLists.txt
...
@@ -78,6 +78,12 @@ target_link_libraries(sherpa-onnx-core
  kaldi-native-fbank-core
)

if(SHERPA_ONNX_ENABLE_GPU)
  target_link_libraries(sherpa-onnx-core onnxruntime_providers_cuda)
endif()

if(SHERPA_ONNX_ENABLE_CHECK)
  target_compile_definitions(sherpa-onnx-core PUBLIC SHERPA_ONNX_ENABLE_CHECK=1)

...
sherpa-onnx/csrc/session.cc
...
@@ -4,8 +4,10 @@
#include "sherpa-onnx/csrc/session.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "sherpa-onnx/csrc/macros.h"
#include "sherpa-onnx/csrc/provider.h"
...
@@ -27,10 +29,20 @@ static Ort::SessionOptions GetSessionOptionsImpl(int32_t num_threads,
    case Provider::kCPU:
      break;  // nothing to do for the CPU provider
    case Provider::kCUDA: {
      std::vector<std::string> available_providers = Ort::GetAvailableProviders();
      if (std::find(available_providers.begin(), available_providers.end(),
                    "CUDAExecutionProvider") != available_providers.end()) {
        // The CUDA provider is available, proceed with setting the options
        OrtCUDAProviderOptions options;
        options.device_id = 0;
        // set more options on need
        sess_opts.AppendExecutionProvider_CUDA(options);
      } else {
        SHERPA_ONNX_LOGE(
            "Please compile with -DSHERPA_ONNX_ENABLE_GPU=ON. Fallback to "
            "cpu!");
      }
      break;
    }
    case Provider::kCoreML: {

...