Authored by Fangjun Kuang, 2025-09-15 17:12:45 +0800
Committed by GitHub, 2025-09-15 17:12:45 +0800
Commit a45384b874bf63def3c11b7837ed089a1dcd9b9f (a45384b8)
1 parent: aa66810c
Provide pre-compiled whls for cuda 12.x on Linux x64 and Windows x64 (#2601)
Showing 11 changed files with 59 additions and 139 deletions
.github/workflows/build-wheels-linux-cuda.yaml
.github/workflows/build-wheels-win64-cuda.yaml
.github/workflows/linux-gpu.yaml
.github/workflows/windows-x64-cuda.yaml
cmake/cmake_extension.py
cmake/onnxruntime-linux-aarch64-gpu.cmake
cmake/onnxruntime-linux-x86_64-gpu.cmake
cmake/onnxruntime-win-x64-gpu.cmake
cmake/onnxruntime.cmake
setup.py
sherpa-onnx/csrc/CMakeLists.txt
.github/workflows/build-wheels-linux-cuda.yaml
@@ -15,13 +15,14 @@ concurrency:
 jobs:
   build_wheels_linux_cuda:
-    name: ${{ matrix.manylinux }} ${{ matrix.python-version }}
+    name: ${{ matrix.manylinux }} ${{ matrix.python-version }} ${{ matrix.onnxruntime_version }}
     runs-on: ${{ matrix.os }}

     strategy:
       fail-fast: false
       matrix:
         os: [ubuntu-22.04]
         python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
+        onnxruntime_version: ["1.17.1", "1.22.0"]

     steps:
       - uses: actions/checkout@v4

@@ -69,6 +70,16 @@ jobs:
         export SHERPA_ONNX_ENABLE_ALSA=1
         export SHERPA_ONNX_CMAKE_ARGS="-DSHERPA_ONNX_ENABLE_GPU=ON"

+        onnxruntime_version=${{ matrix.onnxruntime_version }}
+        if [[ $onnxruntime_version == "1.22.0" ]]; then
+          curl -SL -O https://github.com/csukuangfj/onnxruntime-libs/releases/download/v1.22.0/onnxruntime-linux-x64-gpu-1.22.0-patched.zip
+          unzip onnxruntime-linux-x64-gpu-1.22.0-patched.zip
+
+          export SHERPA_ONNXRUNTIME_LIB_DIR=$PWD/onnxruntime-linux-x64-gpu-1.22.0-patched/lib
+          export SHERPA_ONNXRUNTIME_INCLUDE_DIR=$PWD/onnxruntime-linux-x64-gpu-1.22.0-patched/include
+          export SHERPA_ONNX_CUDA_VERSION="12.cudnn9"
+        fi
+
         python3 setup.py bdist_wheel
         ls -lh dist

@@ -80,6 +91,8 @@ jobs:
       run: |
         ls -lh ./wheelhouse/

+        unzip -l ./wheelhouse/*.whl
+
     - name: Install patchelf
       shell: bash
       run: |

@@ -97,9 +110,10 @@ jobs:
         rm -rf ./wheelhouse
         mv ./wheels ./wheelhouse

     - uses: actions/upload-artifact@v4
       with:
-        name: wheel-cuda-${{ matrix.python-version }}
+        name: wheel-cuda-${{ matrix.python-version }}-${{ matrix.onnxruntime_version }}
         path: ./wheelhouse/*.whl

     - name: Publish to huggingface
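For reference, the new build step can be replayed outside CI. The following is a minimal sketch for a Linux x64 machine with a Python build environment; the download URL, unzip target, and environment variables are copied verbatim from the workflow above, everything else is illustrative.

    # Fetch the patched onnxruntime 1.22.0 GPU package and point the sherpa-onnx build at it
    curl -SL -O https://github.com/csukuangfj/onnxruntime-libs/releases/download/v1.22.0/onnxruntime-linux-x64-gpu-1.22.0-patched.zip
    unzip onnxruntime-linux-x64-gpu-1.22.0-patched.zip
    export SHERPA_ONNXRUNTIME_LIB_DIR=$PWD/onnxruntime-linux-x64-gpu-1.22.0-patched/lib
    export SHERPA_ONNXRUNTIME_INCLUDE_DIR=$PWD/onnxruntime-linux-x64-gpu-1.22.0-patched/include
    export SHERPA_ONNX_CUDA_VERSION="12.cudnn9"                  # consumed by setup.py (see the setup.py diff below)
    export SHERPA_ONNX_CMAKE_ARGS="-DSHERPA_ONNX_ENABLE_GPU=ON"
    python3 setup.py bdist_wheel                                 # the wheel is written to ./dist
    ls -lh dist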
.github/workflows/build-wheels-win64-cuda.yaml
@@ -15,13 +15,14 @@ concurrency:
 jobs:
   build_wheels_win64_cuda:
-    name: ${{ matrix.python-version }}
+    name: ${{ matrix.python-version }} ${{ matrix.onnxruntime_version }}
     runs-on: ${{ matrix.os }}

     strategy:
       fail-fast: false
       matrix:
         os: [windows-2022]
         python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
+        onnxruntime_version: ["1.17.1", "1.22.0"]

     steps:
       - uses: actions/checkout@v4

@@ -44,6 +45,17 @@ jobs:
         export SHERPA_ONNX_CMAKE_ARGS="-DSHERPA_ONNX_ENABLE_GPU=ON"

+        onnxruntime_version=${{ matrix.onnxruntime_version }}
+        if [[ $onnxruntime_version == "1.22.0" ]]; then
+          curl -SL -O https://github.com/microsoft/onnxruntime/releases/download/v1.22.0/onnxruntime-win-x64-gpu-1.22.0.zip
+          unzip onnxruntime-win-x64-gpu-1.22.0.zip
+
+          export SHERPA_ONNXRUNTIME_LIB_DIR=$PWD/onnxruntime-win-x64-gpu-1.22.0/lib
+          export SHERPA_ONNXRUNTIME_INCLUDE_DIR=$PWD/onnxruntime-win-x64-gpu-1.22.0/include
+
+          export SHERPA_ONNX_CUDA_VERSION="12.cudnn9"
+        fi
+
         python3 setup.py bdist_wheel
         ls -lh ./dist/

@@ -54,10 +66,11 @@ jobs:
       shell: bash
       run: |
         ls -lh ./wheelhouse/
+        unzip -l ./wheelhouse/*.whl

     - uses: actions/upload-artifact@v4
       with:
-        name: wheel-${{ matrix.python-version }}
+        name: wheel-${{ matrix.python-version }}-${{ matrix.onnxruntime_version }}
         path: ./wheelhouse/*.whl

     - name: Publish to huggingface
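As a quick sanity check (mirroring the unzip -l step added to both wheel workflows above), the wheel contents can be listed to see which onnxruntime libraries, if any, were bundled; the grep filter is only illustrative.

    unzip -l ./wheelhouse/*.whl | grep -i onnxruntime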
.github/workflows/linux-gpu.yaml
@@ -100,11 +100,6 @@ jobs:
         ls -lh lib
         ls -lh bin

-        if [[ $onnxruntime_version == "1.22.0" ]]; then
-          cp -v $SHERPA_ONNXRUNTIME_LIB_DIR/libonnxruntime* ./lib/
-          cp -v $SHERPA_ONNXRUNTIME_LIB_DIR/libonnxruntime* install/lib/
-        fi
-
         echo "----"
         ls -lh install/lib
.github/workflows/windows-x64-cuda.yaml
@@ -76,10 +76,6 @@ jobs:
         ls -lh ./bin/Release/sherpa-onnx.exe

         onnxruntime_version=${{ matrix.onnxruntime_version }}
-        if [[ $onnxruntime_version == "1.22.0" ]]; then
-          cp -v ../onnxruntime-win-x64-gpu-1.22.0/lib/*.dll ./bin/Release/
-          cp -v ../onnxruntime-win-x64-gpu-1.22.0/lib/*.dll ./install/bin/
-        fi

     - name: Copy files
       shell: bash
cmake/cmake_extension.py
@@ -9,6 +9,7 @@ import shutil
 import sys
 from pathlib import Path
+import glob

 import setuptools
 from setuptools.command.build_ext import build_ext

@@ -251,8 +252,6 @@ class BuildExtension(build_ext):
         os.system(f"mkdir {dst}")
         os.system(f"dir {dst}")

-        import glob
-
         ext = "pyd" if sys.platform.startswith("win") else "so"
         pattern = os.path.join(self.build_temp, "**", f"_sherpa_onnx.*.{ext}")
         matches = glob.glob(pattern, recursive=True)
cmake/onnxruntime-linux-aarch64-gpu.cmake
@@ -117,30 +117,6 @@ set_target_properties(onnxruntime PROPERTIES
   INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
 )

-find_library(location_onnxruntime_cuda_lib onnxruntime_providers_cuda
-  PATHS
-  "${onnxruntime_SOURCE_DIR}/lib"
-  NO_CMAKE_SYSTEM_PATH
-)
-
-add_library(onnxruntime_providers_cuda SHARED IMPORTED)
-set_target_properties(onnxruntime_providers_cuda PROPERTIES
-  IMPORTED_LOCATION ${location_onnxruntime_cuda_lib}
-)
-message(STATUS "location_onnxruntime_cuda_lib: ${location_onnxruntime_cuda_lib}")
-
-# for libonnxruntime_providers_shared.so
-find_library(location_onnxruntime_providers_shared_lib onnxruntime_providers_shared
-  PATHS
-  "${onnxruntime_SOURCE_DIR}/lib"
-  NO_CMAKE_SYSTEM_PATH
-)
-add_library(onnxruntime_providers_shared SHARED IMPORTED)
-set_target_properties(onnxruntime_providers_shared PROPERTIES
-  IMPORTED_LOCATION ${location_onnxruntime_providers_shared_lib}
-)
-message(STATUS "location_onnxruntime_providers_shared_lib: ${location_onnxruntime_providers_shared_lib}")

 file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/libonnxruntime*")

 message(STATUS "onnxruntime lib files: ${onnxruntime_lib_files}")
 install(FILES ${onnxruntime_lib_files} DESTINATION lib)
cmake/onnxruntime-linux-x86_64-gpu.cmake
@@ -73,30 +73,6 @@ set_target_properties(onnxruntime PROPERTIES
   INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
 )

-find_library(location_onnxruntime_cuda_lib onnxruntime_providers_cuda
-  PATHS
-  "${onnxruntime_SOURCE_DIR}/lib"
-  NO_CMAKE_SYSTEM_PATH
-)
-
-add_library(onnxruntime_providers_cuda SHARED IMPORTED)
-set_target_properties(onnxruntime_providers_cuda PROPERTIES
-  IMPORTED_LOCATION ${location_onnxruntime_cuda_lib}
-)
-message(STATUS "location_onnxruntime_cuda_lib: ${location_onnxruntime_cuda_lib}")
-
-# for libonnxruntime_providers_shared.so
-find_library(location_onnxruntime_providers_shared_lib onnxruntime_providers_shared
-  PATHS
-  "${onnxruntime_SOURCE_DIR}/lib"
-  NO_CMAKE_SYSTEM_PATH
-)
-add_library(onnxruntime_providers_shared SHARED IMPORTED)
-set_target_properties(onnxruntime_providers_shared PROPERTIES
-  IMPORTED_LOCATION ${location_onnxruntime_providers_shared_lib}
-)
-message(STATUS "location_onnxruntime_providers_shared_lib: ${location_onnxruntime_providers_shared_lib}")

 file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/libonnxruntime*")

 message(STATUS "onnxruntime lib files: ${onnxruntime_lib_files}")
 install(FILES ${onnxruntime_lib_files} DESTINATION lib)
cmake/onnxruntime-win-x64-gpu.cmake
@@ -82,52 +82,6 @@ file(COPY ${onnxruntime_SOURCE_DIR}/lib/onnxruntime.dll
   ${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}
 )

-# for onnxruntime_providers_cuda.dll
-find_library(location_onnxruntime_providers_cuda_lib onnxruntime_providers_cuda
-  PATHS
-  "${onnxruntime_SOURCE_DIR}/lib"
-  NO_CMAKE_SYSTEM_PATH
-)
-message(STATUS "location_onnxruntime_providers_cuda_lib: ${location_onnxruntime_providers_cuda_lib}")
-
-add_library(onnxruntime_providers_cuda SHARED IMPORTED)
-set_target_properties(onnxruntime_providers_cuda PROPERTIES
-  IMPORTED_LOCATION ${location_onnxruntime_providers_cuda_lib}
-  INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
-)
-set_property(TARGET onnxruntime_providers_cuda
-  PROPERTY
-    IMPORTED_IMPLIB "${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_cuda.lib"
-)
-
-# for onnxruntime_providers_shared.dll
-find_library(location_onnxruntime_providers_shared_lib onnxruntime_providers_shared
-  PATHS
-  "${onnxruntime_SOURCE_DIR}/lib"
-  NO_CMAKE_SYSTEM_PATH
-)
-message(STATUS "location_onnxruntime_providers_shared_lib: ${location_onnxruntime_providers_shared_lib}")
-
-add_library(onnxruntime_providers_shared SHARED IMPORTED)
-set_target_properties(onnxruntime_providers_shared PROPERTIES
-  IMPORTED_LOCATION ${location_onnxruntime_providers_shared_lib}
-  INTERFACE_INCLUDE_DIRECTORIES "${onnxruntime_SOURCE_DIR}/include"
-)
-set_property(TARGET onnxruntime_providers_shared
-  PROPERTY
-    IMPORTED_IMPLIB "${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_shared.lib"
-)
-
-file(COPY
-  ${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_cuda.dll
-  ${onnxruntime_SOURCE_DIR}/lib/onnxruntime_providers_shared.dll
-  DESTINATION
-  ${CMAKE_BINARY_DIR}/bin/${CMAKE_BUILD_TYPE}
-)

 file(GLOB onnxruntime_lib_files "${onnxruntime_SOURCE_DIR}/lib/*.dll")

 message(STATUS "onnxruntime lib files: ${onnxruntime_lib_files}")
cmake/onnxruntime.cmake
@@ -155,6 +155,7 @@ if(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE)
   elseif(WIN32)
     if(SHERPA_ONNX_ENABLE_GPU)
       set(location_onnxruntime_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/onnxruntime.dll)
+      set(location_onnxruntime_lib2 $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/onnxruntime.lib)
     else()
       set(location_onnxruntime_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/onnxruntime.lib)
       if(SHERPA_ONNX_ENABLE_DIRECTML)

@@ -175,18 +176,6 @@ if(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE)
     set(onnxruntime_lib_files $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/libonnxruntime.a)
     message("Use static lib: ${onnxruntime_lib_files}")
   endif()

-  if(SHERPA_ONNX_ENABLE_GPU)
-    if(WIN32)
-      set(location_onnxruntime_cuda_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/onnxruntime_providers_cuda.dll)
-    else()
-      set(location_onnxruntime_cuda_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/libonnxruntime_providers_cuda.so)
-    endif()
-
-    if(NOT EXISTS ${location_onnxruntime_cuda_lib})
-      set(location_onnxruntime_cuda_lib $ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/libonnxruntime_providers_cuda.a)
-    endif()
-  endif()
 else()
   find_library(location_onnxruntime_lib onnxruntime
     PATHS
@@ -194,37 +183,41 @@ if(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE)
       /usr/lib
       /usr/local/lib
     )
-
-    if(SHERPA_ONNX_ENABLE_GPU)
-      find_library(location_onnxruntime_cuda_lib onnxruntime_providers_cuda
-        PATHS
-        /lib
-        /usr/lib
-        /usr/local/lib
-      )
-    endif()
   endif()

   message(STATUS "location_onnxruntime_lib: ${location_onnxruntime_lib}")
-  if(SHERPA_ONNX_ENABLE_GPU)
-    message(STATUS "location_onnxruntime_cuda_lib: ${location_onnxruntime_cuda_lib}")
-  endif()
 endif()

 if(location_onnxruntime_header_dir AND location_onnxruntime_lib)
   if(NOT DEFINED onnxruntime_lib_files)
     add_library(onnxruntime SHARED IMPORTED)

+    if(WIN32)
       set_target_properties(onnxruntime PROPERTIES
         IMPORTED_LOCATION ${location_onnxruntime_lib}
-        IMPORTED_IMPLIB ${location_onnxruntime_lib}
+        IMPORTED_IMPLIB ${location_onnxruntime_lib2}
         INTERFACE_INCLUDE_DIRECTORIES "${location_onnxruntime_header_dir}"
       )
-
-    if(SHERPA_ONNX_ENABLE_GPU AND location_onnxruntime_cuda_lib)
-      add_library(onnxruntime_providers_cuda SHARED IMPORTED)
-
-      set_target_properties(onnxruntime_providers_cuda PROPERTIES
-        IMPORTED_LOCATION ${location_onnxruntime_cuda_lib}
-      )
-    endif()
+    else()
+      set_target_properties(onnxruntime PROPERTIES
+        IMPORTED_LOCATION ${location_onnxruntime_lib}
+        INTERFACE_INCLUDE_DIRECTORIES "${location_onnxruntime_header_dir}"
+      )
+    endif()
+
+    if(WIN32)
+      file(GLOB onnxruntime_lib_files "$ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/*.dll")
+    else()
+      file(GLOB onnxruntime_lib_files "$ENV{SHERPA_ONNXRUNTIME_LIB_DIR}/libonnxruntime*")
+    endif()

     message(STATUS "onnxruntime lib files: ${onnxruntime_lib_files}")

     install(FILES ${onnxruntime_lib_files} DESTINATION lib)
+
+    if(WIN32)
+      install(FILES ${onnxruntime_lib_files} DESTINATION bin)
+    endif()
   endif()
 else()
   if(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE)
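The first hunk above pairs the runtime DLL with its import library for Windows GPU builds. A minimal sketch of the layout this code expects under $SHERPA_ONNXRUNTIME_LIB_DIR (the two file names come from the diff; the listing itself is illustrative):

    ls "$SHERPA_ONNXRUNTIME_LIB_DIR"
    # onnxruntime.dll   -> location_onnxruntime_lib  (IMPORTED_LOCATION)
    # onnxruntime.lib   -> location_onnxruntime_lib2 (IMPORTED_IMPLIB)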
setup.py
@@ -34,6 +34,10 @@ def get_package_version():
     if "-DSHERPA_ONNX_ENABLE_GPU=ON" in cmake_args:
         extra_version = "+cuda"
+        cuda_version = os.environ.get("SHERPA_ONNX_CUDA_VERSION", "")
+        if cuda_version:
+            extra_version += cuda_version
+
     latest_version += extra_version

     return latest_version
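To illustrate the effect of the added lines, here is a rough sketch of how the local version suffix is assembled when the GPU build is enabled. The environment variable values match the ones exported in the workflows above; the wheel name is shown only as an illustration, with <version> standing in for the actual package version.

    export SHERPA_ONNX_CMAKE_ARGS="-DSHERPA_ONNX_ENABLE_GPU=ON"
    export SHERPA_ONNX_CUDA_VERSION="12.cudnn9"
    python3 setup.py bdist_wheel
    # get_package_version() now returns "<version>+cuda12.cudnn9",
    # so the wheel is named roughly sherpa_onnx-<version>+cuda12.cudnn9-<tags>.whl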
sherpa-onnx/csrc/CMakeLists.txt
@@ -291,7 +291,7 @@ if(SHERPA_ONNX_ENABLE_RKNN)
   endif()
 endif()
-if(BUILD_SHARED_LIBS OR SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE)
+if((BUILD_SHARED_LIBS OR SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE) AND NOT DEFINED onnxruntime_lib_files)
   if(TARGET onnxruntime)
     target_link_libraries(sherpa-onnx-core onnxruntime)
   else()
     target_link_libraries(sherpa-onnx-core ${onnxruntime_lib_files})