Fangjun Kuang
Committed by GitHub

Publish pre-built wheels with CUDA support for Linux aarch64. (#1507)
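This commit adds a GitHub Actions workflow that cross-builds CUDA-enabled sherpa-onnx wheels for aarch64 with cibuildwheel and publishes them to the Hugging Face repo csukuangfj/sherpa-onnx-wheels under cuda/&lt;version&gt;. As a rough sketch of how such a wheel would be consumed (the 1.10.30 version and the exact wheel file name below are hypothetical placeholders, not taken from this commit; actual names depend on the sherpa-onnx release and Python tag):

# Hypothetical install of a published wheel from the Hugging Face repo
# this workflow pushes to; version and file name are placeholders.
wget https://huggingface.co/csukuangfj/sherpa-onnx-wheels/resolve/main/cuda/1.10.30/sherpa_onnx-1.10.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
pip install ./sherpa_onnx-1.10.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl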

+name: build-wheels-aarch64-cuda
+
+on:
+  push:
+    branches:
+      - wheel
+  workflow_dispatch:
+
+env:
+  SHERPA_ONNX_IS_IN_GITHUB_ACTIONS: 1
+
+concurrency:
+  group: build-wheels-aarch64-cuda-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build_wheels_aarch64_cuda:
+    name: ${{ matrix.manylinux }} ${{ matrix.python-version }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest]
+        python-version: ["cp37", "cp38", "cp39", "cp310", "cp311", "cp312", "cp313"]
+        manylinux: [manylinux2014] #, manylinux_2_28]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      # QEMU lets the x86_64 runner build aarch64 wheels under emulation
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: all
+
+      # see https://cibuildwheel.readthedocs.io/en/stable/changelog/
+      # for a list of versions
+      - name: Build wheels
+        uses: pypa/cibuildwheel@v2.21.3
+        env:
+          CIBW_BEFORE_ALL: |
+            git clone --depth 1 --branch v1.2.12 https://github.com/alsa-project/alsa-lib
+            cd alsa-lib
+            ./gitcompile
+            cd ..
+            echo "PWD"
+            ls -lh /project/alsa-lib/src/.libs
+
+          CIBW_ENVIRONMENT: CPLUS_INCLUDE_PATH=/project/alsa-lib/include:$CPLUS_INCLUDE_PATH SHERPA_ONNX_ALSA_LIB_DIR=/project/alsa-lib/src/.libs LD_LIBRARY_PATH=/project/build/bdist.linux-x86_64/wheel/sherpa_onnx/lib:$SHERPA_ONNX_ALSA_LIB_DIR SHERPA_ONNX_MAKE_ARGS="VERBOSE=1" SHERPA_ONNX_ENABLE_ALSA=1 SHERPA_ONNX_ENABLE_GPU=ON
+          CIBW_BUILD: "${{ matrix.python-version }}-*"
+          CIBW_SKIP: "cp27-* cp35-* cp36-* *-win32 pp* *-musllinux* *-manylinux_i686"
+          CIBW_BUILD_VERBOSITY: 3
+          CIBW_ARCHS_LINUX: aarch64
+          CIBW_MANYLINUX_AARCH64_IMAGE: quay.io/pypa/${{ matrix.manylinux }}_aarch64
+          # onnxruntime >= 1.17.0 drops support for CentOS 7 and supports only manylinux_2_28;
+          # manylinux_2_24 is no longer supported
+
+      - name: Display wheels
+        shell: bash
+        run: |
+          ls -lh ./wheelhouse/
+
+      - name: Install patchelf
+        if: matrix.os == 'ubuntu-latest'
+        shell: bash
+        run: |
+          sudo apt-get update -q
+          sudo apt-get install -q -y patchelf
+          patchelf --help
+
+      - name: Patch wheels
+        shell: bash
+        if: matrix.os == 'ubuntu-latest'
+        run: |
+          mkdir ./wheels
+          sudo ./scripts/wheel/patch_wheel.py --in-dir ./wheelhouse --out-dir ./wheels
+
+          ls -lh ./wheels/
+          rm -rf ./wheelhouse
+          mv ./wheels ./wheelhouse
+
+      - name: Publish to huggingface
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        uses: nick-fields/retry@v3
+        with:
+          max_attempts: 20
+          timeout_seconds: 200
+          shell: bash
+          command: |
+            git config --global user.email "csukuangfj@gmail.com"
+            git config --global user.name "Fangjun Kuang"
+
+            rm -rf huggingface
+            export GIT_LFS_SKIP_SMUDGE=1
+            export GIT_CLONE_PROTECTION_ACTIVE=false
+
+            SHERPA_ONNX_VERSION=$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
+            echo "SHERPA_ONNX_VERSION $SHERPA_ONNX_VERSION"
+
+            d=cuda/$SHERPA_ONNX_VERSION
+
+            git clone https://csukuangfj:$HF_TOKEN@huggingface.co/csukuangfj/sherpa-onnx-wheels huggingface
+            cd huggingface
+            git fetch
+            git pull
+            git merge -m "merge remote" --ff origin main
+
+            mkdir -p $d
+
+            cp -v ../wheelhouse/*.whl $d/
+
+            git status
+            git add .
+            git commit -m "add more wheels"
+            git push https://csukuangfj:$HF_TOKEN@huggingface.co/csukuangfj/sherpa-onnx-wheels main
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: wheel-${{ matrix.python-version }}-${{ matrix.manylinux }}
+          path: ./wheelhouse/*.whl
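The "Patch wheels" step calls scripts/wheel/patch_wheel.py, whose contents are not part of this diff. A minimal sketch of what such a script typically does, assuming it rewrites the RPATH of the shared objects bundled in each wheel so they resolve the sherpa_onnx/lib directory shipped inside the wheel (the RPATH value and layout are assumptions):

# Sketch only: unpack each wheel, point bundled *.so files at the wheel's
# own lib/ directory via RPATH, then repack. Needs `pip install wheel`
# and patchelf (installed by the workflow step above).
for whl in ./wheelhouse/*.whl; do
  wheel unpack "$whl" -d unpacked
  # '$ORIGIN/../sherpa_onnx/lib' is an assumed target, not taken from the script
  find unpacked -name '*.so*' -exec patchelf --set-rpath '$ORIGIN/../sherpa_onnx/lib' {} \;
  wheel pack unpacked/* -d ./wheels
  rm -rf unpacked
done

After patching, the workflow swaps the patched wheels back into ./wheelhouse, so the later publish and upload steps need no changes. The commit also updates the streaming zipformer RTF C++ example so it can select the CUDA execution provider at runtime: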
@@ -7,10 +7,28 @@
 //
 // clang-format off
 //
+// cd /path/sherpa-onnx/
+// mkdir build
+// cd build
+// cmake ..
+// make
+//
 // wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
 // tar xvf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
 // rm sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
 //
+// # 1. Test on CPU, run once
+//
+// ./bin/streaming-zipformer-rtf-cxx-api
+//
+// # 2. Test on CPU, run 10 times
+//
+// ./bin/streaming-zipformer-rtf-cxx-api 10
+//
+// # 3. Test on GPU, run 10 times
+//
+// ./bin/streaming-zipformer-rtf-cxx-api 10 cuda
+//
 // clang-format on
 
 #include <chrono>  // NOLINT
@@ -21,13 +39,15 @@
 
 int32_t main(int argc, char *argv[]) {
   int32_t num_runs = 1;
-  if (argc == 2) {
+  if (argc >= 2) {
     num_runs = atoi(argv[1]);
     if (num_runs < 0) {
       num_runs = 1;
     }
   }
 
+  bool use_gpu = (argc == 3);
+
   using namespace sherpa_onnx::cxx;  // NOLINT
   OnlineRecognizerConfig config;
 
@@ -50,6 +70,7 @@ int32_t main(int argc, char *argv[]) {
       "./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/tokens.txt";
 
   config.model_config.num_threads = 1;
+  config.model_config.provider = use_gpu ? "cuda" : "cpu";
 
   std::cout << "Loading model\n";
   OnlineRecognizer recongizer = OnlineRecognizer::Create(config);
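Note that use_gpu checks only the argument count, so any second argument (not just the literal "cuda") selects the CUDA provider, and the provider is only usable when the project was built with GPU support. A sketch of the GPU-enabled build, assuming the SHERPA_ONNX_ENABLE_GPU CMake option matches the flag of the same name set in the workflow's CIBW_ENVIRONMENT above:

# Configure and build with CUDA support so the "cuda" provider in the
# example is available at runtime. SHERPA_ONNX_ENABLE_GPU is assumed to be
# the CMake option corresponding to the env flag used in the workflow.
cd /path/sherpa-onnx
mkdir -p build && cd build
cmake -DSHERPA_ONNX_ENABLE_GPU=ON ..
make
./bin/streaming-zipformer-rtf-cxx-api 10 cuda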