Fangjun Kuang
Committed by GitHub

Fix whisper test script for the latest onnxruntime. (#494)

@@ -31,7 +31,7 @@ jobs:
       - name: Install dependencies
         shell: bash
         run: |
-          python3 -m pip install torch==1.13.0 -f https://download.pytorch.org/whl/cpu/torch_stable.html
+          python3 -m pip install torch==1.13.0 torchaudio==0.13.0 -f https://download.pytorch.org/whl/cpu/torch_stable.html
           python3 -m pip install openai-whisper==20230314 onnxruntime onnx

       - name: export ${{ matrix.model }}
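A note on the pin: torchaudio wheels are built against one specific torch release (torchaudio 0.13.x pairs with torch 1.13.x), so installing the matched pair from the same CPU wheel index keeps pip from resolving an incompatible combination. A quick sanity check, shown only as an illustration:

    # Illustration only: confirm the installed torch/torchaudio pair matches.
    # torchaudio 0.13.x is built against torch 1.13.x; a mismatched pair
    # typically fails at "import torchaudio" with an undefined-symbol error.
    import torch
    import torchaudio

    print("torch:", torch.__version__)            # e.g. 1.13.0+cpu
    print("torchaudio:", torchaudio.__version__)  # e.g. 0.13.0+cpu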
@@ -108,6 +108,19 @@ jobs:
           repo_token: ${{ secrets.UPLOAD_GH_SHERPA_ONNX_TOKEN }}
           tag: asr-models

+      - name: Test ${{ matrix.model }}
+        shell: bash
+        run: |
+          python3 -m pip install kaldi-native-fbank
+          git checkout .
+          model=${{ matrix.model }}
+          src=sherpa-onnx-whisper-$model
+          python3 scripts/whisper/test.py \
+            --encoder $src/$model-encoder.int8.onnx \
+            --decoder $src/$model-decoder.int8.onnx \
+            --tokens $src/$model-tokens.txt \
+            $src/test_wavs/0.wav
+
       - name: Publish ${{ matrix.model }} to huggingface
         shell: bash
         env:
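The new step runs each exported int8 model end to end before it is published; git checkout . first discards any local modifications left by the earlier export steps. kaldi-native-fbank is installed because the test script computes Whisper's 80-bin log-mel input on the CPU. The sketch below shows that style of feature extraction; the Whisper-specific option values are assumptions for illustration, not a copy of scripts/whisper/test.py:

    import numpy as np
    import kaldi_native_fbank as knf

    # Rough sketch: 80-bin fbank features at 16 kHz, the input layout
    # Whisper-family models expect. Not a copy of scripts/whisper/test.py.
    opts = knf.FbankOptions()
    opts.frame_opts.samp_freq = 16000
    opts.frame_opts.dither = 0
    opts.mel_opts.num_bins = 80

    fbank = knf.OnlineFbank(opts)
    samples = np.zeros(16000, dtype=np.float32)  # stand-in for test_wavs/0.wav
    fbank.accept_waveform(16000, samples.tolist())
    fbank.input_finished()

    frames = np.stack([fbank.get_frame(i) for i in range(fbank.num_frames_ready)])
    print(frames.shape)  # (num_frames, 80)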
@@ -74,11 +74,11 @@ git lfs pull --include "*.onnx"

 # remove .git to save spaces
 rm -rf .git
-rm README.md
+rm -fv README.md
 rm -rf test_wavs
 rm .gitattributes

-rm *.ort
+rm -fv *.ort
 rm tiny.en-encoder.onnx
 rm tiny.en-decoder.onnx

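Switching to rm -fv makes the cleanup idempotent: -f keeps the script from failing when a file is already absent (not every model repo ships a README.md or .ort files), and -v logs what was actually deleted. For illustration only, the same delete-if-present pattern in Python:

    from pathlib import Path

    # Illustration only: delete-if-present, the pathlib analogue of rm -f.
    # missing_ok=True (Python 3.8+) suppresses FileNotFoundError.
    for p in [Path("README.md"), *Path(".").glob("*.ort")]:
        p.unlink(missing_ok=True)
        print("removed:", p)  # stands in for rm's -v flag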
@@ -82,6 +82,7 @@ class OnnxModel:
         self.encoder = ort.InferenceSession(
             encoder,
             sess_options=self.session_opts,
+            providers=["CPUExecutionProvider"],
         )

         meta = self.encoder.get_modelmeta().custom_metadata_map
@@ -113,6 +114,7 @@ class OnnxModel:
         self.decoder = ort.InferenceSession(
             decoder,
             sess_options=self.session_opts,
+            providers=["CPUExecutionProvider"],
         )

     def run_encoder(
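These two hunks are the fix the commit title refers to: recent onnxruntime wheels enable more than one execution provider, and since ORT 1.9 constructing an InferenceSession on such a build without an explicit providers list raises a ValueError. Pinning CPUExecutionProvider keeps the test script working on both older and newer releases. A minimal reproduction of the pattern, with a placeholder model path rather than a file from the repo:

    import onnxruntime as ort

    # Without providers=..., onnxruntime >= 1.9 raises ValueError on any
    # build that enables more than one execution provider.
    session_opts = ort.SessionOptions()
    session = ort.InferenceSession(
        "encoder.int8.onnx",  # placeholder path
        sess_options=session_opts,
        providers=["CPUExecutionProvider"],
    )
    print(session.get_providers())  # ['CPUExecutionProvider']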