Fangjun Kuang
Committed by GitHub

Set batch size to 1 for more streaming ASR models (#1280)

... ... @@ -7,7 +7,6 @@ on:
workflow_dispatch:
concurrency:
group: mobile-asr-models-${{ github.ref }}
cancel-in-progress: true
... ... @@ -16,11 +15,14 @@ jobs:
mobile-asr-models:
if: github.repository_owner == 'k2-fsa' || github.repository_owner == 'csukuangfj' || github.repository_owner == 'csu-fangjun'
runs-on: ${{ matrix.os }}
name: ${{ matrix.index }}/${{ matrix.total }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.8"]
total: ["11"]
index: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
steps:
- uses: actions/checkout@v4
... ... @@ -33,7 +35,20 @@ jobs:
- name: Install dependencies
shell: bash
run: |
python3 -m pip install onnxruntime==1.16.3 onnx==1.15.0 jinja2
- name: Generate build script
shell: bash
run: |
cd scripts/mobile-asr-models
total=${{ matrix.total }}
index=${{ matrix.index }}
./generate-asr.py --total $total --index $index
chmod +x run2.sh
mv run2.sh run.sh
ls -lh
- name: Run
shell: bash
... ...
name: mobile-kws-models
on:
push:
branches:
- asr-mobile
workflow_dispatch:
concurrency:
group: mobile-kws-models-${{ github.ref }}
cancel-in-progress: true
jobs:
mobile-kws-models:
if: github.repository_owner == 'k2-fsa' || github.repository_owner == 'csukuangfj' || github.repository_owner == 'csu-fangjun'
runs-on: ${{ matrix.os }}
name: ${{ matrix.index }}/${{ matrix.total }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.8"]
total: ["2"]
index: ["0", "1"]
steps:
- uses: actions/checkout@v4
- name: Setup Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
shell: bash
run: |
python3 -m pip install onnxruntime==1.16.3 onnx==1.15.0 jinja2
- name: Generate build script
shell: bash
run: |
cd scripts/mobile-asr-models
total=${{ matrix.total }}
index=${{ matrix.index }}
./generate-kws.py --total $total --index $index
chmod +x run2.sh
mv run2.sh run.sh
ls -lh
- name: Run
shell: bash
run: |
cd scripts/mobile-asr-models
./run.sh
- name: Release
uses: svenstaro/upload-release-action@v2
with:
file_glob: true
file: ./kws/*.tar.bz2
overwrite: true
repo_name: k2-fsa/sherpa-onnx
repo_token: ${{ secrets.UPLOAD_GH_SHERPA_ONNX_TOKEN }}
tag: kws-models
... ...
... ... @@ -2,7 +2,6 @@
import argparse
from dataclasses import dataclass
from typing import List, Optional
import jinja2
... ...
... ... @@ -2,7 +2,6 @@
import argparse
from dataclasses import dataclass
from typing import List, Optional
import jinja2
... ...
... ... @@ -2,7 +2,6 @@
import argparse
from dataclasses import dataclass
from typing import List, Optional
import jinja2
... ...
... ... @@ -2,7 +2,6 @@
import argparse
from dataclasses import dataclass
from typing import List, Optional
import jinja2
... ...
... ... @@ -2,7 +2,7 @@
import argparse
from dataclasses import dataclass
from typing import List
import jinja2
... ... @@ -34,76 +34,99 @@ class SpeakerIdentificationModel:
def get_3dspeaker_models() -> List[SpeakerIdentificationModel]:
models = [
SpeakerIdentificationModel(model_name="3dspeaker_speech_campplus_sv_en_voxceleb_16k.onnx"),
SpeakerIdentificationModel(model_name="3dspeaker_speech_campplus_sv_zh-cn_16k-common.onnx"),
SpeakerIdentificationModel(model_name="3dspeaker_speech_eres2net_base_200k_sv_zh-cn_16k-common.onnx"),
SpeakerIdentificationModel(model_name="3dspeaker_speech_eres2net_base_sv_zh-cn_3dspeaker_16k.onnx"),
SpeakerIdentificationModel(model_name="3dspeaker_speech_eres2net_large_sv_zh-cn_3dspeaker_16k.onnx"),
SpeakerIdentificationModel(model_name="3dspeaker_speech_eres2net_sv_en_voxceleb_16k.onnx"),
SpeakerIdentificationModel(model_name="3dspeaker_speech_eres2net_sv_zh-cn_16k-common.onnx"),
SpeakerIdentificationModel(
model_name="3dspeaker_speech_campplus_sv_en_voxceleb_16k.onnx"
),
SpeakerIdentificationModel(
model_name="3dspeaker_speech_campplus_sv_zh-cn_16k-common.onnx"
),
SpeakerIdentificationModel(
model_name="3dspeaker_speech_eres2net_base_200k_sv_zh-cn_16k-common.onnx"
),
SpeakerIdentificationModel(
model_name="3dspeaker_speech_eres2net_base_sv_zh-cn_3dspeaker_16k.onnx"
),
SpeakerIdentificationModel(
model_name="3dspeaker_speech_eres2net_large_sv_zh-cn_3dspeaker_16k.onnx"
),
SpeakerIdentificationModel(
model_name="3dspeaker_speech_eres2net_sv_en_voxceleb_16k.onnx"
),
SpeakerIdentificationModel(
model_name="3dspeaker_speech_eres2net_sv_zh-cn_16k-common.onnx"
),
]
prefix = '3dspeaker_speech_'
prefix = "3dspeaker_speech_"
num = len(prefix)
for m in models:
m.framework = '3dspeaker'
m.framework = "3dspeaker"
m.short_name = m.model_name[num:-5]
if '_zh-cn_' in m.model_name:
m.lang = 'zh'
elif '_en_' in m.model_name:
m.lang = 'en'
if "_zh-cn_" in m.model_name:
m.lang = "zh"
elif "_en_" in m.model_name:
m.lang = "en"
else:
raise ValueError(m)
return models
def get_wespeaker_models() -> List[SpeakerIdentificationModel]:
models = [
SpeakerIdentificationModel(model_name="wespeaker_en_voxceleb_CAM++.onnx"),
SpeakerIdentificationModel(model_name="wespeaker_en_voxceleb_CAM++_LM.onnx"),
SpeakerIdentificationModel(model_name="wespeaker_en_voxceleb_resnet152_LM.onnx"),
SpeakerIdentificationModel(model_name="wespeaker_en_voxceleb_resnet221_LM.onnx"),
SpeakerIdentificationModel(model_name="wespeaker_en_voxceleb_resnet293_LM.onnx"),
SpeakerIdentificationModel(
model_name="wespeaker_en_voxceleb_resnet152_LM.onnx"
),
SpeakerIdentificationModel(
model_name="wespeaker_en_voxceleb_resnet221_LM.onnx"
),
SpeakerIdentificationModel(
model_name="wespeaker_en_voxceleb_resnet293_LM.onnx"
),
SpeakerIdentificationModel(model_name="wespeaker_en_voxceleb_resnet34.onnx"),
SpeakerIdentificationModel(model_name="wespeaker_en_voxceleb_resnet34_LM.onnx"),
SpeakerIdentificationModel(model_name="wespeaker_zh_cnceleb_resnet34.onnx"),
SpeakerIdentificationModel(model_name="wespeaker_zh_cnceleb_resnet34_LM.onnx"),
]
prefix = 'wespeaker_xx_'
prefix = "wespeaker_xx_"
num = len(prefix)
for m in models:
m.framework = 'wespeaker'
m.framework = "wespeaker"
m.short_name = m.model_name[num:-5]
if '_zh_' in m.model_name:
m.lang = 'zh'
elif '_en_' in m.model_name:
m.lang = 'en'
if "_zh_" in m.model_name:
m.lang = "zh"
elif "_en_" in m.model_name:
m.lang = "en"
else:
raise ValueError(m)
return models
def get_nemo_models() -> List[SpeakerIdentificationModel]:
models = [
SpeakerIdentificationModel(model_name="nemo_en_speakerverification_speakernet.onnx"),
SpeakerIdentificationModel(
model_name="nemo_en_speakerverification_speakernet.onnx"
),
SpeakerIdentificationModel(model_name="nemo_en_titanet_large.onnx"),
SpeakerIdentificationModel(model_name="nemo_en_titanet_small.onnx"),
]
prefix = 'nemo_en_'
prefix = "nemo_en_"
num = len(prefix)
for m in models:
m.framework = 'nemo'
m.framework = "nemo"
m.short_name = m.model_name[num:-5]
if '_zh_' in m.model_name:
m.lang = 'zh'
elif '_en_' in m.model_name:
m.lang = 'en'
if "_zh_" in m.model_name:
m.lang = "zh"
elif "_en_" in m.model_name:
m.lang = "en"
else:
raise ValueError(m)
return models
def main():
args = get_args()
index = args.index
... ...
... ... @@ -2,7 +2,6 @@
import argparse
from dataclasses import dataclass
from typing import List, Optional
import jinja2
... ...
... ... @@ -16,3 +16,97 @@ https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipform
The following [colab notebook](https://colab.research.google.com/drive/1RsVZbsxbPjazeGrNNbZNjXCYbEG2F2DU?usp=sharing)
provides examples of how to use the above two models.

**WARNING**: Tested only with `onnxruntime==1.16.3` and `onnx==1.15.0`.
```bash
pip install onnxruntime==1.16.3 onnx==1.15.0
```
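
The encoder of each model below is converted by fixing its dynamic batch dimension (named `N` in the exported model) to 1 and then re-quantizing. A minimal sketch of the steps, mirroring `run-impl.sh` in `scripts/mobile-asr-models` (the file names here are placeholders):

```bash
# Fix the dynamic batch dimension N to 1
python3 -m onnxruntime.tools.make_dynamic_shape_fixed \
  --dim_param N --dim_value 1 encoder.onnx encoder.fixed.onnx

# Pre-process the fixed model before quantization
python3 -m onnxruntime.quantization.preprocess \
  --input encoder.fixed.onnx --output encoder.fp32.onnx

# Apply int8 dynamic quantization
python3 ./dynamic_quantization.py --input encoder.fp32.onnx --output encoder.int8.onnx
```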
## More examples
### [sherpa-onnx-streaming-zipformer-korean-2024-06-16](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#sherpa-onnx-streaming-zipformer-korean-2024-06-16-korean)
| | encoder-epoch-99-avg-1.onnx | encoder-epoch-99-avg-1.int8.onnx|
|---|---|---|
|Dynamic batch size| 279 MB| 122 MB|
|Batch size fixed to 1| 264 MB | 107 MB |
### [sherpa-onnx-streaming-zipformer-en-20M-2023-02-17](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#csukuangfj-sherpa-onnx-streaming-zipformer-en-20m-2023-02-17-english)
| | encoder-epoch-99-avg-1.onnx | encoder-epoch-99-avg-1.int8.onnx|
|---|---|---|
|Dynamic batch size| 85 MB| 41 MB|
|Batch size fixed to 1| 75 MB | 32 MB |
### [sherpa-onnx-streaming-zipformer-multi-zh-hans-2023-12-12](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#sherpa-onnx-streaming-zipformer-multi-zh-hans-2023-12-12-chinese)
| | encoder-epoch-20-avg-1-chunk-16-left-128.onnx | encoder-epoch-20-avg-1-chunk-16-left-128.int8.onnx|
|---|---|---|
|Dynamic batch size| 249 MB| 67 MB|
|Batch size fixed to 1| 247 MB | 65 MB |
### [icefall-asr-zipformer-streaming-wenetspeech-20230615](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#pkufool-icefall-asr-zipformer-streaming-wenetspeech-20230615-chinese)
| | encoder-epoch-12-avg-4-chunk-16-left-128.onnx | encoder-epoch-12-avg-4-chunk-16-left-128.int8.onnx|
|---|---|---|
|Dynamic batch size| 250 MB| 68 MB|
|Batch size fixed to 1| 247 MB | 65 MB |
### [sherpa-onnx-streaming-zipformer-en-2023-06-26](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#csukuangfj-sherpa-onnx-streaming-zipformer-en-2023-06-26-english)
| | encoder-epoch-99-avg-1-chunk-16-left-128.onnx | encoder-epoch-99-avg-1-chunk-16-left-128.int8.onnx|
|---|---|---|
|Dynamic batch size| 250 MB| 68 MB|
|Batch size fixed to 1| 247 MB | 65 MB |
### [sherpa-onnx-streaming-zipformer-en-2023-06-21](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#csukuangfj-sherpa-onnx-streaming-zipformer-en-2023-06-21-english)
| | encoder-epoch-99-avg-1.onnx | encoder-epoch-99-avg-1.int8.onnx|
|---|---|---|
|Dynamic batch size| 338 MB| 180 MB|
|Batch size fixed to 1| 264 MB | 107 MB |
### [sherpa-onnx-streaming-zipformer-en-2023-02-21](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#csukuangfj-sherpa-onnx-streaming-zipformer-en-2023-02-21-english)
| | encoder-epoch-99-avg-1.onnx | encoder-epoch-99-avg-1.int8.onnx|
|---|---|---|
|Dynamic batch size| 279 MB| 122 MB|
|Batch size fixed to 1| 264 MB | 107 MB |
### [sherpa-onnx-streaming-zipformer-fr-2023-04-14](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#shaojieli-sherpa-onnx-streaming-zipformer-fr-2023-04-14-french)
| | encoder-epoch-29-avg-9-with-averaged-model.onnx | encoder-epoch-29-avg-9-with-averaged-model.int8.onnx|
|---|---|---|
|Dynamic batch size| 279 MB| 121 MB|
|Batch size fixed to 1| 264 MB | 107 MB |
### [sherpa-onnx-streaming-zipformer-small-bilingual-zh-en-2023-02-16](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#sherpa-onnx-streaming-zipformer-small-bilingual-zh-en-2023-02-16-bilingual-chinese-english)
| | encoder-epoch-99-avg-1.onnx | encoder-epoch-99-avg-1.int8.onnx|
|---|---|---|
|Dynamic batch size| 85 MB| 41 MB|
|Batch size fixed to 1| 75 MB | 32 MB |
### [sherpa-onnx-streaming-zipformer-zh-14M-2023-02-23](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#csukuangfj-sherpa-onnx-streaming-zipformer-zh-14m-2023-02-23-chinese)
| | encoder-epoch-99-avg-1.onnx | encoder-epoch-99-avg-1.int8.onnx|
|---|---|---|
|Dynamic batch size| 40 MB| 21 MB|
|Batch size fixed to 1| 33 MB | 15 MB |
### [sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01](https://k2-fsa.github.io/sherpa/onnx/kws/pretrained_models/index.html#sherpa-onnx-kws-zipformer-wenetspeech-3-3m-2024-01-01-chinese)
| | encoder-epoch-12-avg-2-chunk-16-left-64.onnx | encoder-epoch-12-avg-2-chunk-16-left-64.int8.onnx|
|---|---|---|
|Dynamic batch size| 12 MB| 4.6 MB|
|Batch size fixed to 1| 11 MB | 3.9 MB |
### [sherpa-onnx-kws-zipformer-gigaspeech-3.3M-2024-01-01](https://k2-fsa.github.io/sherpa/onnx/kws/pretrained_models/index.html#sherpa-onnx-kws-zipformer-gigaspeech-3-3m-2024-01-01-english)
| | encoder-epoch-12-avg-2-chunk-16-left-64.onnx | encoder-epoch-12-avg-2-chunk-16-left-64.int8.onnx|
|---|---|---|
|Dynamic batch size| 12 MB| 4.6 MB|
|Batch size fixed to 1| 11 MB | 3.9 MB |
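
To confirm that a converted encoder really has its batch dimension fixed to 1, you can print its input shapes with onnxruntime. A minimal sketch (the file name is a placeholder):

```python
import onnxruntime

# Load the converted encoder and print its input shapes.
# After the conversion, the leading dimension should be 1 instead of a symbolic "N".
sess = onnxruntime.InferenceSession("encoder-epoch-99-avg-1.onnx")
for i in sess.get_inputs():
    print(i.name, i.shape)
```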
... ...
#!/usr/bin/env python3
import argparse
import onnxruntime
from onnxruntime.quantization import QuantType, quantize_dynamic
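# Print the input and output tensors of an ONNX model (useful for checking its batch dimension).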
def show(filename):
session_opts = onnxruntime.SessionOptions()
session_opts.log_severity_level = 3
sess = onnxruntime.InferenceSession(filename, session_opts)
for i in sess.get_inputs():
print(i)
print("-----")
for i in sess.get_outputs():
print(i)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
... ... @@ -25,6 +39,9 @@ def get_args():
def main():
args = get_args()
print(vars(args))
print(f"----------{args.input}----------")
show(args.input)
print("------------------------------")
quantize_dynamic(
model_input=args.input,
... ...
#!/usr/bin/env python3
import argparse
from dataclasses import dataclass
import jinja2
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--total",
type=int,
default=1,
help="Number of runners",
)
parser.add_argument(
"--index",
type=int,
default=0,
help="Index of the current runner",
)
return parser.parse_args()
@dataclass
class Model:
# We will download
# https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/{model_name}.tar.bz2
model_name: str
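# Shell commands used to convert this model. They are pasted into the generated
# run2.sh, which defines $src (the extracted model directory) and $dst (the output directory).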
cmd: str
def get_streaming_zipformer_transducer_models():
models = [
Model(
model_name="sherpa-onnx-streaming-zipformer-korean-2024-06-16",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1.onnx \
--output1 $dst/encoder-epoch-99-avg-1.onnx \
--output2 $dst/encoder-epoch-99-avg-1.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-99-avg-1.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-multi-zh-hans-2023-12-12",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-20-avg-1-chunk-16-left-128.onnx \
--output1 $dst/encoder-epoch-20-avg-1-chunk-16-left-128.onnx \
--output2 $dst/encoder-epoch-20-avg-1-chunk-16-left-128.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/README.md $dst/
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-20-avg-1-chunk-16-left-128.onnx $dst/
cp -v $src/joiner-epoch-20-avg-1-chunk-16-left-128.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="icefall-asr-zipformer-streaming-wenetspeech-20230615",
cmd="""
./run-impl.sh \
--input $src/exp/encoder-epoch-12-avg-4-chunk-16-left-128.onnx \
--output1 $dst/encoder-epoch-12-avg-4-chunk-16-left-128.onnx \
--output2 $dst/encoder-epoch-12-avg-4-chunk-16-left-128.int8.onnx
cp -fv $src/README.md $dst/
cp -v $src/data/lang_char/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/exp/decoder-epoch-12-avg-4-chunk-16-left-128.onnx $dst/
cp -v $src/exp/joiner-epoch-12-avg-4-chunk-16-left-128.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-en-2023-06-26",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1-chunk-16-left-128.onnx \
--output1 $dst/encoder-epoch-99-avg-1-chunk-16-left-128.onnx \
--output2 $dst/encoder-epoch-99-avg-1-chunk-16-left-128.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/README.md $dst/
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-99-avg-1-chunk-16-left-128.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1-chunk-16-left-128.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-en-2023-06-21",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1.onnx \
--output1 $dst/encoder-epoch-99-avg-1.onnx \
--output2 $dst/encoder-epoch-99-avg-1.int8.onnx
cp -fv $src/README.md $dst/
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-99-avg-1.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-en-2023-02-21",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1.onnx \
--output1 $dst/encoder-epoch-99-avg-1.onnx \
--output2 $dst/encoder-epoch-99-avg-1.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/README.md $dst/ || true
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-99-avg-1.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1.onnx \
--output1 $dst/encoder-epoch-99-avg-1.onnx \
--output2 $dst/encoder-epoch-99-avg-1.int8.onnx
cp -v $src/README.md $dst/
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-99-avg-1.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-fr-2023-04-14",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-29-avg-9-with-averaged-model.onnx \
--output1 $dst/encoder-epoch-29-avg-9-with-averaged-model.onnx \
--output2 $dst/encoder-epoch-29-avg-9-with-averaged-model.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/README.md $dst/ || true
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-29-avg-9-with-averaged-model.onnx $dst/
cp -v $src/joiner-epoch-29-avg-9-with-averaged-model.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-small-bilingual-zh-en-2023-02-16",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1.onnx \
--output1 $dst/encoder-epoch-99-avg-1.onnx \
--output2 $dst/encoder-epoch-99-avg-1.int8.onnx
mkdir $dst/{64,96}
./run-impl.sh \
--input $src/64/encoder-epoch-99-avg-1.onnx \
--output1 $dst/64/encoder-epoch-99-avg-1.onnx \
--output2 $dst/64/encoder-epoch-99-avg-1.int8.onnx
./run-impl.sh \
--input $src/96/encoder-epoch-99-avg-1.onnx \
--output1 $dst/96/encoder-epoch-99-avg-1.onnx \
--output2 $dst/96/encoder-epoch-99-avg-1.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/README.md $dst/ || true
cp -av $src/test_wavs $dst/
cp -v $src/tokens.txt $dst/
cp -v $src/decoder-epoch-99-avg-1.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1.int8.onnx $dst/
cp -v $src/tokens.txt $dst/64/
cp -v $src/64/decoder-epoch-99-avg-1.onnx $dst/64/
cp -v $src/64/joiner-epoch-99-avg-1.int8.onnx $dst/64/
cp -v $src/tokens.txt $dst/96/
cp -v $src/96/decoder-epoch-99-avg-1.onnx $dst/96/
cp -v $src/96/joiner-epoch-99-avg-1.int8.onnx $dst/96/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-zh-14M-2023-02-23",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1.onnx \
--output1 $dst/encoder-epoch-99-avg-1.onnx \
--output2 $dst/encoder-epoch-99-avg-1.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/README.md $dst/ || true
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-99-avg-1.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-streaming-zipformer-en-20M-2023-02-17",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-99-avg-1.onnx \
--output1 $dst/encoder-epoch-99-avg-1.onnx \
--output2 $dst/encoder-epoch-99-avg-1.int8.onnx
cp -v $src/bpe.model $dst/ || true
cp -v $src/README.md $dst/ || true
cp -v $src/tokens.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-99-avg-1.onnx $dst/
cp -v $src/joiner-epoch-99-avg-1.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
]
return models
def get_models():
return get_streaming_zipformer_transducer_models()
def main():
args = get_args()
index = args.index
total = args.total
assert 0 <= index < total, (index, total)
all_model_list = get_models()
num_models = len(all_model_list)
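# Split the models evenly across the runners; each runner converts num_per_runner models.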
num_per_runner = num_models // total
if num_per_runner <= 0:
raise ValueError(f"num_models: {num_models}, num_runners: {total}")
start = index * num_per_runner
end = start + num_per_runner
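# Models left over from the even split are handed out one each to the first "remaining" runners.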
remaining = num_models - args.total * num_per_runner
print(f"{index}/{total}: {start}-{end}/{num_models}")
d = dict()
d["model_list"] = all_model_list[start:end]
if index < remaining:
s = args.total * num_per_runner + index
d["model_list"].append(all_model_list[s])
print(f"{s}/{num_models}")
filename_list = [
"./run2.sh",
]
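# Render run2.sh from the jinja2 template run2.sh.in, filling in the models assigned to this runner.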
for filename in filename_list:
environment = jinja2.Environment()
with open(f"{filename}.in") as f:
s = f.read()
template = environment.from_string(s)
s = template.render(**d)
with open(filename, "w") as f:
print(s, file=f)
if __name__ == "__main__":
main()
... ...
#!/usr/bin/env python3
import argparse
from dataclasses import dataclass
import jinja2
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--total",
type=int,
default=1,
help="Number of runners",
)
parser.add_argument(
"--index",
type=int,
default=0,
help="Index of the current runner",
)
return parser.parse_args()
@dataclass
class Model:
# We will download
# https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/{model_name}.tar.bz2
model_name: str
cmd: str
def get_kws_models():
models = [
Model(
model_name="sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-12-avg-2-chunk-16-left-64.onnx \
--output1 $dst/encoder-epoch-12-avg-2-chunk-16-left-64.onnx \
--output2 $dst/encoder-epoch-12-avg-2-chunk-16-left-64.int8.onnx
cp -v $src/README.md $dst/
cp -v $src/*.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-12-avg-2-chunk-16-left-64.onnx $dst/
cp -v $src/joiner-epoch-12-avg-2-chunk-16-left-64.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/kws-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
Model(
model_name="sherpa-onnx-kws-zipformer-gigaspeech-3.3M-2024-01-01",
cmd="""
./run-impl.sh \
--input $src/encoder-epoch-12-avg-2-chunk-16-left-64.onnx \
--output1 $dst/encoder-epoch-12-avg-2-chunk-16-left-64.onnx \
--output2 $dst/encoder-epoch-12-avg-2-chunk-16-left-64.int8.onnx
cp -v $src/bpe.model $dst/
cp -v $src/README.md $dst/
cp -v $src/*.txt $dst/
cp -av $src/test_wavs $dst/
cp -v $src/decoder-epoch-12-avg-2-chunk-16-left-64.onnx $dst/
cp -v $src/joiner-epoch-12-avg-2-chunk-16-left-64.int8.onnx $dst/
cat > $dst/notes.md <<EOF
# Introduction
This model is converted from
https://github.com/k2-fsa/sherpa-onnx/releases/download/kws-models/$src.tar.bz2
and it supports only batch size equal to 1.
EOF
""",
),
]
return models
def get_models():
return get_kws_models()
def main():
args = get_args()
index = args.index
total = args.total
assert 0 <= index < total, (index, total)
all_model_list = get_models()
num_models = len(all_model_list)
num_per_runner = num_models // total
if num_per_runner <= 0:
raise ValueError(f"num_models: {num_models}, num_runners: {total}")
start = index * num_per_runner
end = start + num_per_runner
remaining = num_models - args.total * num_per_runner
print(f"{index}/{total}: {start}-{end}/{num_models}")
d = dict()
d["model_list"] = all_model_list[start:end]
if index < remaining:
s = args.total * num_per_runner + index
d["model_list"].append(all_model_list[s])
print(f"{s}/{num_models}")
filename_list = [
"./run2.sh",
]
for filename in filename_list:
environment = jinja2.Environment()
with open(f"{filename}.in") as f:
s = f.read()
template = environment.from_string(s)
s = template.render(**d)
with open(filename, "w") as f:
print(s, file=f)
if __name__ == "__main__":
main()
... ...
... ... @@ -11,6 +11,7 @@ input=
output1=
output2=
batch_dim=N
source ./parse_options.sh
if [ -z $input ]; then
... ... @@ -35,6 +36,7 @@ echo "output2: $output2"
python3 -m onnxruntime.tools.make_dynamic_shape_fixed --dim_param $batch_dim --dim_value 1 $input tmp.fixed.onnx
python3 -m onnxruntime.quantization.preprocess --input tmp.fixed.onnx --output $output1
python3 ./dynamic_quantization.py --input $output1 --output $output2
ls -lh $input tmp.fixed.onnx $output1 $output2
... ...
#!/usr/bin/env bash
set -e
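# Rendered from the jinja2 template run2.sh.in by generate-asr.py / generate-kws.py;
# the loop below runs once per model assigned to the current runner.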
{% for model in model_list %}
src={{ model.model_name }}
if [[ $src == *kws* ]]; then
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/kws-models/$src.tar.bz2
else
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/$src.tar.bz2
fi
tar xvf $src.tar.bz2
rm $src.tar.bz2
dst=$src-mobile
mkdir -p $dst
{{ model.cmd }}
echo "---$src---"
ls -lh $src
echo "---$dst---"
ls -lh $dst
rm -rf $src
tar cjfv $dst.tar.bz2 $dst
if [[ $src == *kws* ]]; then
mkdir -p ../../kws
mv *.tar.bz2 ../../kws/
else
mv *.tar.bz2 ../../
fi
rm -rf $dst
{% endfor %}
... ...