Fangjun Kuang
Committed by GitHub

Add GigaAM NeMo transducer model for Russian ASR (#1467)
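
This commit adds the GigaAM NeMo RNN-T (transducer) model for Russian: the CI workflow exports and packages sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24, new export-onnx-rnnt.py / test-onnx-rnnt.py / run-rnnt.sh scripts convert the checkpoint into encoder/decoder/joiner ONNX files, the C++ runtime learns to accept model_type == "EncDecRNNTBPEModel" and to select GigaAM feature settings via a new is_giga_am metadata flag, and the Android demo gains model index 20.

A minimal sketch (not part of this commit) of decoding with the released archive via the sherpa-onnx Python API; the keyword arguments follow OfflineRecognizer.from_transducer(), and the file names match the packaging step in the workflow below:

    import soundfile as sf

    import sherpa_onnx

    d = "sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24"
    recognizer = sherpa_onnx.OfflineRecognizer.from_transducer(
        encoder=f"{d}/encoder.int8.onnx",
        decoder=f"{d}/decoder.onnx",
        joiner=f"{d}/joiner.onnx",
        tokens=f"{d}/tokens.txt",
        model_type="nemo_transducer",  # routes to the NeMo transducer runtime
    )

    audio, sample_rate = sf.read(f"{d}/test_wavs/example.wav", dtype="float32")
    stream = recognizer.create_stream()
    stream.accept_waveform(sample_rate, audio)
    recognizer.decode_stream(stream)
    print(stream.result.text)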

@@ -38,7 +38,7 @@ jobs:
           mkdir $d/test_wavs
           rm scripts/nemo/GigaAM/model.onnx
           mv -v scripts/nemo/GigaAM/*.int8.onnx $d/
-          mv -v scripts/nemo/GigaAM/*.md $d/
+          cp -v scripts/nemo/GigaAM/*.md $d/
           mv -v scripts/nemo/GigaAM/*.pdf $d/
           mv -v scripts/nemo/GigaAM/tokens.txt $d/
           mv -v scripts/nemo/GigaAM/*.wav $d/test_wavs/
@@ -51,6 +51,34 @@ jobs:
 
           tar cjvf ${d}.tar.bz2 $d
 
+      - name: Run Transducer
+        shell: bash
+        run: |
+          pushd scripts/nemo/GigaAM
+          ./run-rnnt.sh
+          popd
+
+          d=sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24
+          mkdir $d
+          mkdir $d/test_wavs
+
+          mv -v scripts/nemo/GigaAM/encoder.int8.onnx $d/
+          mv -v scripts/nemo/GigaAM/decoder.onnx $d/
+          mv -v scripts/nemo/GigaAM/joiner.onnx $d/
+
+          cp -v scripts/nemo/GigaAM/*.md $d/
+          mv -v scripts/nemo/GigaAM/*.pdf $d/
+          mv -v scripts/nemo/GigaAM/tokens.txt $d/
+          mv -v scripts/nemo/GigaAM/*.wav $d/test_wavs/
+          mv -v scripts/nemo/GigaAM/run-rnnt.sh $d/
+          mv -v scripts/nemo/GigaAM/*-rnnt.py $d/
+
+          ls -lh scripts/nemo/GigaAM/
+
+          ls -lh $d
+
+          tar cjvf ${d}.tar.bz2 $d
+
       - name: Release
         uses: svenstaro/upload-release-action@v2
         with:
@@ -61,7 +89,7 @@ jobs:
           repo_token: ${{ secrets.UPLOAD_GH_SHERPA_ONNX_TOKEN }}
           tag: asr-models
 
-      - name: Publish to huggingface (CTC)
+      - name: Publish to huggingface (Transducer)
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         uses: nick-fields/retry@v3
@@ -73,7 +101,7 @@ jobs:
             git config --global user.email "csukuangfj@gmail.com"
             git config --global user.name "Fangjun Kuang"
 
-            d=sherpa-onnx-nemo-ctc-giga-am-russian-2024-10-24
+            d=sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24
             export GIT_LFS_SKIP_SMUDGE=1
             export GIT_CLONE_PROTECTION_ACTIVE=false
             git clone https://csukuangfj:$HF_TOKEN@huggingface.co/csukuangfj/$d huggingface
@@ -354,6 +354,24 @@ def get_models():
         popd
         """,
     ),
+    Model(
+        model_name="sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24",
+        idx=20,
+        lang="ru",
+        short_name="nemo_transducer_giga_am",
+        cmd="""
+        pushd $model_name
+
+        rm -rfv test_wavs
+
+        rm -fv *.sh
+        rm -fv *.py
+
+        ls -lh
+
+        popd
+        """,
+    ),
 ]
 return models
 
@@ -75,6 +75,7 @@ def add_meta_data(filename: str, meta_data: Dict[str, str]):
     onnx.save(model, filename)
 
 
+@torch.no_grad()
 def main():
     model = EncDecCTCModel.from_config_file("./ctc_model_config.yaml")
     ckpt = torch.load("./ctc_model_weights.ckpt", map_location="cpu")
--- /dev/null
+++ b/scripts/nemo/GigaAM/export-onnx-rnnt.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
+
+from typing import Dict
+
+import onnx
+import torch
+import torchaudio
+from nemo.collections.asr.models import EncDecRNNTBPEModel
+from nemo.collections.asr.modules.audio_preprocessing import (
+    AudioToMelSpectrogramPreprocessor as NeMoAudioToMelSpectrogramPreprocessor,
+)
+from nemo.collections.asr.parts.preprocessing.features import (
+    FilterbankFeaturesTA as NeMoFilterbankFeaturesTA,
+)
+from onnxruntime.quantization import QuantType, quantize_dynamic
+
+
+def add_meta_data(filename: str, meta_data: Dict[str, str]):
+    """Add meta data to an ONNX model. It is changed in-place.
+
+    Args:
+      filename:
+        Filename of the ONNX model to be changed.
+      meta_data:
+        Key-value pairs.
+    """
+    model = onnx.load(filename)
+    while len(model.metadata_props):
+        model.metadata_props.pop()
+
+    for key, value in meta_data.items():
+        meta = model.metadata_props.add()
+        meta.key = key
+        meta.value = str(value)
+
+    onnx.save(model, filename)
+
+
+class FilterbankFeaturesTA(NeMoFilterbankFeaturesTA):
+    def __init__(self, mel_scale: str = "htk", wkwargs=None, **kwargs):
+        if "window_size" in kwargs:
+            del kwargs["window_size"]
+        if "window_stride" in kwargs:
+            del kwargs["window_stride"]
+
+        super().__init__(**kwargs)
+
+        self._mel_spec_extractor: torchaudio.transforms.MelSpectrogram = (
+            torchaudio.transforms.MelSpectrogram(
+                sample_rate=self._sample_rate,
+                win_length=self.win_length,
+                hop_length=self.hop_length,
+                n_mels=kwargs["nfilt"],
+                window_fn=self.torch_windows[kwargs["window"]],
+                mel_scale=mel_scale,
+                norm=kwargs["mel_norm"],
+                n_fft=kwargs["n_fft"],
+                f_max=kwargs.get("highfreq", None),
+                f_min=kwargs.get("lowfreq", 0),
+                wkwargs=wkwargs,
+            )
+        )
+
+
+class AudioToMelSpectrogramPreprocessor(NeMoAudioToMelSpectrogramPreprocessor):
+    def __init__(self, mel_scale: str = "htk", **kwargs):
+        super().__init__(**kwargs)
+        kwargs["nfilt"] = kwargs["features"]
+        del kwargs["features"]
+        self.featurizer = (
+            FilterbankFeaturesTA(  # Deprecated arguments; kept for config compatibility
+                mel_scale=mel_scale,
+                **kwargs,
+            )
+        )
+
+
+@torch.no_grad()
+def main():
+    model = EncDecRNNTBPEModel.from_config_file("./rnnt_model_config.yaml")
+    ckpt = torch.load("./rnnt_model_weights.ckpt", map_location="cpu")
+    model.load_state_dict(ckpt, strict=False)
+    model.eval()
+
+    with open("./tokens.txt", "w", encoding="utf-8") as f:
+        for i, s in enumerate(model.joint.vocabulary):
+            f.write(f"{s} {i}\n")
+        f.write(f"<blk> {i+1}\n")
+        print("Saved to tokens.txt")
+
+    model.encoder.export("encoder.onnx")
+    model.decoder.export("decoder.onnx")
+    model.joint.export("joiner.onnx")
+
+    meta_data = {
+        "vocab_size": model.decoder.vocab_size,  # not including the blank
+        "pred_rnn_layers": model.decoder.pred_rnn_layers,
+        "pred_hidden": model.decoder.pred_hidden,
+        "normalize_type": "",
+        "subsampling_factor": 4,
+        "model_type": "EncDecRNNTBPEModel",
+        "version": "1",
+        "model_author": "https://github.com/salute-developers/GigaAM",
+        "license": "https://github.com/salute-developers/GigaAM/blob/main/GigaAM%20License_NC.pdf",
+        "language": "Russian",
+        "is_giga_am": 1,
+    }
+    add_meta_data("encoder.onnx", meta_data)
+
+    quantize_dynamic(
+        model_input="encoder.onnx",
+        model_output="encoder.int8.onnx",
+        weight_type=QuantType.QUInt8,
+    )
+
+
+if __name__ == "__main__":
+    main()
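
The is_giga_am entry written by add_meta_data() above is what the C++ runtime reads back (see the SHERPA_ONNX_READ_META_DATA_WITH_DEFAULT hunk further below) to switch the feature extractor into GigaAM mode. A quick sketch, not part of the commit, for inspecting the exported metadata; it assumes export-onnx-rnnt.py has already produced encoder.int8.onnx in the current directory:

    import onnxruntime as ort

    # test-onnx-rnnt.py below reads these same keys from encoder.int8.onnx,
    # so the custom metadata survives dynamic quantization.
    sess = ort.InferenceSession(
        "encoder.int8.onnx", providers=["CPUExecutionProvider"]
    )
    meta = sess.get_modelmeta().custom_metadata_map
    for key in (
        "model_type",
        "vocab_size",
        "pred_rnn_layers",
        "pred_hidden",
        "normalize_type",
        "is_giga_am",
    ):
        print(key, "=", meta.get(key))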
@@ -21,11 +21,15 @@ function install_nemo() {
 }
 
 function download_files() {
-  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/ctc_model_weights.ckpt
-  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/ctc_model_config.yaml
-  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/example.wav
-  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/long_example.wav
-  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM%20License_NC.pdf
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/ctc_model_weights.ckpt
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/ctc_model_config.yaml
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/example.wav
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/long_example.wav
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/ctc/ctc_model_weights.ckpt
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/ctc/ctc_model_config.yaml
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/example.wav
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/long_example.wav
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/GigaAM%20License_NC.pdf
 }
 
 install_nemo
--- /dev/null
+++ b/scripts/nemo/GigaAM/run-rnnt.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
+
+set -ex
+
+function install_nemo() {
+  curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+  python3 get-pip.py
+
+  pip install torch==2.4.0 torchaudio==2.4.0 -f https://download.pytorch.org/whl/torch_stable.html
+
+  pip install -qq wget text-unidecode "matplotlib>=3.3.2" onnx onnxruntime pybind11 Cython einops kaldi-native-fbank soundfile librosa
+  pip install -qq ipython
+
+  # sudo apt-get install -q -y sox libsndfile1 ffmpeg python3-pip ipython
+
+  BRANCH='main'
+  python3 -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
+
+  pip install numpy==1.26.4
+}
+
+function download_files() {
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/rnnt_model_weights.ckpt
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/rnnt_model_config.yaml
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/example.wav
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/long_example.wav
+  # curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/tokenizer_all_sets.tar
+
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/rnnt/rnnt_model_weights.ckpt
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/rnnt/rnnt_model_config.yaml
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/example.wav
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/long_example.wav
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/GigaAM%20License_NC.pdf
+  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM/rnnt/tokenizer_all_sets.tar
+  tar -xf tokenizer_all_sets.tar && rm tokenizer_all_sets.tar
+  ls -lh
+  echo "---"
+  ls -lh tokenizer_all_sets
+  echo "---"
+}
+
+install_nemo
+download_files
+
+python3 ./export-onnx-rnnt.py
+ls -lh
+python3 ./test-onnx-rnnt.py
+rm -v encoder.onnx
+ls -lh
--- /dev/null
+++ b/scripts/nemo/GigaAM/test-onnx-rnnt.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python3
+# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
+
+import argparse
+from pathlib import Path
+
+import kaldi_native_fbank as knf
+import librosa
+import numpy as np
+import onnxruntime as ort
+import soundfile as sf
+import torch
+
+
+def create_fbank():
+    opts = knf.FbankOptions()
+    opts.frame_opts.dither = 0
+    opts.frame_opts.remove_dc_offset = False
+    opts.frame_opts.preemph_coeff = 0
+    opts.frame_opts.window_type = "hann"
+
+    # Even though GigaAM uses an FFT size of 400, we use 512 here,
+    # since kaldi-native-fbank only supports FFT sizes that are powers of 2.
+    opts.frame_opts.round_to_power_of_two = True
+
+    opts.mel_opts.low_freq = 0
+    opts.mel_opts.high_freq = 8000
+    opts.mel_opts.num_bins = 64
+
+    fbank = knf.OnlineFbank(opts)
+    return fbank
+
+
+def compute_features(audio, fbank):
+    assert len(audio.shape) == 1, audio.shape
+    fbank.accept_waveform(16000, audio)
+    ans = []
+    processed = 0
+    while processed < fbank.num_frames_ready:
+        ans.append(np.array(fbank.get_frame(processed)))
+        processed += 1
+    ans = np.stack(ans)
+    return ans
+
+
+def display(sess):
+    print("==========Input==========")
+    for i in sess.get_inputs():
+        print(i)
+    print("==========Output==========")
+    for i in sess.get_outputs():
+        print(i)
+
+
+"""
+==========Input==========
+NodeArg(name='audio_signal', type='tensor(float)', shape=['audio_signal_dynamic_axes_1', 64, 'audio_signal_dynamic_axes_2'])
+NodeArg(name='length', type='tensor(int64)', shape=['length_dynamic_axes_1'])
+==========Output==========
+NodeArg(name='outputs', type='tensor(float)', shape=['outputs_dynamic_axes_1', 768, 'outputs_dynamic_axes_2'])
+NodeArg(name='encoded_lengths', type='tensor(int64)', shape=['encoded_lengths_dynamic_axes_1'])
+==========Input==========
+NodeArg(name='targets', type='tensor(int32)', shape=['targets_dynamic_axes_1', 'targets_dynamic_axes_2'])
+NodeArg(name='target_length', type='tensor(int32)', shape=['target_length_dynamic_axes_1'])
+NodeArg(name='states.1', type='tensor(float)', shape=[1, 'states.1_dim_1', 320])
+NodeArg(name='onnx::LSTM_3', type='tensor(float)', shape=[1, 1, 320])
+==========Output==========
+NodeArg(name='outputs', type='tensor(float)', shape=['outputs_dynamic_axes_1', 320, 'outputs_dynamic_axes_2'])
+NodeArg(name='prednet_lengths', type='tensor(int32)', shape=['prednet_lengths_dynamic_axes_1'])
+NodeArg(name='states', type='tensor(float)', shape=[1, 'states_dynamic_axes_1', 320])
+NodeArg(name='74', type='tensor(float)', shape=[1, 'states_dynamic_axes_1', 320])
+==========Input==========
+NodeArg(name='encoder_outputs', type='tensor(float)', shape=['encoder_outputs_dynamic_axes_1', 768, 'encoder_outputs_dynamic_axes_2'])
+NodeArg(name='decoder_outputs', type='tensor(float)', shape=['decoder_outputs_dynamic_axes_1', 320, 'decoder_outputs_dynamic_axes_2'])
+==========Output==========
+NodeArg(name='outputs', type='tensor(float)', shape=['outputs_dynamic_axes_1', 'outputs_dynamic_axes_2', 'outputs_dynamic_axes_3', 513])
+"""
+
+
+class OnnxModel:
+    def __init__(
+        self,
+        encoder: str,
+        decoder: str,
+        joiner: str,
+    ):
+        self.init_encoder(encoder)
+        display(self.encoder)
+        self.init_decoder(decoder)
+        display(self.decoder)
+        self.init_joiner(joiner)
+        display(self.joiner)
+
+    def init_encoder(self, encoder):
+        session_opts = ort.SessionOptions()
+        session_opts.inter_op_num_threads = 1
+        session_opts.intra_op_num_threads = 1
+
+        self.encoder = ort.InferenceSession(
+            encoder,
+            sess_options=session_opts,
+            providers=["CPUExecutionProvider"],
+        )
+
+        meta = self.encoder.get_modelmeta().custom_metadata_map
+        self.normalize_type = meta["normalize_type"]
+        print(meta)
+
+        self.pred_rnn_layers = int(meta["pred_rnn_layers"])
+        self.pred_hidden = int(meta["pred_hidden"])
+
+    def init_decoder(self, decoder):
+        session_opts = ort.SessionOptions()
+        session_opts.inter_op_num_threads = 1
+        session_opts.intra_op_num_threads = 1
+
+        self.decoder = ort.InferenceSession(
+            decoder,
+            sess_options=session_opts,
+            providers=["CPUExecutionProvider"],
+        )
+
+    def init_joiner(self, joiner):
+        session_opts = ort.SessionOptions()
+        session_opts.inter_op_num_threads = 1
+        session_opts.intra_op_num_threads = 1
+
+        self.joiner = ort.InferenceSession(
+            joiner,
+            sess_options=session_opts,
+            providers=["CPUExecutionProvider"],
+        )
+
+    def get_decoder_state(self):
+        batch_size = 1
+        state0 = torch.zeros(self.pred_rnn_layers, batch_size, self.pred_hidden).numpy()
+        state1 = torch.zeros(self.pred_rnn_layers, batch_size, self.pred_hidden).numpy()
+        return state0, state1
+
+    def run_encoder(self, x: np.ndarray):
+        # x: (T, C)
+        x = torch.from_numpy(x)
+        x = x.t().unsqueeze(0)
+        # x: [1, C, T]
+        x_lens = torch.tensor([x.shape[-1]], dtype=torch.int64)
+
+        (encoder_out, out_len) = self.encoder.run(
+            [
+                self.encoder.get_outputs()[0].name,
+                self.encoder.get_outputs()[1].name,
+            ],
+            {
+                self.encoder.get_inputs()[0].name: x.numpy(),
+                self.encoder.get_inputs()[1].name: x_lens.numpy(),
+            },
+        )
+        # [batch_size, dim, T]
+        return encoder_out
+
+    def run_decoder(
+        self,
+        token: int,
+        state0: np.ndarray,
+        state1: np.ndarray,
+    ):
+        target = torch.tensor([[token]], dtype=torch.int32).numpy()
+        target_len = torch.tensor([1], dtype=torch.int32).numpy()
+
+        (
+            decoder_out,
+            decoder_out_length,
+            state0_next,
+            state1_next,
+        ) = self.decoder.run(
+            [
+                self.decoder.get_outputs()[0].name,
+                self.decoder.get_outputs()[1].name,
+                self.decoder.get_outputs()[2].name,
+                self.decoder.get_outputs()[3].name,
+            ],
+            {
+                self.decoder.get_inputs()[0].name: target,
+                self.decoder.get_inputs()[1].name: target_len,
+                self.decoder.get_inputs()[2].name: state0,
+                self.decoder.get_inputs()[3].name: state1,
+            },
+        )
+        return decoder_out, state0_next, state1_next
+
+    def run_joiner(
+        self,
+        encoder_out: np.ndarray,
+        decoder_out: np.ndarray,
+    ):
+        # encoder_out: [batch_size, dim, 1]
+        # decoder_out: [batch_size, dim, 1]
+        logit = self.joiner.run(
+            [
+                self.joiner.get_outputs()[0].name,
+            ],
+            {
+                self.joiner.get_inputs()[0].name: encoder_out,
+                self.joiner.get_inputs()[1].name: decoder_out,
+            },
+        )[0]
+        # logit: [batch_size, 1, 1, vocab_size]
+        return logit
+
+
+def main():
+    model = OnnxModel("encoder.int8.onnx", "decoder.onnx", "joiner.onnx")
+
+    id2token = dict()
+    with open("./tokens.txt", encoding="utf-8") as f:
+        for line in f:
+            t, idx = line.split()
+            id2token[int(idx)] = t
+
+    fbank = create_fbank()
+    audio, sample_rate = sf.read("./example.wav", dtype="float32", always_2d=True)
+    audio = audio[:, 0]  # only use the first channel
+    if sample_rate != 16000:
+        audio = librosa.resample(
+            audio,
+            orig_sr=sample_rate,
+            target_sr=16000,
+        )
+        sample_rate = 16000
+
+    tail_padding = np.zeros(sample_rate * 2)
+
+    audio = np.concatenate([audio, tail_padding])
+
+    blank = len(id2token) - 1
+    ans = [blank]
+    state0, state1 = model.get_decoder_state()
+    decoder_out, state0_next, state1_next = model.run_decoder(ans[-1], state0, state1)
+
+    features = compute_features(audio, fbank)
+    print("audio.shape", audio.shape)
+    print("features.shape", features.shape)
+
+    encoder_out = model.run_encoder(features)
+    # encoder_out: [batch_size, dim, T]
+    for t in range(encoder_out.shape[2]):
+        encoder_out_t = encoder_out[:, :, t : t + 1]
+        logits = model.run_joiner(encoder_out_t, decoder_out)
+        logits = torch.from_numpy(logits)
+        logits = logits.squeeze()
+        idx = torch.argmax(logits, dim=-1).item()
+        if idx != blank:
+            ans.append(idx)
+            state0 = state0_next
+            state1 = state1_next
+            decoder_out, state0_next, state1_next = model.run_decoder(
+                ans[-1], state0, state1
+            )
+
+    ans = ans[1:]  # remove the first blank
+    print(ans)
+    tokens = [id2token[i] for i in ans]
+    underline = "▁"
+    # underline = b"\xe2\x96\x81".decode()
+    text = "".join(tokens).replace(underline, " ").strip()
+    print("./example.wav")
+    print(text)
+
+
+if __name__ == "__main__":
+    main()
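
For reference, the decoding loop in main() above is a frame-synchronous greedy search: each encoder frame is joined with the current prediction-network output, and the LSTM state advances only when a non-blank token is emitted (at most one token per frame in this implementation). A compact restatement, a sketch reusing the OnnxModel class above:

    import numpy as np

    def greedy_search(model, encoder_out, blank):
        # encoder_out: [batch_size, dim, T], as returned by model.run_encoder()
        hyp = [blank]  # prime the decoder with the blank token
        state0, state1 = model.get_decoder_state()
        decoder_out, next0, next1 = model.run_decoder(hyp[-1], state0, state1)
        for t in range(encoder_out.shape[2]):
            logit = model.run_joiner(encoder_out[:, :, t : t + 1], decoder_out)
            idx = int(np.argmax(logit.squeeze()))
            if idx != blank:
                hyp.append(idx)
                # advance the prediction network only on non-blank emissions
                state0, state1 = next0, next1
                decoder_out, next0, next1 = model.run_decoder(hyp[-1], state0, state1)
        return hyp[1:]  # drop the priming blank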
@@ -166,7 +166,8 @@ std::unique_ptr<OfflineRecognizerImpl> OfflineRecognizerImpl::Create(
     return std::make_unique<OfflineRecognizerParaformerImpl>(config);
   }
 
-  if (model_type == "EncDecHybridRNNTCTCBPEModel" &&
+  if ((model_type == "EncDecHybridRNNTCTCBPEModel" ||
+       model_type == "EncDecRNNTBPEModel") &&
       !config.model_config.transducer.decoder_filename.empty() &&
       !config.model_config.transducer.joiner_filename.empty()) {
     return std::make_unique<OfflineRecognizerTransducerNeMoImpl>(config);
@@ -191,6 +192,7 @@ std::unique_ptr<OfflineRecognizerImpl> OfflineRecognizerImpl::Create(
       " - EncDecCTCModelBPE models from NeMo\n"
       " - EncDecCTCModel models from NeMo\n"
       " - EncDecHybridRNNTCTCBPEModel models from NeMo\n"
+      " - EncDecRNNTBPEModel models from NeMo\n"
       " - Whisper models\n"
       " - Tdnn models\n"
       " - Zipformer CTC models\n"
@@ -338,7 +340,8 @@ std::unique_ptr<OfflineRecognizerImpl> OfflineRecognizerImpl::Create(
     return std::make_unique<OfflineRecognizerParaformerImpl>(mgr, config);
   }
 
-  if (model_type == "EncDecHybridRNNTCTCBPEModel" &&
+  if ((model_type == "EncDecHybridRNNTCTCBPEModel" ||
+       model_type == "EncDecRNNTBPEModel") &&
       !config.model_config.transducer.decoder_filename.empty() &&
       !config.model_config.transducer.joiner_filename.empty()) {
     return std::make_unique<OfflineRecognizerTransducerNeMoImpl>(mgr, config);
@@ -363,6 +366,7 @@ std::unique_ptr<OfflineRecognizerImpl> OfflineRecognizerImpl::Create(
       " - EncDecCTCModelBPE models from NeMo\n"
       " - EncDecCTCModel models from NeMo\n"
       " - EncDecHybridRNNTCTCBPEModel models from NeMo\n"
+      " - EncDecRNNTBPEModel models from NeMo\n"
       " - Whisper models\n"
       " - Tdnn models\n"
       " - Zipformer CTC models\n"
@@ -139,23 +139,29 @@ class OfflineRecognizerTransducerNeMoImpl : public OfflineRecognizerImpl {
     }
   }
 
-  OfflineRecognizerConfig GetConfig() const override {
-    return config_;
-  }
+  OfflineRecognizerConfig GetConfig() const override { return config_; }
 
  private:
   void PostInit() {
     config_.feat_config.nemo_normalize_type =
         model_->FeatureNormalizationMethod();
 
+    config_.feat_config.dither = 0;
+
+    if (model_->IsGigaAM()) {
+      config_.feat_config.low_freq = 0;
+      config_.feat_config.high_freq = 8000;
+      config_.feat_config.remove_dc_offset = false;
+      config_.feat_config.preemph_coeff = 0;
+      config_.feat_config.window_type = "hann";
+      config_.feat_config.feature_dim = 64;
+    } else {
     config_.feat_config.low_freq = 0;
     // config_.feat_config.high_freq = 8000;
     config_.feat_config.is_librosa = true;
     config_.feat_config.remove_dc_offset = false;
     // config_.feat_config.window_type = "hann";
-    config_.feat_config.dither = 0;
-    config_.feat_config.nemo_normalize_type =
-        model_->FeatureNormalizationMethod();
+    }
 
     int32_t vocab_size = model_->VocabSize();
 
@@ -153,6 +153,8 @@ class OfflineTransducerNeMoModel::Impl {
 
   std::string FeatureNormalizationMethod() const { return normalize_type_; }
 
+  bool IsGigaAM() const { return is_giga_am_; }
+
  private:
   void InitEncoder(void *model_data, size_t model_data_length) {
     encoder_sess_ = std::make_unique<Ort::Session>(
@@ -181,9 +183,11 @@ class OfflineTransducerNeMoModel::Impl {
     vocab_size_ += 1;
 
     SHERPA_ONNX_READ_META_DATA(subsampling_factor_, "subsampling_factor");
-    SHERPA_ONNX_READ_META_DATA_STR(normalize_type_, "normalize_type");
+    SHERPA_ONNX_READ_META_DATA_STR_ALLOW_EMPTY(normalize_type_,
+                                               "normalize_type");
     SHERPA_ONNX_READ_META_DATA(pred_rnn_layers_, "pred_rnn_layers");
     SHERPA_ONNX_READ_META_DATA(pred_hidden_, "pred_hidden");
+    SHERPA_ONNX_READ_META_DATA_WITH_DEFAULT(is_giga_am_, "is_giga_am", 0);
 
     if (normalize_type_ == "NA") {
       normalize_type_ = "";
@@ -245,6 +249,7 @@ class OfflineTransducerNeMoModel::Impl {
   std::string normalize_type_;
   int32_t pred_rnn_layers_ = -1;
   int32_t pred_hidden_ = -1;
+  int32_t is_giga_am_ = 0;
 };
 
 OfflineTransducerNeMoModel::OfflineTransducerNeMoModel(
@@ -298,4 +303,6 @@ std::string OfflineTransducerNeMoModel::FeatureNormalizationMethod() const {
   return impl_->FeatureNormalizationMethod();
 }
 
+bool OfflineTransducerNeMoModel::IsGigaAM() const { return impl_->IsGigaAM(); }
+
 }  // namespace sherpa_onnx
@@ -93,6 +93,8 @@ class OfflineTransducerNeMoModel {
   // for details
   std::string FeatureNormalizationMethod() const;
 
+  bool IsGigaAM() const;
+
  private:
   class Impl;
   std::unique_ptr<Impl> impl_;
@@ -404,6 +404,19 @@ fun getOfflineModelConfig(type: Int): OfflineModelConfig? {
                 tokens = "$modelDir/tokens.txt",
             )
         }
+
+        20 -> {
+            val modelDir = "sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24"
+            return OfflineModelConfig(
+                transducer = OfflineTransducerModelConfig(
+                    encoder = "$modelDir/encoder.int8.onnx",
+                    decoder = "$modelDir/decoder.onnx",
+                    joiner = "$modelDir/joiner.onnx",
+                ),
+                tokens = "$modelDir/tokens.txt",
+                modelType = "nemo_transducer",
+            )
+        }
     }
     return null
 }