Fangjun Kuang
Committed by GitHub

Fix initial tokens for decoding (#246)

cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
project(sherpa-onnx)
set(SHERPA_ONNX_VERSION "1.6.1")
set(SHERPA_ONNX_VERSION "1.6.2")
# Disable warning about
#
... ...
... ... @@ -30,8 +30,9 @@ OfflineTransducerGreedySearchDecoder::Decode(Ort::Value encoder_out,
std::vector<OfflineTransducerDecoderResult> ans(batch_size);
for (auto &r : ans) {
r.tokens.resize(context_size, -1);
// 0 is the ID of the blank token
r.tokens.resize(context_size, 0);
r.tokens.back() = 0;
}
auto decoder_input = model_->BuildDecoderInput(ans, ans.size());
... ...
... ... @@ -32,7 +32,8 @@ OfflineTransducerModifiedBeamSearchDecoder::Decode(
int32_t vocab_size = model_->VocabSize();
int32_t context_size = model_->ContextSize();
std::vector<int64_t> blanks(context_size, 0);
std::vector<int64_t> blanks(context_size, -1);
blanks.back() = 0;
std::deque<Hypotheses> finalized;
std::vector<Hypotheses> cur;
... ...
... ... @@ -55,7 +55,8 @@ OnlineTransducerGreedySearchDecoder::GetEmptyResult() const {
int32_t context_size = model_->ContextSize();
int32_t blank_id = 0; // always 0
OnlineTransducerDecoderResult r;
r.tokens.resize(context_size, blank_id);
r.tokens.resize(context_size, -1);
r.tokens.back() = blank_id;
return r;
}
... ...
... ... @@ -42,7 +42,9 @@ OnlineTransducerModifiedBeamSearchDecoder::GetEmptyResult() const {
int32_t context_size = model_->ContextSize();
int32_t blank_id = 0; // always 0
OnlineTransducerDecoderResult r;
std::vector<int64_t> blanks(context_size, blank_id);
std::vector<int64_t> blanks(context_size, -1);
blanks.back() = blank_id;
Hypotheses blank_hyp({{blanks, 0}});
r.hyps = std::move(blank_hyp);
r.tokens = std::move(blanks);
... ...