offline-lm.h
1.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
// sherpa-onnx/csrc/offline-lm.h
//
// Copyright (c) 2023 Xiaomi Corporation
#ifndef SHERPA_ONNX_CSRC_OFFLINE_LM_H_
#define SHERPA_ONNX_CSRC_OFFLINE_LM_H_
#include <cstdint>  // for int32_t used in ComputeLMScore()
#include <memory>
#include <vector>

#include "onnxruntime_cxx_api.h"  // NOLINT

#include "sherpa-onnx/csrc/hypothesis.h"
#include "sherpa-onnx/csrc/offline-lm-config.h"
namespace sherpa_onnx {
// Abstract base class for offline (non-streaming) neural language models.
// Concrete implementations are produced by the Create() factory functions
// and are used to rescore decoding hypotheses (see ComputeLMScore()).
class OfflineLM {
public:
virtual ~OfflineLM() = default;
// Factory: create a concrete OfflineLM according to the given config.
// Returns nullptr semantics are defined by the implementation — TODO confirm.
static std::unique_ptr<OfflineLM> Create(const OfflineLMConfig &config);
// Factory overload that loads model resources through a manager object
// (presumably a platform resource/asset manager, e.g., on Android —
// NOTE(review): Manager's contract is defined elsewhere; confirm there).
template <typename Manager>
static std::unique_ptr<OfflineLM> Create(Manager *mgr,
const OfflineLMConfig &config);
/** Rescore a batch of sentences.
 *
 * @param x A 2-D tensor of shape (N, L) with data type int64.
 * @param x_lens A 1-D tensor of shape (N,) with data type int64.
 *               It contains the number of valid tokens in x before padding.
 * @return Return a 1-D tensor of shape (N,) containing the negative log
 *         likelihood of each utterance. Its data type is float32.
 *
 * Caution: It returns negative log likelihood (nll), not log likelihood.
 */
virtual Ort::Value Rescore(Ort::Value x, Ort::Value x_lens) = 0;
// Updates hyp.lm_log_prob of each hypothesis in *hyps using this LM.
// (Calls Rescore() internally — implementation lives in the .cc file.)
//
// @param scale Scale factor applied to the LM score.
// @param context_size Context size of the transducer decoder model.
// @param hyps Hypotheses to rescore. It is changed in-place.
void ComputeLMScore(float scale, int32_t context_size,
std::vector<Hypotheses> *hyps);
};
} // namespace sherpa_onnx
#endif // SHERPA_ONNX_CSRC_OFFLINE_LM_H_