// sherpa-onnx/csrc/offline-lm-config.cc
//
// Copyright (c) 2023 Xiaomi Corporation
#include "sherpa-onnx/csrc/offline-lm-config.h"

#include <sstream>
#include <string>

#include "sherpa-onnx/csrc/file-utils.h"
#include "sherpa-onnx/csrc/macros.h"
namespace sherpa_onnx {
// Registers all LM-related command-line options with the option parser.
//
// @param po Option registry (not owned); each Register() call binds a
//           command-line flag to the corresponding member of this config.
void OfflineLMConfig::Register(ParseOptions *po) {
  po->Register("lm", &model, "Path to LM model.");
  po->Register("lm-scale", &scale, "LM scale.");
  po->Register("lm-num-threads", &lm_num_threads,
               "Number of threads to run the neural network of LM model");
  // NOTE: fixed wording of the help text; it previously read
  // "Specify a provider to LM model use".
  po->Register("lm-provider", &lm_provider,
               "Specify a provider for the LM model to use: cpu, cuda, coreml");
}
// Checks that this config is usable.
//
// @return true if the LM model file path is non-empty and exists on disk;
//         false otherwise (an error is logged describing the problem).
bool OfflineLMConfig::Validate() const {
  // Report an empty path explicitly; otherwise the user would see the
  // confusing message "'' does not exist".
  if (model.empty()) {
    SHERPA_ONNX_LOGE("Please provide --lm");
    return false;
  }

  if (!FileExists(model)) {
    SHERPA_ONNX_LOGE("'%s' does not exist", model.c_str());
    return false;
  }

  return true;
}
std::string OfflineLMConfig::ToString() const {
std::ostringstream os;
os << "OfflineLMConfig(";
os << "model=\"" << model << "\", ";
os << "scale=" << scale << ")";
return os.str();
}
} // namespace sherpa_onnx