// sherpa-onnx/csrc/offline-whisper-model-config.h
//
// Copyright (c) 2023 Xiaomi Corporation
#ifndef SHERPA_ONNX_CSRC_OFFLINE_WHISPER_MODEL_CONFIG_H_
#define SHERPA_ONNX_CSRC_OFFLINE_WHISPER_MODEL_CONFIG_H_
#include <string>
#include "sherpa-onnx/csrc/parse-options.h"
namespace sherpa_onnx {
// Forward declaration; the full definition comes from parse-options.h above.
class ParseOptions;

// Configuration for an offline (non-streaming) Whisper model: paths to the
// encoder/decoder ONNX files plus decoding options (language, task, padding).
struct OfflineWhisperModelConfig {
  std::string encoder;
  std::string decoder;
  // Available languages can be found at
  // https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
  //
  // Note: For non-multilingual models, it supports only "en"
  //
  // If empty, we will infer it from the input audio file when
  // the model is multilingual.
  std::string language;
  // Valid values are transcribe and translate
  //
  // Note: For non-multilingual models, it supports only "transcribe"
  std::string task = "transcribe";
  // Number of tail padding frames.
  //
  // Since we remove the 30-second constraint, we need to add some paddings
  // at the end.
  //
  // Recommended values:
  //   - 50 for English models
  //   - 300 for multilingual models
  int32_t tail_paddings = -1;

  OfflineWhisperModelConfig() = default;

  // Sink parameters: taken by value and moved into the members, so callers
  // passing temporaries or std::move'd strings avoid a copy. Existing call
  // sites that pass lvalues are unaffected (they copy, as before).
  OfflineWhisperModelConfig(std::string encoder, std::string decoder,
                            std::string language, std::string task,
                            int32_t tail_paddings)
      : encoder(std::move(encoder)),
        decoder(std::move(decoder)),
        language(std::move(language)),
        task(std::move(task)),
        tail_paddings(tail_paddings) {}

  // Registers command-line flags for every field with the option parser.
  void Register(ParseOptions *po);

  // Returns true if the configuration is usable (e.g. model files exist);
  // implementation lives in the corresponding .cc file.
  bool Validate() const;

  // Human-readable dump of all fields, for logging/debugging.
  std::string ToString() const;
};
} // namespace sherpa_onnx
#endif // SHERPA_ONNX_CSRC_OFFLINE_WHISPER_MODEL_CONFIG_H_