online-stream.cc
5.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
// sherpa-onnx/csrc/online-stream.cc
//
// Copyright (c) 2023 Xiaomi Corporation
#include "sherpa-onnx/csrc/online-stream.h"
#include <memory>
#include <utility>
#include <vector>
#include "sherpa-onnx/csrc/features.h"
namespace sherpa_onnx {
// Pimpl backing class for OnlineStream. Owns the feature extractor plus all
// per-stream decoding state: frame bookkeeping, decoder results (transducer /
// CTC / paraformer), the ONNX runtime states, and the paraformer caches.
class OnlineStream::Impl {
 public:
  // `context_graph` is taken by value and moved into the member, so a caller
  // transferring ownership pays no extra shared_ptr refcount bump.
  explicit Impl(const FeatureExtractorConfig &config,
                ContextGraphPtr context_graph)
      : feat_extractor_(config), context_graph_(std::move(context_graph)) {}

  void AcceptWaveform(int32_t sampling_rate, const float *waveform, int32_t n) {
    feat_extractor_.AcceptWaveform(sampling_rate, waveform, n);
  }

  void InputFinished() const { feat_extractor_.InputFinished(); }

  // Frames available for decoding, counted relative to the last Reset()
  // (the extractor's absolute count minus the frames discarded at Reset()).
  int32_t NumFramesReady() const {
    return feat_extractor_.NumFramesReady() - start_frame_index_;
  }

  bool IsLastFrame(int32_t frame) const {
    return feat_extractor_.IsLastFrame(frame);
  }

  // `frame_index` is relative to the last Reset(); it is translated back to
  // an absolute extractor index before fetching the `n` frames.
  std::vector<float> GetFrames(int32_t frame_index, int32_t n) const {
    return feat_extractor_.GetFrames(frame_index + start_frame_index_, n);
  }

  void Reset() {
    // we don't reset the feature extractor; we only slide the window start
    // forward past the frames already consumed.
    start_frame_index_ += num_processed_frames_;
    num_processed_frames_ = 0;
  }

  // Mutable reference: callers advance this counter in place as frames are
  // consumed. The count is in pre-subsampling frames.
  int32_t &GetNumProcessedFrames() { return num_processed_frames_; }

  // Absolute index of the first frame of the current segment, i.e. the total
  // number of frames consumed before the last Reset(). Never reset itself.
  int32_t GetNumFramesSinceStart() const { return start_frame_index_; }

  // Mutable reference: callers bump the segment counter in place.
  int32_t &GetCurrentSegment() { return segment_; }

  void SetResult(const OnlineTransducerDecoderResult &r) { result_ = r; }

  OnlineTransducerDecoderResult &GetResult() { return result_; }

  OnlineCtcDecoderResult &GetCtcResult() { return ctc_result_; }

  void SetCtcResult(const OnlineCtcDecoderResult &r) { ctc_result_ = r; }

  void SetParaformerResult(const OnlineParaformerDecoderResult &r) {
    paraformer_result_ = r;
  }

  OnlineParaformerDecoderResult &GetParaformerResult() {
    return paraformer_result_;
  }

  int32_t FeatureDim() const { return feat_extractor_.FeatureDim(); }

  // Takes ownership of the model states (Ort::Value is move-only).
  void SetStates(std::vector<Ort::Value> states) {
    states_ = std::move(states);
  }

  std::vector<Ort::Value> &GetStates() { return states_; }

  const ContextGraphPtr &GetContextGraph() const { return context_graph_; }

  std::vector<float> &GetParaformerFeatCache() {
    return paraformer_feat_cache_;
  }

  std::vector<float> &GetParaformerEncoderOutCache() {
    return paraformer_encoder_out_cache_;
  }

  std::vector<float> &GetParaformerAlphaCache() {
    return paraformer_alpha_cache_;
  }

 private:
  FeatureExtractor feat_extractor_;

  /// For contextual-biasing
  ContextGraphPtr context_graph_;

  int32_t num_processed_frames_ = 0;  // before subsampling
  int32_t start_frame_index_ = 0;     // never reset
  int32_t segment_ = 0;

  OnlineTransducerDecoderResult result_;
  OnlineCtcDecoderResult ctc_result_;

  std::vector<Ort::Value> states_;  // states for transducer or ctc models

  std::vector<float> paraformer_feat_cache_;
  std::vector<float> paraformer_encoder_out_cache_;
  std::vector<float> paraformer_alpha_cache_;
  OnlineParaformerDecoderResult paraformer_result_;
};
// `config` selects the feature-extraction parameters; `context_graph` is
// optional (defaults to nullptr) and enables contextual biasing. The graph
// pointer is moved into the Impl rather than copied, avoiding an unnecessary
// shared_ptr refcount increment/decrement on the by-value parameter.
OnlineStream::OnlineStream(const FeatureExtractorConfig &config /*= {}*/,
                           ContextGraphPtr context_graph /*= nullptr */)
    : impl_(std::make_unique<Impl>(config, std::move(context_graph))) {}
OnlineStream::~OnlineStream() = default;
// Forwards `n` samples of audio to the internal feature extractor.
// Declared const because the mutable state lives behind the pimpl pointer.
void OnlineStream::AcceptWaveform(int32_t sampling_rate, const float *waveform,
                                  int32_t n) const {
  impl_->AcceptWaveform(sampling_rate, waveform, n);
}
void OnlineStream::InputFinished() const { impl_->InputFinished(); }
int32_t OnlineStream::NumFramesReady() const { return impl_->NumFramesReady(); }
// True if `frame` is the final frame of the stream (per the feature
// extractor's notion of end-of-input).
bool OnlineStream::IsLastFrame(int32_t frame) const {
  return impl_->IsLastFrame(frame);
}
// Returns `n` feature frames starting at `frame_index`, where `frame_index`
// is relative to the last Reset() (the Impl adds the segment offset).
std::vector<float> OnlineStream::GetFrames(int32_t frame_index,
                                           int32_t n) const {
  return impl_->GetFrames(frame_index, n);
}
void OnlineStream::Reset() { impl_->Reset(); }
int32_t OnlineStream::FeatureDim() const { return impl_->FeatureDim(); }
// Mutable reference to the processed-frame counter (pre-subsampling);
// callers advance it in place as they consume frames.
int32_t &OnlineStream::GetNumProcessedFrames() {
  return impl_->GetNumProcessedFrames();
}
// Total frames consumed before the last Reset(), i.e. the absolute index of
// the first frame of the current segment. This value is never reset.
int32_t OnlineStream::GetNumFramesSinceStart() const {
  return impl_->GetNumFramesSinceStart();
}
// Mutable reference to the current segment index; callers bump it in place.
int32_t &OnlineStream::GetCurrentSegment() {
  return impl_->GetCurrentSegment();
}
// Stores (copies) the transducer decoding result for this stream.
void OnlineStream::SetResult(const OnlineTransducerDecoderResult &r) {
  impl_->SetResult(r);
}
// Mutable reference to the stream's transducer decoding result.
OnlineTransducerDecoderResult &OnlineStream::GetResult() {
  return impl_->GetResult();
}
// Mutable reference to the stream's CTC decoding result.
OnlineCtcDecoderResult &OnlineStream::GetCtcResult() {
  return impl_->GetCtcResult();
}
// Stores (copies) the CTC decoding result for this stream.
void OnlineStream::SetCtcResult(const OnlineCtcDecoderResult &r) {
  impl_->SetCtcResult(r);
}
// Stores (copies) the paraformer decoding result for this stream.
void OnlineStream::SetParaformerResult(const OnlineParaformerDecoderResult &r) {
  impl_->SetParaformerResult(r);
}
// Mutable reference to the stream's paraformer decoding result.
OnlineParaformerDecoderResult &OnlineStream::GetParaformerResult() {
  return impl_->GetParaformerResult();
}
// Takes ownership of the per-stream model states (Ort::Value is move-only,
// so the vector is taken by value and moved into the Impl).
void OnlineStream::SetStates(std::vector<Ort::Value> states) {
  impl_->SetStates(std::move(states));
}
// Mutable reference to the per-stream model states set via SetStates().
std::vector<Ort::Value> &OnlineStream::GetStates() {
  return impl_->GetStates();
}
// Context graph used for contextual biasing; may hold nullptr when no
// biasing was configured.
const ContextGraphPtr &OnlineStream::GetContextGraph() const {
  return impl_->GetContextGraph();
}
// Mutable reference to the paraformer feature cache owned by this stream.
std::vector<float> &OnlineStream::GetParaformerFeatCache() {
  return impl_->GetParaformerFeatCache();
}
// Mutable reference to the paraformer encoder-output cache for this stream.
std::vector<float> &OnlineStream::GetParaformerEncoderOutCache() {
  return impl_->GetParaformerEncoderOutCache();
}
// Mutable reference to the paraformer alpha cache for this stream.
std::vector<float> &OnlineStream::GetParaformerAlphaCache() {
  return impl_->GetParaformerAlphaCache();
}
} // namespace sherpa_onnx