Fix style issues reported by clang-tidy (#1167)
Committed by GitHub

Showing 7 changed files, with 21 additions and 22 deletions.
@@ -102,13 +102,13 @@ class JiebaLexicon::Impl {
       this_sentence.push_back(blank);
 
       if (w == "。" || w == "!" || w == "?" || w == ",") {
-        ans.push_back(std::move(this_sentence));
+        ans.emplace_back(std::move(this_sentence));
         this_sentence = {};
       }
     }  // for (const auto &w : words)
 
     if (!this_sentence.empty()) {
-      ans.push_back(std::move(this_sentence));
+      ans.emplace_back(std::move(this_sentence));
     }
 
     return ans;
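Most hunks in this commit make the same substitution: ans.push_back(std::move(this_sentence)) becomes ans.emplace_back(std::move(this_sentence)). Below is a minimal, self-contained sketch of that pattern; the element type std::vector<int64_t> and the sample contents are assumptions for illustration only, not the exact types used in sherpa-onnx.

#include <cstdint>
#include <utility>
#include <vector>

int main() {
  std::vector<std::vector<int64_t>> ans;           // collected sentences
  std::vector<int64_t> this_sentence = {1, 2, 3};  // sentence being built

  // Before: push_back moves an already-constructed element into the vector.
  // ans.push_back(std::move(this_sentence));

  // After: emplace_back forwards the argument to the element's constructor
  // and builds it in place. With an rvalue argument this is still a single
  // move, so behavior is unchanged; the change is stylistic.
  ans.emplace_back(std::move(this_sentence));

  // The moved-from vector is left valid but unspecified, so it is reset
  // before being reused for the next sentence.
  this_sentence = {};

  return ans.size() == 1 ? 0 : 1;
}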
@@ -253,7 +253,7 @@ std::vector<TokenIDs> Lexicon::ConvertTextToTokenIdsChinese(
     if (eos != -1) {
       this_sentence.push_back(eos);
     }
-    ans.push_back(std::move(this_sentence));
+    ans.emplace_back(std::move(this_sentence));
     this_sentence = {};
 
     if (sil != -1) {
@@ -283,7 +283,7 @@ std::vector<TokenIDs> Lexicon::ConvertTextToTokenIdsChinese(
   if (eos != -1) {
     this_sentence.push_back(eos);
   }
-  ans.push_back(std::move(this_sentence));
+  ans.emplace_back(std::move(this_sentence));
 
   return ans;
 }
@@ -324,7 +324,7 @@ std::vector<TokenIDs> Lexicon::ConvertTextToTokenIdsNotChinese(
 
     if (w != ",") {
       this_sentence.push_back(blank);
-      ans.push_back(std::move(this_sentence));
+      ans.emplace_back(std::move(this_sentence));
       this_sentence = {};
     }
 
@@ -348,7 +348,7 @@ std::vector<TokenIDs> Lexicon::ConvertTextToTokenIdsNotChinese(
   }
 
   if (!this_sentence.empty()) {
-    ans.push_back(std::move(this_sentence));
+    ans.emplace_back(std::move(this_sentence));
   }
 
   return ans;
@@ -91,7 +91,6 @@ class MeloTtsLexicon::Impl {
     std::vector<TokenIDs> ans;
     TokenIDs this_sentence;
 
-    int32_t blank = token2id_.at("_");
     for (const auto &w : words) {
       auto ids = ConvertWordToIds(w);
       if (ids.tokens.empty()) {
@@ -136,7 +136,7 @@ std::vector<TokenIDs> OfflineTtsCharacterFrontend::ConvertTextToTokenIds(
           this_sentence.push_back(eos_id);
         }
 
-        ans.push_back(std::move(this_sentence));
+        ans.emplace_back(std::move(this_sentence));
         this_sentence = {};
 
         // re-initialize this_sentence
@@ -152,7 +152,7 @@ std::vector<TokenIDs> OfflineTtsCharacterFrontend::ConvertTextToTokenIds(
     }
 
     if (static_cast<int32_t>(this_sentence.size()) > 1 + use_eos_bos) {
-      ans.push_back(std::move(this_sentence));
+      ans.emplace_back(std::move(this_sentence));
     }
   } else {
     // not adding blank
@@ -171,7 +171,7 @@ std::vector<TokenIDs> OfflineTtsCharacterFrontend::ConvertTextToTokenIds(
           this_sentence.push_back(eos_id);
         }
 
-        ans.push_back(std::move(this_sentence));
+        ans.emplace_back(std::move(this_sentence));
         this_sentence = {};
 
         // re-initialize this_sentence
@@ -182,7 +182,7 @@ std::vector<TokenIDs> OfflineTtsCharacterFrontend::ConvertTextToTokenIds(
     }
 
     if (this_sentence.size() > 1) {
-      ans.push_back(std::move(this_sentence));
+      ans.emplace_back(std::move(this_sentence));
     }
   }
 
@@ -15,12 +15,12 @@ namespace sherpa_onnx {
 struct TokenIDs {
   TokenIDs() = default;
 
-  /*implicit*/ TokenIDs(const std::vector<int64_t> &tokens)  // NOLINT
-      : tokens{tokens} {}
+  /*implicit*/ TokenIDs(std::vector<int64_t> tokens)  // NOLINT
+      : tokens{std::move(tokens)} {}
 
-  TokenIDs(const std::vector<int64_t> &tokens,
-           const std::vector<int64_t> &tones)
-      : tokens{tokens}, tones{tones} {}
+  TokenIDs(std::vector<int64_t> tokens,  // NOLINT
+           std::vector<int64_t> tones)  // NOLINT
+      : tokens{std::move(tokens)}, tones{std::move(tones)} {}
 
   std::string ToString() const;
 
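This hunk switches the TokenIDs constructors from const-reference parameters to by-value "sink" parameters that are std::move'd into the members: an rvalue argument is now moved instead of deep-copied, while an lvalue argument costs one copy plus one cheap move. The sketch below is a simplified, self-contained copy of the struct (ToString and any other members omitted) together with a hypothetical caller showing that cost model.

#include <cstdint>
#include <utility>
#include <vector>

// Simplified stand-in for the TokenIDs struct from the diff above.
struct TokenIDs {
  TokenIDs() = default;

  /*implicit*/ TokenIDs(std::vector<int64_t> tokens)
      : tokens{std::move(tokens)} {}

  TokenIDs(std::vector<int64_t> tokens, std::vector<int64_t> tones)
      : tokens{std::move(tokens)}, tones{std::move(tones)} {}

  std::vector<int64_t> tokens;
  std::vector<int64_t> tones;
};

int main() {
  std::vector<int64_t> ids = {1, 2, 3};

  // rvalue argument: moved into the parameter, then moved into the member;
  // no element-wise copy happens (the old const& overload would copy here).
  TokenIDs a(std::move(ids));

  // lvalue argument: one copy into the parameter, then one cheap move into
  // the member, versus exactly one copy with the old const& signature.
  std::vector<int64_t> more = {4, 5};
  TokenIDs b(more);

  return (a.tokens.size() == 3 && b.tokens.size() == 2) ? 0 : 1;
}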
@@ -157,8 +157,8 @@ Ort::Value View(Ort::Value *v) {
 
 float ComputeSum(const Ort::Value *v, int32_t n /*= -1*/) {
   std::vector<int64_t> shape = v->GetTensorTypeAndShapeInfo().GetShape();
-  auto size = static_cast<int32_t>(std::accumulate(
-      shape.begin(), shape.end(), 1, std::multiplies<int64_t>()));
+  auto size = static_cast<int32_t>(
+      std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>()));
   if (n != -1 && n < size && n > 0) {
     size = n;
   }
@@ -170,8 +170,8 @@ float ComputeSum(const Ort::Value *v, int32_t n /*= -1*/) {
 
 float ComputeMean(const Ort::Value *v, int32_t n /*= -1*/) {
   std::vector<int64_t> shape = v->GetTensorTypeAndShapeInfo().GetShape();
-  auto size = static_cast<int32_t>(std::accumulate(
-      shape.begin(), shape.end(), 1, std::multiplies<int64_t>()));
+  auto size = static_cast<int32_t>(
+      std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>()));
 
   if (n != -1 && n < size && n > 0) {
     size = n;
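The two accumulate hunks replace std::multiplies<int64_t> with the C++14 transparent functor std::multiplies<>, which deduces its operand types at each call instead of fixing them up front; this is the form clang-tidy's modernize checks prefer. Below is a small self-contained sketch of the element-count computation, with a made-up shape used only for illustration.

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  std::vector<int64_t> shape = {1, 3, 224};  // hypothetical tensor shape

  // std::multiplies<> lets each multiplication deduce its operand types
  // (int and int64_t here) instead of converting both sides to int64_t as
  // std::multiplies<int64_t> did.
  auto size = static_cast<int32_t>(
      std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>()));

  // Note: std::accumulate's running value still has the type of the initial
  // value (int, from the literal 1), so this change is stylistic rather
  // than a widening of the arithmetic.
  return size == 672 ? 0 : 1;
}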
@@ -239,12 +239,12 @@ std::vector<TokenIDs> PiperPhonemizeLexicon::ConvertTextToTokenIds(
   if (meta_data_.is_piper || meta_data_.is_icefall) {
     for (const auto &p : phonemes) {
       phoneme_ids = PiperPhonemesToIds(token2id_, p);
-      ans.push_back(std::move(phoneme_ids));
+      ans.emplace_back(std::move(phoneme_ids));
     }
   } else if (meta_data_.is_coqui) {
     for (const auto &p : phonemes) {
       phoneme_ids = CoquiPhonemesToIds(token2id_, p, meta_data_);
-      ans.push_back(std::move(phoneme_ids));
+      ans.emplace_back(std::move(phoneme_ids));
     }
 
   } else {