Committed by
GitHub
Handle audio files less than 10s long for speaker diarization. (#1412)
If the input audio file is less than 10 seconds long, there is only one chunk, and there is no need to compute embeddings or do clustering. We can use the segmentation result from the speaker segmentation model directly.
Showing 1 changed file with 32 additions and 3 deletions.
| @@ -99,6 +99,14 @@ class OfflineSpeakerDiarizationPyannoteImpl | @@ -99,6 +99,14 @@ class OfflineSpeakerDiarizationPyannoteImpl | ||
| 99 | 99 | ||
| 100 | segmentations.clear(); | 100 | segmentations.clear(); |
| 101 | 101 | ||
| 102 | + if (labels.size() == 1) { | ||
| 103 | + if (callback) { | ||
| 104 | + callback(1, 1, callback_arg); | ||
| 105 | + } | ||
| 106 | + | ||
| 107 | + return HandleOneChunkSpecialCase(labels[0], n); | ||
| 108 | + } | ||
| 109 | + | ||
| 102 | // labels[i] is a 0-1 matrix of shape (num_frames, num_speakers) | 110 | // labels[i] is a 0-1 matrix of shape (num_frames, num_speakers) |
| 103 | 111 | ||
| 104 | // speaker count per frame | 112 | // speaker count per frame |
| @@ -201,7 +209,7 @@ class OfflineSpeakerDiarizationPyannoteImpl | @@ -201,7 +209,7 @@ class OfflineSpeakerDiarizationPyannoteImpl | ||
| 201 | } | 209 | } |
| 202 | 210 | ||
| 203 | int32_t num_chunks = (n - window_size) / window_shift + 1; | 211 | int32_t num_chunks = (n - window_size) / window_shift + 1; |
| 204 | - bool has_last_chunk = (n - window_size) % window_shift > 0; | 212 | + bool has_last_chunk = ((n - window_size) % window_shift) > 0; |
| 205 | 213 | ||
| 206 | ans.reserve(num_chunks + has_last_chunk); | 214 | ans.reserve(num_chunks + has_last_chunk); |
| 207 | 215 | ||
| @@ -524,9 +532,9 @@ class OfflineSpeakerDiarizationPyannoteImpl | @@ -524,9 +532,9 @@ class OfflineSpeakerDiarizationPyannoteImpl | ||
| 524 | count(seq, Eigen::all).array() += labels[i].array(); | 532 | count(seq, Eigen::all).array() += labels[i].array(); |
| 525 | } | 533 | } |
| 526 | 534 | ||
| 527 | - bool has_last_chunk = (num_samples - window_size) % window_shift > 0; | 535 | + bool has_last_chunk = ((num_samples - window_size) % window_shift) > 0; |
| 528 | 536 | ||
| 529 | - if (has_last_chunk) { | 537 | + if (!has_last_chunk) { |
| 530 | return count; | 538 | return count; |
| 531 | } | 539 | } |
| 532 | 540 | ||
| @@ -622,6 +630,27 @@ class OfflineSpeakerDiarizationPyannoteImpl | @@ -622,6 +630,27 @@ class OfflineSpeakerDiarizationPyannoteImpl | ||
| 622 | return ans; | 630 | return ans; |
| 623 | } | 631 | } |
| 624 | 632 | ||
| 633 | + OfflineSpeakerDiarizationResult HandleOneChunkSpecialCase( | ||
| 634 | + const Matrix2DInt32 &final_labels, int32_t num_samples) const { | ||
| 635 | + const auto &meta_data = segmentation_model_.GetModelMetaData(); | ||
| 636 | + int32_t window_size = meta_data.window_size; | ||
| 637 | + int32_t window_shift = meta_data.window_shift; | ||
| 638 | + int32_t receptive_field_shift = meta_data.receptive_field_shift; | ||
| 639 | + | ||
| 640 | + bool has_last_chunk = (num_samples - window_size) % window_shift > 0; | ||
| 641 | + if (!has_last_chunk) { | ||
| 642 | + return ComputeResult(final_labels); | ||
| 643 | + } | ||
| 644 | + | ||
| 645 | + int32_t num_frames = final_labels.rows(); | ||
| 646 | + | ||
| 647 | + int32_t new_num_frames = num_samples / receptive_field_shift; | ||
| 648 | + | ||
| 649 | + num_frames = (new_num_frames <= num_frames) ? new_num_frames : num_frames; | ||
| 650 | + | ||
| 651 | + return ComputeResult(final_labels(Eigen::seq(0, num_frames), Eigen::all)); | ||
| 652 | + } | ||
| 653 | + | ||
| 625 | void MergeSegments( | 654 | void MergeSegments( |
| 626 | std::vector<OfflineSpeakerDiarizationSegment> *segments) const { | 655 | std::vector<OfflineSpeakerDiarizationSegment> *segments) const { |
| 627 | float min_duration_off = config_.min_duration_off; | 656 | float min_duration_off = config_.min_duration_off; |
-
Please register or sign in to leave a comment.