Fangjun Kuang
Committed by GitHub

C++ API demo for speaker identification with portaudio. (#561)

@@ -155,6 +155,7 @@ class BuildExtension(build_ext):
         binaries += ["sherpa-onnx-offline"]
         binaries += ["sherpa-onnx-microphone"]
         binaries += ["sherpa-onnx-microphone-offline"]
+        binaries += ["sherpa-onnx-microphone-offline-speaker-identification"]
         binaries += ["sherpa-onnx-online-websocket-server"]
         binaries += ["sherpa-onnx-offline-websocket-server"]
         binaries += ["sherpa-onnx-online-websocket-client"]
@@ -48,6 +48,7 @@ def get_binaries_to_install():
     binaries += ["sherpa-onnx-offline"]
     binaries += ["sherpa-onnx-microphone"]
     binaries += ["sherpa-onnx-microphone-offline"]
+    binaries += ["sherpa-onnx-microphone-offline-speaker-identification"]
     binaries += ["sherpa-onnx-online-websocket-server"]
     binaries += ["sherpa-onnx-offline-websocket-server"]
     binaries += ["sherpa-onnx-online-websocket-client"]
@@ -287,6 +287,11 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO)
     microphone.cc
   )

+  add_executable(sherpa-onnx-microphone-offline-speaker-identification
+    sherpa-onnx-microphone-offline-speaker-identification.cc
+    microphone.cc
+  )
+
   if(BUILD_SHARED_LIBS)
     set(PA_LIB portaudio)
   else()
@@ -294,9 +299,10 @@ if(SHERPA_ONNX_ENABLE_PORTAUDIO)
   endif()

   set(exes
-    sherpa-onnx-offline-tts-play
     sherpa-onnx-microphone
     sherpa-onnx-microphone-offline
+    sherpa-onnx-microphone-offline-speaker-identification
+    sherpa-onnx-offline-tts-play
     sherpa-onnx-vad-microphone
     sherpa-onnx-vad-microphone-offline-asr
   )
+// sherpa-onnx/csrc/sherpa-onnx-microphone-offline-speaker-identification.cc
+//
+// Copyright (c) 2024 Xiaomi Corporation
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <algorithm>
+#include <fstream>
+#include <mutex>  // NOLINT
+#include <sstream>
+#include <string>
+#include <thread>  // NOLINT
+#include <unordered_map>
+#include <vector>
+
+#include "portaudio.h"  // NOLINT
+#include "sherpa-onnx/csrc/macros.h"
+#include "sherpa-onnx/csrc/microphone.h"
+#include "sherpa-onnx/csrc/speaker-embedding-extractor.h"
+#include "sherpa-onnx/csrc/speaker-embedding-manager.h"
+#include "sherpa-onnx/csrc/wave-reader.h"
+
+enum class State {
+  kIdle,
+  kRecording,
+  kComputing,
+};
+
+State state = State::kIdle;
+
+// true to stop the program and exit
+bool stop = false;
+
+std::vector<float> samples;
+std::mutex samples_mutex;
+
+static void DetectKeyPress() {
+  SHERPA_ONNX_LOGE("\nPress Enter to start");
+  int32_t key;
+  while (!stop && (key = getchar())) {
+    if (key != 0x0a) {
+      continue;
+    }
+
+    switch (state) {
+      case State::kIdle:
+        SHERPA_ONNX_LOGE("\nStart recording. Press Enter to stop recording");
+        state = State::kRecording;
+        {
+          std::lock_guard<std::mutex> lock(samples_mutex);
+          samples.clear();
+        }
+        break;
+      case State::kRecording:
+        SHERPA_ONNX_LOGE("\nStop recording. Computing ...");
+        state = State::kComputing;
+        break;
+      case State::kComputing:
+        break;
+    }
+  }
+}
+
+static int32_t RecordCallback(const void *input_buffer,
+                              void * /*output_buffer*/,
+                              unsigned long frames_per_buffer,  // NOLINT
+                              const PaStreamCallbackTimeInfo * /*time_info*/,
+                              PaStreamCallbackFlags /*status_flags*/,
+                              void *user_data) {
+  std::lock_guard<std::mutex> lock(samples_mutex);
+
+  auto p = reinterpret_cast<const float *>(input_buffer);
+  samples.insert(samples.end(), p, p + frames_per_buffer);
+
+  return stop ? paComplete : paContinue;
+}
+
+static void Handler(int32_t sig) {
+  stop = true;
+  fprintf(stderr, "\nCaught Ctrl + C. Press Enter to exit\n");
+}
+
+static std::vector<std::vector<float>> ComputeEmbeddings(
+    const std::vector<std::string> &filenames,
+    sherpa_onnx::SpeakerEmbeddingExtractor *extractor) {
+  std::vector<std::vector<float>> embedding_list;
+  embedding_list.reserve(filenames.size());
+
+  for (const auto &f : filenames) {
+    int32_t sampling_rate = -1;
+
+    bool is_ok = false;
+    const std::vector<float> samples =
+        sherpa_onnx::ReadWave(f, &sampling_rate, &is_ok);
+
+    if (!is_ok) {
+      fprintf(stderr, "Failed to read %s\n", f.c_str());
+      exit(-1);
+    }
+
+    auto s = extractor->CreateStream();
+    s->AcceptWaveform(sampling_rate, samples.data(), samples.size());
+    s->InputFinished();
+    auto embedding = extractor->Compute(s.get());
+    embedding_list.push_back(embedding);
+  }
+  return embedding_list;
+}
+
+static std::unordered_map<std::string, std::vector<std::string>>
+ReadSpeakerFile(const std::string &filename) {
+  std::unordered_map<std::string, std::vector<std::string>> ans;
+
+  std::ifstream is(filename);
+  if (!is) {
+    fprintf(stderr, "Failed to open %s\n", filename.c_str());
+    exit(-1);
+  }
+
+  std::string line;
+  std::string name;
+  std::string path;
+
+  while (std::getline(is, line)) {
+    std::istringstream iss(line);
+    name.clear();
+    path.clear();
+
+    iss >> name >> path;
+    if (!iss || !iss.eof() || name.empty() || path.empty()) {
+      fprintf(stderr, "Invalid line: %s\n", line.c_str());
+      exit(-1);
+    }
+    ans[name].push_back(path);
+  }
+
+  return ans;
+}
+
+int32_t main(int32_t argc, char *argv[]) {
+  signal(SIGINT, Handler);
+
+  const char *kUsageMessage = R"usage(
+This program shows how to use non-streaming speaker identification.
+Usage:
+
+(1) Prepare a text file containing speaker related files.
+
+Each line in the text file contains two columns. The first column is the
+speaker name, while the second column contains the wave file of the speaker.
+
+If the text file contains multiple wave files for the same speaker, then the
+embeddings of these files are averaged.
+
+An example text file is given below:
+
+    foo /path/to/a.wav
+    bar /path/to/b.wav
+    foo /path/to/c.wav
+    foobar /path/to/d.wav
+
+Each wave file should contain only a single channel; the sample format
+should be int16_t; the sample rate can be arbitrary.
+
+(2) Download a model for computing speaker embeddings
+
+Please visit
+https://github.com/k2-fsa/sherpa-onnx/releases/tag/speaker-recongition-models
+to download a model. An example is given below:
+
+  wget https://github.com/k2-fsa/sherpa-onnx/releases/download/speaker-recongition-models/wespeaker_zh_cnceleb_resnet34.onnx
+
+Note that `zh` means Chinese, while `en` means English.
+
+(3) Run it !
+
+  ./bin/sherpa-onnx-microphone-offline-speaker-identification \
+    --model=/path/to/your-model.onnx \
+    --speaker-file=/path/to/speaker.txt
+)usage";
+
+  sherpa_onnx::ParseOptions po(kUsageMessage);
+  float threshold = 0.5;
+  std::string speaker_file;
+
+  po.Register("threshold", &threshold,
+              "Threshold for comparing embedding scores.");
+
+  po.Register("speaker-file", &speaker_file, "Path to speaker.txt");
+
+  sherpa_onnx::SpeakerEmbeddingExtractorConfig config;
+  config.Register(&po);
+
+  po.Read(argc, argv);
+  if (po.NumArgs() != 0) {
+    fprintf(stderr,
+            "This program does not support any positional arguments.\n");
+    po.PrintUsage();
+    exit(EXIT_FAILURE);
+  }
+
+  fprintf(stderr, "%s\n", config.ToString().c_str());
+
+  if (!config.Validate()) {
+    fprintf(stderr, "Errors in config! Please use --help to view the usage.\n");
+    return -1;
+  }
+
+  SHERPA_ONNX_LOGE("\nCreating extractor ...");
+  sherpa_onnx::SpeakerEmbeddingExtractor extractor(config);
+  SHERPA_ONNX_LOGE("\nextractor created!");
+
+  sherpa_onnx::SpeakerEmbeddingManager manager(extractor.Dim());
+
+  auto name2files = ReadSpeakerFile(speaker_file);
+  for (const auto &p : name2files) {
+    SHERPA_ONNX_LOGE("\nProcessing speaker %s", p.first.c_str());
+    auto embedding_list = ComputeEmbeddings(p.second, &extractor);
+    manager.Add(p.first, embedding_list);
+  }
+
+  sherpa_onnx::Microphone mic;
+
+  PaDeviceIndex num_devices = Pa_GetDeviceCount();
+  fprintf(stderr, "Num devices: %d\n", num_devices);
+
+  PaStreamParameters param;
+
+  param.device = Pa_GetDefaultInputDevice();
+  if (param.device == paNoDevice) {
+    fprintf(stderr, "No default input device found\n");
+    exit(EXIT_FAILURE);
+  }
+  fprintf(stderr, "Use default device: %d\n", param.device);
+
+  const PaDeviceInfo *info = Pa_GetDeviceInfo(param.device);
+  fprintf(stderr, "  Name: %s\n", info->name);
+  fprintf(stderr, "  Max input channels: %d\n", info->maxInputChannels);
+
+  param.channelCount = 1;
+  param.sampleFormat = paFloat32;
+
+  param.suggestedLatency = info->defaultLowInputLatency;
+  param.hostApiSpecificStreamInfo = nullptr;
+  float sample_rate = 16000;
+
+  PaStream *stream;
+  PaError err =
+      Pa_OpenStream(&stream, &param, nullptr, /* &outputParameters, */
+                    sample_rate,
+                    0,          // frames per buffer
+                    paClipOff,  // we won't output out of range samples
+                                // so don't bother clipping them
+                    RecordCallback, nullptr);
+  if (err != paNoError) {
+    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
+    exit(EXIT_FAILURE);
+  }
+
+  err = Pa_StartStream(stream);
+  fprintf(stderr, "Started\n");
+
+  if (err != paNoError) {
+    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
+    exit(EXIT_FAILURE);
+  }
+
+  std::thread t(DetectKeyPress);
+  while (!stop) {
+    switch (state) {
+      case State::kIdle:
+        break;
+      case State::kRecording:
+        break;
+      case State::kComputing: {
+        std::vector<float> buf;
+        {
+          std::lock_guard<std::mutex> lock(samples_mutex);
+          buf = std::move(samples);
+        }
+
+        auto s = extractor.CreateStream();
+        s->AcceptWaveform(sample_rate, buf.data(), buf.size());
+        s->InputFinished();
+        auto embedding = extractor.Compute(s.get());
+        auto name = manager.Search(embedding.data(), threshold);
+
+        if (name.empty()) {
+          name = "--Unknown--";
+        }
+
+        SHERPA_ONNX_LOGE("\nDone!\nDetected speaker is: %s", name.c_str());
+
+        state = State::kIdle;
+        SHERPA_ONNX_LOGE("\nPress Enter to start");
+        break;
+      }
+    }
+
+    Pa_Sleep(20);  // sleep for 20ms
+  }
+  t.join();
+
+  err = Pa_CloseStream(stream);
+  if (err != paNoError) {
+    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
+    exit(EXIT_FAILURE);
+  }
+
+  return 0;
+}
@@ -26,7 +26,7 @@ void SpeakerEmbeddingExtractorConfig::Register(ParseOptions *po) {

 bool SpeakerEmbeddingExtractorConfig::Validate() const {
   if (model.empty()) {
-    SHERPA_ONNX_LOGE("Please provide --speaker-embedding-model");
+    SHERPA_ONNX_LOGE("Please provide --model");
     return false;
   }

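
Note for reviewers: the same non-streaming speaker-identification API can also be driven without a microphone. Below is a minimal sketch, not part of this PR, that enrolls one speaker from a wave file and then identifies the speaker of a second file, using only the calls exercised by the demo above (sherpa_onnx::ReadWave, SpeakerEmbeddingExtractor, SpeakerEmbeddingManager). The model path, the wave file names foo.wav and test.wav, the threshold value, and the helper ComputeEmbedding are placeholders introduced here for illustration.

// Minimal sketch (not part of this PR): enroll one speaker from a wave file
// and identify the speaker of a second wave file. File names, the model
// path, and the threshold are placeholders.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

#include "sherpa-onnx/csrc/speaker-embedding-extractor.h"
#include "sherpa-onnx/csrc/speaker-embedding-manager.h"
#include "sherpa-onnx/csrc/wave-reader.h"

// Hypothetical helper mirroring ComputeEmbeddings() in the demo, but for a
// single file.
static std::vector<float> ComputeEmbedding(
    sherpa_onnx::SpeakerEmbeddingExtractor *extractor,
    const std::string &wav_filename) {
  int32_t sampling_rate = -1;
  bool is_ok = false;
  std::vector<float> samples =
      sherpa_onnx::ReadWave(wav_filename, &sampling_rate, &is_ok);
  if (!is_ok) {
    fprintf(stderr, "Failed to read %s\n", wav_filename.c_str());
    exit(-1);
  }

  auto s = extractor->CreateStream();
  s->AcceptWaveform(sampling_rate, samples.data(), samples.size());
  s->InputFinished();
  return extractor->Compute(s.get());
}

int main() {
  sherpa_onnx::SpeakerEmbeddingExtractorConfig config;
  config.model = "./wespeaker_zh_cnceleb_resnet34.onnx";  // placeholder path
  if (!config.Validate()) {
    fprintf(stderr, "Invalid config\n");
    return -1;
  }

  sherpa_onnx::SpeakerEmbeddingExtractor extractor(config);
  sherpa_onnx::SpeakerEmbeddingManager manager(extractor.Dim());

  // Enroll: one or more embeddings per speaker; per the usage message,
  // multiple embeddings for the same speaker are averaged.
  std::vector<std::vector<float>> foo_embeddings;
  foo_embeddings.push_back(ComputeEmbedding(&extractor, "./foo.wav"));
  manager.Add("foo", foo_embeddings);

  // Identify: Search() returns an empty string when no registered speaker
  // scores above the threshold, which the demo maps to "--Unknown--".
  std::vector<float> embedding = ComputeEmbedding(&extractor, "./test.wav");
  float threshold = 0.5;  // placeholder, same default as the demo
  std::string name = manager.Search(embedding.data(), threshold);
  fprintf(stderr, "Detected speaker: %s\n",
          name.empty() ? "--Unknown--" : name.c_str());

  return 0;
}

Passing several wave files for one speaker to manager.Add() corresponds to repeating the same name on multiple lines of --speaker-file in the demo.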