Jingzhao Ou
Committed by GitHub

Added provider option to sherpa-onnx and decode-file-c-api (#162)

... ... @@ -36,22 +36,22 @@ $repo/test_wavs/8k.wav
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.onnx \
$repo/decoder-epoch-99-avg-1.onnx \
$repo/joiner-epoch-99-avg-1.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.onnx \
--num-threads=2 \
$wave
done
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.int8.onnx \
$repo/decoder-epoch-99-avg-1.int8.onnx \
$repo/joiner-epoch-99-avg-1.int8.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.int8.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.int8.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.int8.onnx \
--num-threads=2 \
$wave
done
rm -rf $repo
... ... @@ -79,22 +79,22 @@ $repo/test_wavs/8k.wav
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-11-avg-1.onnx \
$repo/decoder-epoch-11-avg-1.onnx \
$repo/joiner-epoch-11-avg-1.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-11-avg-1.onnx \
--decoder=$repo/decoder-epoch-11-avg-1.onnx \
--joiner=$repo/joiner-epoch-11-avg-1.onnx \
--num-threads=2 \
$wave
done
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-11-avg-1.int8.onnx \
$repo/decoder-epoch-11-avg-1.int8.onnx \
$repo/joiner-epoch-11-avg-1.int8.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-11-avg-1.int8.onnx \
--decoder=$repo/decoder-epoch-11-avg-1.int8.onnx \
--joiner=$repo/joiner-epoch-11-avg-1.int8.onnx \
--num-threads=2 \
$wave
done
rm -rf $repo
... ... @@ -122,24 +122,24 @@ $repo/test_wavs/8k.wav
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.onnx \
$repo/decoder-epoch-99-avg-1.onnx \
$repo/joiner-epoch-99-avg-1.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.onnx \
--num-threads=2 \
$wave
done
# test int8
#
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.int8.onnx \
$repo/decoder-epoch-99-avg-1.int8.onnx \
$repo/joiner-epoch-99-avg-1.int8.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.int8.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.int8.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.int8.onnx \
--num-threads=2 \
$wave
done
rm -rf $repo
... ... @@ -169,22 +169,22 @@ $repo/test_wavs/8k.wav
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.onnx \
$repo/decoder-epoch-99-avg-1.onnx \
$repo/joiner-epoch-99-avg-1.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.onnx \
--num-threads=2 \
$wave
done
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.int8.onnx \
$repo/decoder-epoch-99-avg-1.int8.onnx \
$repo/joiner-epoch-99-avg-1.int8.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.int8.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.int8.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.int8.onnx \
--num-threads=2 \
$wave
done
# Decode a URL
... ... @@ -233,22 +233,22 @@ $repo/test_wavs/2.wav
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.onnx \
$repo/decoder-epoch-99-avg-1.onnx \
$repo/joiner-epoch-99-avg-1.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.onnx \
--num-threads=2 \
$wave
done
for wave in ${waves[@]}; do
time $EXE \
$repo/tokens.txt \
$repo/encoder-epoch-99-avg-1.int8.onnx \
$repo/decoder-epoch-99-avg-1.int8.onnx \
$repo/joiner-epoch-99-avg-1.int8.onnx \
$wave \
2
--tokens=$repo/tokens.txt \
--encoder=$repo/encoder-epoch-99-avg-1.int8.onnx \
--decoder=$repo/decoder-epoch-99-avg-1.int8.onnx \
--joiner=$repo/joiner-epoch-99-avg-1.int8.onnx \
--num-threads=2 \
$wave
done
rm -rf $repo
... ...
include(cargs)
include_directories(${CMAKE_SOURCE_DIR})
add_executable(decode-file-c-api decode-file-c-api.c)
target_link_libraries(decode-file-c-api sherpa-onnx-c-api)
target_link_libraries(decode-file-c-api sherpa-onnx-c-api cargs)
... ...
... ... @@ -5,50 +5,85 @@
// This file shows how to use sherpa-onnx C API
// to decode a file.
#include "cargs.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "sherpa-onnx/c-api/c-api.h"
static struct cag_option options[] = {
{
.identifier = 't',
.access_letters = NULL,
.access_name = "tokens",
.value_name = "tokens",
.description = "Tokens file"
}, {
.identifier = 'e',
.access_letters = NULL,
.access_name = "encoder",
.value_name = "encoder",
.description = "Encoder ONNX file"
}, {
.identifier = 'd',
.access_letters = NULL,
.access_name = "decoder",
.value_name = "decoder",
.description = "Decoder ONNX file"
}, {
.identifier = 'j',
.access_letters = NULL,
.access_name = "joiner",
.value_name = "joiner",
.description = "Joiner ONNX file"
}, {
.identifier = 'n',
.access_letters = NULL,
.access_name = "num-threads",
.value_name = "num-threads",
.description = "Number of threads"
}, {
.identifier = 'p',
.access_letters = NULL,
.access_name = "provider",
.value_name = "provider",
.description = "Provider: cpu (default), cuda, coreml"
}, {
.identifier = 'm',
.access_letters = NULL,
.access_name = "decoding-method",
.value_name = "decoding-method",
.description =
"Decoding method: greedy_search (default), modified_beam_search"
}
};
const char *kUsage =
"\n"
"Usage:\n "
" ./bin/decode-file-c-api \\\n"
" /path/to/tokens.txt \\\n"
" /path/to/encoder.onnx \\\n"
" /path/to/decoder.onnx \\\n"
" /path/to/joiner.onnx \\\n"
" /path/to/foo.wav [num_threads [decoding_method]]\n"
" --tokens=/path/to/tokens.txt \\\n"
" --encoder=/path/to/encoder.onnx \\\n"
" --decoder=/path/to/decoder.onnx \\\n"
" --joiner=/path/to/joiner.onnx \\\n"
" /path/to/foo.wav\n"
"\n\n"
"Default num_threads is 1.\n"
"Valid decoding_method: greedy_search (default), modified_beam_search\n\n"
"Valid provider: cpu (default), cuda, coreml\n\n"
"Please refer to \n"
"https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html\n"
"for a list of pre-trained models to download.\n";
int32_t main(int32_t argc, char *argv[]) {
if (argc < 6 || argc > 8) {
fprintf(stderr, "%s\n", kUsage);
return -1;
}
SherpaOnnxOnlineRecognizerConfig config;
config.model_config.tokens = argv[1];
config.model_config.encoder = argv[2];
config.model_config.decoder = argv[3];
config.model_config.joiner = argv[4];
int32_t num_threads = 1;
if (argc == 7 && atoi(argv[6]) > 0) {
num_threads = atoi(argv[6]);
}
config.model_config.num_threads = num_threads;
config.model_config.debug = 0;
config.model_config.num_threads = 1;
config.model_config.provider = "cpu";
config.decoding_method = "greedy_search";
if (argc == 8) {
config.decoding_method = argv[7];
}
config.max_active_paths = 4;
... ... @@ -60,13 +95,36 @@ int32_t main(int32_t argc, char *argv[]) {
config.rule2_min_trailing_silence = 1.2;
config.rule3_min_utterance_length = 300;
cag_option_context context;
char identifier;
const char *value;
cag_option_prepare(&context, options, CAG_ARRAY_SIZE(options), argc, argv);
while (cag_option_fetch(&context)) {
identifier = cag_option_get(&context);
value = cag_option_get_value(&context);
switch (identifier) {
case 't': config.model_config.tokens = value; break;
case 'e': config.model_config.encoder = value; break;
case 'd': config.model_config.decoder = value; break;
case 'j': config.model_config.joiner = value; break;
case 'n': config.model_config.num_threads = atoi(value); break;
case 'p': config.model_config.provider = value; break;
case 'm': config.decoding_method = value; break;
default:
// do nothing as config already has valid default values
break;
}
}
SherpaOnnxOnlineRecognizer *recognizer = CreateOnlineRecognizer(&config);
SherpaOnnxOnlineStream *stream = CreateOnlineStream(recognizer);
SherpaOnnxDisplay *display = CreateDisplay(50);
int32_t segment_id = 0;
const char *wav_filename = argv[5];
const char *wav_filename = argv[context.index];
FILE *fp = fopen(wav_filename, "rb");
if (!fp) {
fprintf(stderr, "Failed to open %s\n", wav_filename);
... ...
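For readers who have not used cargs before, the following is a minimal, self-contained sketch of the parsing pattern adopted above (an option table plus cag_option_prepare / cag_option_fetch / cag_option_get / cag_option_get_value). It is illustrative only and not part of this change; it reuses the provider option from the table above and nothing else.

// Illustrative cargs sketch (not part of this PR).
// Mirrors the pattern in decode-file-c-api.c: define an option table,
// then loop with cag_option_fetch() and read each matched value.
#include <stdio.h>

#include "cargs.h"

static struct cag_option demo_options[] = {
  {
    .identifier = 'p',
    .access_letters = NULL,
    .access_name = "provider",
    .value_name = "provider",
    .description = "Provider: cpu (default), cuda, coreml"
  }
};

int main(int argc, char *argv[]) {
  const char *provider = "cpu";  // default, as in the PR
  cag_option_context context;
  cag_option_prepare(&context, demo_options, CAG_ARRAY_SIZE(demo_options),
                     argc, argv);
  while (cag_option_fetch(&context)) {
    if (cag_option_get(&context) == 'p') {
      provider = cag_option_get_value(&context);
    }
  }
  printf("provider: %s\n", provider);
  // Positional arguments start at argv[context.index]; this is how
  // decode-file-c-api.c locates the wave file after option parsing.
  return 0;
}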
function(download_cargs)
include(FetchContent)
set(cargs_URL "https://github.com/likle/cargs/archive/refs/tags/v1.0.3.tar.gz")
set(cargs_HASH "SHA256=ddba25bd35e9c6c75bc706c126001b8ce8e084d40ef37050e6aa6963e836eb8b")
# If you don't have access to the Internet,
# please pre-download cargs
set(possible_file_locations
$ENV{HOME}/Downloads/cargs-v1-0-3.tar.gz
${PROJECT_SOURCE_DIR}/cargs-v1-0-3.tar.gz
${PROJECT_BINARY_DIR}/cargs-v1-0-3.tar.gz
/tmp/cargs-v1-0-3.tar.gz
/star-fj/fangjun/download/github/cargs-v1-0-3.tar.gz
)
foreach(f IN LISTS possible_file_locations)
if(EXISTS ${f})
set(cargs_URL "${f}")
file(TO_CMAKE_PATH "${cargs_URL}" cargs_URL)
break()
endif()
endforeach()
FetchContent_Declare(cargs URL ${cargs_URL} URL_HASH ${cargs_HASH})
FetchContent_GetProperties(cargs)
if(NOT cargs_POPULATED)
message(STATUS "Downloading cargs ${cargs_URL}")
FetchContent_Populate(cargs)
endif()
message(STATUS "cargs is downloaded to ${cargs_SOURCE_DIR}")
add_subdirectory(${cargs_SOURCE_DIR} ${cargs_BINARY_DIR} EXCLUDE_FROM_ALL)
endfunction()
download_cargs()
... ...
... ... @@ -41,6 +41,7 @@ SherpaOnnxOnlineRecognizer *CreateOnlineRecognizer(
recognizer_config.model_config.joiner_filename = config->model_config.joiner;
recognizer_config.model_config.tokens = config->model_config.tokens;
recognizer_config.model_config.num_threads = config->model_config.num_threads;
recognizer_config.model_config.provider = config->model_config.provider;
recognizer_config.model_config.debug = config->model_config.debug;
recognizer_config.decoding_method = config->decoding_method;
... ...
... ... @@ -52,6 +52,7 @@ SHERPA_ONNX_API typedef struct SherpaOnnxOnlineTransducerModelConfig {
const char *joiner;
const char *tokens;
int32_t num_threads;
const char *provider;
int32_t debug; // true to print debug information of the model
} SherpaOnnxOnlineTransducerModelConfig;
... ...
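From a caller's point of view, the new field is used like any other string option in the C API. Below is a hedged sketch built around a hypothetical helper, MakeRecognizer; it only touches fields and functions that appear in this diff, the model paths are placeholders, and the remaining fields of SherpaOnnxOnlineRecognizerConfig (for example the feature configuration) still have to be filled in exactly as decode-file-c-api.c does.

// Sketch: selecting an execution provider through the C API.
// Placeholder paths; the remaining config fields are assumed to be
// set elsewhere, as in decode-file-c-api.c.
#include <string.h>

#include "sherpa-onnx/c-api/c-api.h"

static SherpaOnnxOnlineRecognizer *MakeRecognizer(const char *provider) {
  SherpaOnnxOnlineRecognizerConfig config;
  memset(&config, 0, sizeof(config));  // zero everything not set below

  config.model_config.tokens = "tokens.txt";     // placeholder
  config.model_config.encoder = "encoder.onnx";  // placeholder
  config.model_config.decoder = "decoder.onnx";  // placeholder
  config.model_config.joiner = "joiner.onnx";    // placeholder
  config.model_config.num_threads = 1;
  config.model_config.provider = provider;  // "cpu", "cuda", or "coreml"
  config.model_config.debug = 0;
  config.decoding_method = "greedy_search";
  config.max_active_paths = 4;

  return CreateOnlineRecognizer(&config);
}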
... ... @@ -17,6 +17,8 @@ void OnlineTransducerModelConfig::Register(ParseOptions *po) {
po->Register("tokens", &tokens, "Path to tokens.txt");
po->Register("num_threads", &num_threads,
"Number of threads to run the neural network");
po->Register("provider", &provider,
"Specify a provider to use: cpu, cuda, coreml");
po->Register("debug", &debug,
"true to print model information while loading it.");
... ... @@ -60,6 +62,7 @@ std::string OnlineTransducerModelConfig::ToString() const {
os << "joiner_filename=\"" << joiner_filename << "\", ";
os << "tokens=\"" << tokens << "\", ";
os << "num_threads=" << num_threads << ", ";
os << "provider=\"" << provider << "\", ";
os << "debug=" << (debug ? "True" : "False") << ")";
return os.str();
... ...
... ... @@ -69,17 +69,17 @@ for a list of pre-trained models to download.
fprintf(stderr, "Creating recognizer ...\n");
sherpa_onnx::OfflineRecognizer recognizer(config);
auto begin = std::chrono::steady_clock::now();
const auto begin = std::chrono::steady_clock::now();
fprintf(stderr, "Started\n");
std::vector<std::unique_ptr<sherpa_onnx::OfflineStream>> ss;
std::vector<sherpa_onnx::OfflineStream *> ss_pointers;
float duration = 0;
for (int32_t i = 1; i <= po.NumArgs(); ++i) {
std::string wav_filename = po.GetArg(i);
const std::string wav_filename = po.GetArg(i);
int32_t sampling_rate = -1;
bool is_ok = false;
std::vector<float> samples =
const std::vector<float> samples =
sherpa_onnx::ReadWave(wav_filename, &sampling_rate, &is_ok);
if (!is_ok) {
fprintf(stderr, "Failed to read %s\n", wav_filename.c_str());
... ... @@ -96,7 +96,7 @@ for a list of pre-trained models to download.
recognizer.DecodeStreams(ss_pointers.data(), ss_pointers.size());
auto end = std::chrono::steady_clock::now();
const auto end = std::chrono::steady_clock::now();
fprintf(stderr, "Done!\n\n");
for (int32_t i = 1; i <= po.NumArgs(); ++i) {
... ...
... ... @@ -11,22 +11,28 @@
#include "sherpa-onnx/csrc/online-recognizer.h"
#include "sherpa-onnx/csrc/online-stream.h"
#include "sherpa-onnx/csrc/symbol-table.h"
#include "sherpa-onnx/csrc/parse-options.h"
#include "sherpa-onnx/csrc/wave-reader.h"
// TODO(fangjun): Use ParseOptions as we are getting more args
int main(int32_t argc, char *argv[]) {
if (argc < 6 || argc > 9) {
const char *usage = R"usage(
const char *kUsageMessage = R"usage(
Usage:
./bin/sherpa-onnx \
/path/to/tokens.txt \
/path/to/encoder.onnx \
/path/to/decoder.onnx \
/path/to/joiner.onnx \
/path/to/foo.wav [num_threads [decoding_method [/path/to/rnn_lm.onnx]]]
--tokens=/path/to/tokens.txt \
--encoder=/path/to/encoder.onnx \
--decoder=/path/to/decoder.onnx \
--joiner=/path/to/joiner.onnx \
--provider=cpu \
--num-threads=2 \
--decoding-method=greedy_search \
/path/to/foo.wav [bar.wav foobar.wav ...]
Note: It supports decoding multiple files; they are decoded one after another.
Default value for --num-threads is 2.
Valid values for --decoding-method: greedy_search (default), modified_beam_search.
Valid values for --provider: cpu (default), cuda, coreml.
foo.wav should be a single-channel, 16-bit PCM encoded wave file; its
sampling rate can be arbitrary and does not need to be 16 kHz.
... ... @@ -34,33 +40,17 @@ Please refer to
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
for a list of pre-trained models to download.
)usage";
fprintf(stderr, "%s\n", usage);
return 0;
}
sherpa_onnx::ParseOptions po(kUsageMessage);
sherpa_onnx::OnlineRecognizerConfig config;
config.model_config.tokens = argv[1];
config.Register(&po);
config.model_config.debug = false;
config.model_config.encoder_filename = argv[2];
config.model_config.decoder_filename = argv[3];
config.model_config.joiner_filename = argv[4];
std::string wav_filename = argv[5];
config.model_config.num_threads = 2;
if (argc == 7 && atoi(argv[6]) > 0) {
config.model_config.num_threads = atoi(argv[6]);
}
if (argc == 8) {
config.decoding_method = argv[7];
po.Read(argc, argv);
if (po.NumArgs() < 1) {
po.PrintUsage();
exit(EXIT_FAILURE);
}
if (argc == 9) {
config.lm_config.model = argv[8];
}
config.max_active_paths = 4;
fprintf(stderr, "%s\n", config.ToString().c_str());
... ... @@ -71,63 +61,66 @@ for a list of pre-trained models to download.
sherpa_onnx::OnlineRecognizer recognizer(config);
int32_t sampling_rate = -1;
bool is_ok = false;
std::vector<float> samples =
sherpa_onnx::ReadWave(wav_filename, &sampling_rate, &is_ok);
if (!is_ok) {
fprintf(stderr, "Failed to read %s\n", wav_filename.c_str());
return -1;
}
fprintf(stderr, "sampling rate of input file: %d\n", sampling_rate);
float duration = samples.size() / static_cast<float>(sampling_rate);
fprintf(stderr, "wav filename: %s\n", wav_filename.c_str());
fprintf(stderr, "wav duration (s): %.3f\n", duration);
auto begin = std::chrono::steady_clock::now();
fprintf(stderr, "Started\n");
auto s = recognizer.CreateStream();
s->AcceptWaveform(sampling_rate, samples.data(), samples.size());
std::vector<float> tail_paddings(static_cast<int>(0.3 * sampling_rate));
// Note: We can call AcceptWaveform() multiple times.
s->AcceptWaveform(sampling_rate, tail_paddings.data(), tail_paddings.size());
// Call InputFinished() to indicate that no audio samples are available
s->InputFinished();
while (recognizer.IsReady(s.get())) {
recognizer.DecodeStream(s.get());
for (int32_t i = 1; i <= po.NumArgs(); ++i) {
const std::string wav_filename = po.GetArg(i);
int32_t sampling_rate = -1;
bool is_ok = false;
const std::vector<float> samples =
sherpa_onnx::ReadWave(wav_filename, &sampling_rate, &is_ok);
if (!is_ok) {
fprintf(stderr, "Failed to read %s\n", wav_filename.c_str());
return -1;
}
fprintf(stderr, "sampling rate of input file: %d\n", sampling_rate);
const float duration = samples.size() / static_cast<float>(sampling_rate);
fprintf(stderr, "wav filename: %s\n", wav_filename.c_str());
fprintf(stderr, "wav duration (s): %.3f\n", duration);
fprintf(stderr, "Started\n");
const auto begin = std::chrono::steady_clock::now();
auto s = recognizer.CreateStream();
s->AcceptWaveform(sampling_rate, samples.data(), samples.size());
std::vector<float> tail_paddings(static_cast<int>(0.3 * sampling_rate));
// Note: We can call AcceptWaveform() multiple times.
s->AcceptWaveform(
sampling_rate, tail_paddings.data(), tail_paddings.size());
// Call InputFinished() to indicate that no audio samples are available
s->InputFinished();
while (recognizer.IsReady(s.get())) {
recognizer.DecodeStream(s.get());
}
const std::string text = recognizer.GetResult(s.get()).AsJsonString();
const auto end = std::chrono::steady_clock::now();
const float elapsed_seconds =
std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
.count() / 1000.;
fprintf(stderr, "Done!\n");
fprintf(stderr,
"Recognition result for %s:\n%s\n",
wav_filename.c_str(), text.c_str());
fprintf(stderr, "num threads: %d\n", config.model_config.num_threads);
fprintf(stderr, "decoding method: %s\n", config.decoding_method.c_str());
if (config.decoding_method == "modified_beam_search") {
fprintf(stderr, "max active paths: %d\n", config.max_active_paths);
}
fprintf(stderr, "Elapsed seconds: %.3f s\n", elapsed_seconds);
const float rtf = elapsed_seconds / duration;
fprintf(stderr, "Real time factor (RTF): %.3f / %.3f = %.3f\n",
elapsed_seconds, duration, rtf);
}
std::string text = recognizer.GetResult(s.get()).AsJsonString();
fprintf(stderr, "Done!\n");
fprintf(stderr, "Recognition result for %s:\n%s\n", wav_filename.c_str(),
text.c_str());
auto end = std::chrono::steady_clock::now();
float elapsed_seconds =
std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
.count() /
1000.;
fprintf(stderr, "num threads: %d\n", config.model_config.num_threads);
fprintf(stderr, "decoding method: %s\n", config.decoding_method.c_str());
if (config.decoding_method == "modified_beam_search") {
fprintf(stderr, "max active paths: %d\n", config.max_active_paths);
}
fprintf(stderr, "Elapsed seconds: %.3f s\n", elapsed_seconds);
float rtf = elapsed_seconds / duration;
fprintf(stderr, "Real time factor (RTF): %.3f / %.3f = %.3f\n",
elapsed_seconds, duration, rtf);
return 0;
}
... ...