Fangjun Kuang
Committed by GitHub

Add address sanitizer and undefined behavior sanitizer (#951)

Showing 56 changed files with 354 additions and 132 deletions
... ... @@ -137,7 +137,7 @@ jobs:
git push https://csukuangfj:$HF_TOKEN@huggingface.co/csukuangfj/sherpa-onnx-libs main
- name: Release android libs
if: github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa' && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
if: (github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa') && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
uses: svenstaro/upload-release-action@v2
with:
file_glob: true
... ...
... ... @@ -217,3 +217,11 @@ jobs:
with:
name: flutter-sherpa-onnx-linux-x64
path: ./*.tar.bz2
# - name: Release android libs
# if: (github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa') && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
# uses: svenstaro/upload-release-action@v2
# with:
# file_glob: true
# overwrite: true
# file: flutter*.tar.bz2
... ...
... ... @@ -180,3 +180,11 @@ jobs:
with:
name: flutter-sherpa-onnx-app-macos-${{ matrix.arch }}
path: ./*.tar.bz2
- name: Release android libs
if: (github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa') && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
uses: svenstaro/upload-release-action@v2
with:
file_glob: true
overwrite: true
file: flutter*.tar.bz2
... ...
... ... @@ -157,3 +157,11 @@ jobs:
with:
name: flutter-sherpa-onnx-windows-x64
path: ./*.tar.bz2
- name: Release android libs
if: (github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa') && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
uses: svenstaro/upload-release-action@v2
with:
file_glob: true
overwrite: true
file: flutter*.tar.bz2
... ...
name: sanitizer

on:
  workflow_dispatch:

  schedule:
    # cron fields: minute (0-59) hour (0-23) day-of-month (1-31)
    #              month (1-12) day-of-week (0-6)
    # Nightly build at 22:50 UTC every day.
    - cron: "50 22 * * *"

# Only one sanitizer run per ref at a time; a new push cancels the old run.
concurrency:
  group: sanitizer-${{ github.ref }}
  cancel-in-progress: true

jobs:
  sanitizer:
    runs-on: ${{ matrix.os }}
    name: sanitizer
    strategy:
      fail-fast: false
      matrix:
        os: [macos-latest]

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2
        with:
          key: ${{ matrix.os }}-sanitizer

      - name: Configure CMake
        shell: bash
        run: |
          # Route both the C and C++ compilers through ccache.
          # (Fix: the original only set the C++ launcher.)
          export CMAKE_C_COMPILER_LAUNCHER=ccache
          export CMAKE_CXX_COMPILER_LAUNCHER=ccache
          export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH"

          cmake --version
          mkdir build
          cd build

          cmake \
            -DSHERPA_ONNX_ENABLE_PYTHON=ON \
            -DSHERPA_ONNX_ENABLE_TESTS=ON \
            -DSHERPA_ONNX_ENABLE_JNI=ON \
            -DSHERPA_ONNX_ENABLE_SANITIZER=ON \
            -DBUILD_SHARED_LIBS=ON \
            -DCMAKE_BUILD_TYPE=Release \
            -DCMAKE_INSTALL_PREFIX=./install \
            ..

      - name: Build sherpa-onnx
        shell: bash
        run: |
          export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH"

          cd build
          make -j2
          make install

          ls -lh lib
          ls -lh bin
          file ./bin/sherpa-onnx

      - name: Display dependencies of sherpa-onnx for macos
        shell: bash
        run: |
          # This step runs from the repo root, so the binary lives under
          # build/bin (fix: the original used "bin/sherpa-onnx", which does
          # not exist here).
          file build/bin/sherpa-onnx
          otool -L build/bin/sherpa-onnx
          otool -l build/bin/sherpa-onnx

      - name: Test offline transducer
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-offline

          .github/scripts/test-offline-transducer.sh

      - name: Test online CTC
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx

          .github/scripts/test-online-ctc.sh

      - name: Test offline punctuation
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-offline-punctuation

          .github/scripts/test-offline-punctuation.sh

      - name: Test C API
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export SLID_EXE=spoken-language-identification-c-api
          export SID_EXE=speaker-identification-c-api
          export AT_EXE=audio-tagging-c-api
          export PUNCT_EXE=add-punctuation-c-api

          .github/scripts/test-c-api.sh

      - name: Test Audio tagging
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-offline-audio-tagging

          .github/scripts/test-audio-tagging.sh

      - name: Test spoken language identification (C++ API)
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-offline-language-identification

          .github/scripts/test-spoken-language-identification.sh

      - name: Test transducer kws
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-keyword-spotter

          .github/scripts/test-kws.sh

      # Fix: the original guarded this step with `matrix.with_tts == 'ON'`,
      # but the matrix only defines `os`, so the condition was always false
      # and the TTS test never ran. TTS is built by default
      # (SHERPA_ONNX_ENABLE_TTS defaults to ON), so the step runs
      # unconditionally now.
      - name: Test offline TTS
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-offline-tts

          .github/scripts/test-offline-tts.sh

      - name: Test online paraformer
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx

          .github/scripts/test-online-paraformer.sh

      # Fix: dropped the stale `matrix.build_type != 'Debug'` guard; this
      # workflow always configures with CMAKE_BUILD_TYPE=Release and the
      # matrix defines no `build_type` key, so the condition was vestigial.
      - name: Test offline Whisper
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-offline

          .github/scripts/test-offline-whisper.sh

      - name: Test offline CTC
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx-offline

          .github/scripts/test-offline-ctc.sh

      - name: Test online transducer
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=sherpa-onnx

          .github/scripts/test-online-transducer.sh

      - name: Test online transducer (C API)
        shell: bash
        run: |
          export PATH=$PWD/build/bin:$PATH
          export EXE=decode-file-c-api

          .github/scripts/test-online-transducer.sh
... ...
... ... @@ -2,6 +2,8 @@ cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.14" CACHE STRING "Minimum OS X deployment version. Used only for macOS")
project(sherpa-onnx)
set(SHERPA_ONNX_VERSION "1.9.26")
... ... @@ -32,6 +34,7 @@ option(SHERPA_ONNX_ENABLE_BINARY "Whether to build binaries" ON)
option(SHERPA_ONNX_ENABLE_TTS "Whether to build TTS related code" ON)
option(SHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY "True to link libstdc++ statically. Used only when BUILD_SHARED_LIBS is OFF on Linux" ON)
option(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE "True to use pre-installed onnxruntime if available" ON)
option(SHERPA_ONNX_ENABLE_SANITIZER "Whether to enable ubsan and asan" OFF)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
... ... @@ -121,6 +124,7 @@ message(STATUS "SHERPA_ONNX_ENABLE_BINARY ${SHERPA_ONNX_ENABLE_BINARY}")
message(STATUS "SHERPA_ONNX_ENABLE_TTS ${SHERPA_ONNX_ENABLE_TTS}")
message(STATUS "SHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY ${SHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY}")
message(STATUS "SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE ${SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE}")
message(STATUS "SHERPA_ONNX_ENABLE_SANITIZER: ${SHERPA_ONNX_ENABLE_SANITIZER}")
if(SHERPA_ONNX_ENABLE_TTS)
message(STATUS "TTS is enabled")
... ... @@ -267,6 +271,33 @@ if(SHERPA_ONNX_ENABLE_TTS)
include(cppjieba) # For Chinese TTS. It is a header-only C++ library
endif()
# Enable UBSan + ASan when SHERPA_ONNX_ENABLE_SANITIZER is ON.
#
# Notes:
#   * Sanitizer flags must be present both when compiling and when linking;
#     the compiler driver then links the proper runtime automatically.
#   * -fsanitize=integer / nullability / implicit-conversion are Clang-only
#     checks; this option is exercised on macOS (Clang/AppleClang) in CI.
if(SHERPA_ONNX_ENABLE_SANITIZER)
  message(WARNING "Enabling undefined behavior sanitizer and address sanitizer")

  set(flags "-fsanitize=undefined")
  string(APPEND flags " -fno-sanitize-recover=undefined")
  string(APPEND flags " -fsanitize=integer")
  string(APPEND flags " -fsanitize=nullability")
  string(APPEND flags " -fsanitize=implicit-conversion")
  string(APPEND flags " -fsanitize=bounds")
  string(APPEND flags " -fsanitize=address")

  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flags}")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flags}")

  # Fix: the original wrote to CMAKE_EXECUTBLE_LINKER_FLAGS — a typo for a
  # variable that does not exist in CMake — so the sanitizer flags were never
  # applied at link time through it. Use the real variables, and also cover
  # shared libraries since this configuration builds with BUILD_SHARED_LIBS=ON.
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${flags}")
  set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${flags}")

  # Keep frame pointers so sanitizer stack traces remain usable.
  add_compile_options(-fno-omit-frame-pointer)
endif()
add_subdirectory(sherpa-onnx)
if(SHERPA_ONNX_ENABLE_C_API AND SHERPA_ONNX_ENABLE_BINARY)
... ...
... ... @@ -43,8 +43,8 @@ int32_t main() {
const char *texts[] = {
"这是一个测试你好吗How are you我很好thank you are you ok谢谢你",
"我们都是木头人不会说话不会动",
"The African blogosphere is rapidly expanding bringing more voices "
"online in the form of commentaries opinions analyses rants and poetry",
("The African blogosphere is rapidly expanding bringing more voices "
"online in the form of commentaries opinions analyses rants and poetry"),
};
int32_t n = sizeof(texts) / sizeof(const char *);
... ...
... ... @@ -179,8 +179,6 @@ int32_t main(int32_t argc, char *argv[]) {
#define N 3200 // 0.2 s. Sample rate is fixed to 16 kHz
int16_t buffer[N];
float samples[N];
fprintf(stderr, "sample rate: %d, num samples: %d, duration: %.2f s\n",
wave->sample_rate, wave->num_samples,
(float)wave->num_samples / wave->sample_rate);
... ...
... ... @@ -65,8 +65,6 @@ int32_t main() {
// simulate streaming. You can choose an arbitrary N
#define N 3200
int16_t buffer[N];
float samples[N];
fprintf(stderr, "sample rate: %d, num samples: %d, duration: %.2f s\n",
wave->sample_rate, wave->num_samples,
(float)wave->num_samples / wave->sample_rate);
... ...
... ... @@ -14,10 +14,7 @@ function(download_espeak_ng_for_piper)
set(USE_SPEECHPLAYER OFF CACHE BOOL "" FORCE)
set(EXTRA_cmn ON CACHE BOOL "" FORCE)
set(EXTRA_ru ON CACHE BOOL "" FORCE)
if(SHERPA_ONNX_ENABLE_WASM)
set(BUILD_ESPEAK_NG_EXE OFF CACHE BOOL "" FORCE)
endif()
set(BUILD_ESPEAK_NG_EXE OFF CACHE BOOL "" FORCE)
# If you don't have access to the Internet,
# please pre-download kaldi-decoder
... ...
... ... @@ -187,7 +187,7 @@ const SherpaOnnxOnlineRecognizerResult *GetOnlineStreamResult(
r->text = pText;
// copy json
const auto &json = result.AsJsonString();
std::string json = result.AsJsonString();
char *pJson = new char[json.size() + 1];
std::copy(json.begin(), json.end(), pJson);
pJson[json.size()] = 0;
... ... @@ -445,7 +445,7 @@ const SherpaOnnxOfflineRecognizerResult *GetOfflineStreamResult(
r->text = pText;
// copy json
const auto &json = result.AsJsonString();
std::string json = result.AsJsonString();
char *pJson = new char[json.size() + 1];
std::copy(json.begin(), json.end(), pJson);
pJson[json.size()] = 0;
... ... @@ -643,7 +643,7 @@ const SherpaOnnxKeywordResult *GetKeywordResult(
r->keyword = pKeyword;
// copy json
const auto &json = result.AsJsonString();
std::string json = result.AsJsonString();
char *pJson = new char[json.size() + 1];
std::copy(json.begin(), json.end(), pJson);
pJson[json.size()] = 0;
... ...
... ... @@ -73,7 +73,7 @@ class AudioTaggingCEDImpl : public AudioTaggingImpl {
std::vector<float> f = s->GetFrames();
int32_t num_frames = f.size() / feat_dim;
assert(feat_dim * num_frames == f.size());
assert(feat_dim * num_frames == static_cast<int32_t>(f.size()));
std::array<int64_t, 3> shape = {1, num_frames, feat_dim};
... ...
... ... @@ -65,7 +65,7 @@ void AudioTaggingLabels::Init(std::istream &is) {
exit(-1);
}
if (i != names_.size()) {
if (i != static_cast<int32_t>(names_.size())) {
SHERPA_ONNX_LOGE(
"Index should be sorted and contiguous. Expected index: %d, given: "
"%d.",
... ...
... ... @@ -74,7 +74,7 @@ class AudioTaggingZipformerImpl : public AudioTaggingImpl {
int32_t num_frames = f.size() / feat_dim;
assert(feat_dim * num_frames == f.size());
assert(feat_dim * num_frames == static_cast<int32_t>(f.size()));
std::array<int64_t, 3> shape = {1, num_frames, feat_dim};
... ...
... ... @@ -92,7 +92,7 @@ Ort::Value Cat(OrtAllocator *allocator,
}
}
return std::move(ans);
return ans;
}
template Ort::Value Cat<float>(OrtAllocator *allocator,
... ...
... ... @@ -62,7 +62,6 @@ void CircularBuffer::Resize(int32_t new_capacity) {
new_buffer.begin() + dest);
} else {
int32_t first_part = new_capacity - dest;
int32_t second_part = part1_size - first_part;
std::copy(buffer_.begin() + start, buffer_.begin() + start + first_part,
new_buffer.begin() + dest);
... ...
... ... @@ -27,7 +27,7 @@ void ContextGraph::Build(const std::vector<std::vector<int32_t>> &token_ids,
if (!ac_thresholds.empty()) {
SHERPA_ONNX_CHECK_EQ(token_ids.size(), ac_thresholds.size());
}
for (int32_t i = 0; i < token_ids.size(); ++i) {
for (int32_t i = 0; i < static_cast<int32_t>(token_ids.size()); ++i) {
auto node = root_.get();
float score = scores.empty() ? 0.0f : scores[i];
score = score == 0.0f ? context_score_ : score;
... ... @@ -35,10 +35,10 @@ void ContextGraph::Build(const std::vector<std::vector<int32_t>> &token_ids,
ac_threshold = ac_threshold == 0.0f ? ac_threshold_ : ac_threshold;
std::string phrase = phrases.empty() ? std::string() : phrases[i];
for (int32_t j = 0; j < token_ids[i].size(); ++j) {
for (int32_t j = 0; j < static_cast<int32_t>(token_ids[i].size()); ++j) {
int32_t token = token_ids[i][j];
if (0 == node->next.count(token)) {
bool is_end = j == token_ids[i].size() - 1;
bool is_end = j == (static_cast<int32_t>(token_ids[i].size()) - 1);
node->next[token] = std::make_unique<ContextState>(
token, score, node->node_score + score,
is_end ? node->node_score + score : 0, j + 1,
... ... @@ -49,11 +49,11 @@ void ContextGraph::Build(const std::vector<std::vector<int32_t>> &token_ids,
node->next[token]->token_score = token_score;
float node_score = node->node_score + token_score;
node->next[token]->node_score = node_score;
bool is_end =
(j == token_ids[i].size() - 1) || node->next[token]->is_end;
bool is_end = (j == static_cast<int32_t>(token_ids[i].size()) - 1) ||
node->next[token]->is_end;
node->next[token]->output_score = is_end ? node_score : 0.0f;
node->next[token]->is_end = is_end;
if (j == token_ids[i].size() - 1) {
if (j == static_cast<int32_t>(token_ids[i].size()) - 1) {
node->next[token]->phrase = phrase;
node->next[token]->ac_threshold = ac_threshold;
}
... ...
... ... @@ -129,8 +129,8 @@ class Hypotheses {
return os.str();
}
const auto begin() const { return hyps_dict_.begin(); }
const auto end() const { return hyps_dict_.end(); }
auto begin() const { return hyps_dict_.begin(); }
auto end() const { return hyps_dict_.end(); }
auto begin() { return hyps_dict_.begin(); }
auto end() { return hyps_dict_.end(); }
... ...
... ... @@ -217,7 +217,7 @@ JiebaLexicon::JiebaLexicon(const std::string &lexicon,
debug)) {}
std::vector<std::vector<int64_t>> JiebaLexicon::ConvertTextToTokenIds(
const std::string &text, const std::string &unused_voice /*= ""*/) const {
const std::string &text, const std::string & /*unused_voice = ""*/) const {
return impl_->ConvertTextToTokenIds(text);
}
... ...
... ... @@ -69,7 +69,7 @@ class OfflineCtTransformerModel::Impl {
int32_t vocab_size;
SHERPA_ONNX_READ_META_DATA(vocab_size, "vocab_size");
if (tokens.size() != vocab_size) {
if (static_cast<int32_t>(tokens.size()) != vocab_size) {
SHERPA_ONNX_LOGE("tokens.size() %d != vocab_size %d",
static_cast<int32_t>(tokens.size()), vocab_size);
exit(-1);
... ...
... ... @@ -71,7 +71,7 @@ class OfflinePunctuationCtTransformerImpl : public OfflinePunctuationImpl {
for (int32_t i = 0; i != num_segments; ++i) {
int32_t this_start = i * segment_size; // inclusive
int32_t this_end = this_start + segment_size; // exclusive
if (this_end > token_ids.size()) {
if (this_end > static_cast<int32_t>(token_ids.size())) {
this_end = token_ids.size();
}
... ... @@ -155,7 +155,7 @@ class OfflinePunctuationCtTransformerImpl : public OfflinePunctuationImpl {
std::vector<std::string> words_punct;
for (int32_t i = 0; i != static_cast<int32_t>(punctuations.size()); ++i) {
if (i >= tokens.size()) {
if (i >= static_cast<int32_t>(tokens.size())) {
break;
}
std::string &w = tokens[i];
... ...
... ... @@ -96,7 +96,7 @@ OfflineTtsCharacterFrontend::OfflineTtsCharacterFrontend(
std::vector<std::vector<int64_t>>
OfflineTtsCharacterFrontend::ConvertTextToTokenIds(
const std::string &_text, const std::string &voice /*= ""*/) const {
const std::string &_text, const std::string & /*voice = ""*/) const {
// see
// https://github.com/coqui-ai/TTS/blob/dev/TTS/tts/utils/text/tokenizer.py#L87
int32_t use_eos_bos = meta_data_.use_eos_bos;
... ... @@ -151,7 +151,7 @@ OfflineTtsCharacterFrontend::ConvertTextToTokenIds(
this_sentence.push_back(eos_id);
}
if (this_sentence.size() > 1 + use_eos_bos) {
if (static_cast<int32_t>(this_sentence.size()) > 1 + use_eos_bos) {
ans.push_back(std::move(this_sentence));
}
} else {
... ...
... ... @@ -238,7 +238,7 @@ class OfflineTtsVitsImpl : public OfflineTtsImpl {
}
batch.clear();
while (k < x.size()) {
while (k < static_cast<int32_t>(x.size())) {
batch.push_back(std::move(x[k]));
++k;
}
... ...
... ... @@ -91,8 +91,8 @@ struct OnlineRecognizerConfig {
int32_t max_active_paths = 4;
/// used only for modified_beam_search
float hotwords_score = 1.5;
std::string hotwords_file;
float hotwords_score = 1.5;
float blank_penalty = 0.0;
... ... @@ -102,17 +102,12 @@ struct OnlineRecognizerConfig {
OnlineRecognizerConfig(
const FeatureExtractorConfig &feat_config,
const OnlineModelConfig &model_config,
const OnlineLMConfig &lm_config,
const OnlineModelConfig &model_config, const OnlineLMConfig &lm_config,
const EndpointConfig &endpoint_config,
const OnlineCtcFstDecoderConfig &ctc_fst_decoder_config,
bool enable_endpoint,
const std::string &decoding_method,
int32_t max_active_paths,
const std::string &hotwords_file,
float hotwords_score,
float blank_penalty,
float temperature_scale)
bool enable_endpoint, const std::string &decoding_method,
int32_t max_active_paths, const std::string &hotwords_file,
float hotwords_score, float blank_penalty, float temperature_scale)
: feat_config(feat_config),
model_config(model_config),
lm_config(lm_config),
... ...
... ... @@ -95,15 +95,15 @@ class OnlineTransducerDecoder {
* online decoding case, each utterance has the same number of frames
* and there are no paddings.
*/
virtual void Decode(Ort::Value encoder_out, OnlineStream **ss,
std::vector<OnlineTransducerDecoderResult> *result) {
virtual void Decode(Ort::Value /*encoder_out*/, OnlineStream ** /*ss*/,
std::vector<OnlineTransducerDecoderResult> * /*result*/) {
SHERPA_ONNX_LOGE(
"This interface is for OnlineTransducerModifiedBeamSearchDecoder.");
exit(-1);
}
// used for endpointing. We need to keep decoder_out after reset
virtual void UpdateDecoderOut(OnlineTransducerDecoderResult *result) {}
virtual void UpdateDecoderOut(OnlineTransducerDecoderResult * /*result*/) {}
};
} // namespace sherpa_onnx
... ...
... ... @@ -77,7 +77,7 @@ void OnlineTransducerGreedySearchDecoder::Decode(
std::vector<int64_t> encoder_out_shape =
encoder_out.GetTensorTypeAndShapeInfo().GetShape();
if (encoder_out_shape[0] != result->size()) {
if (encoder_out_shape[0] != static_cast<int32_t>(result->size())) {
SHERPA_ONNX_LOGE(
"Size mismatch! encoder_out.size(0) %d, result.size(0): %d",
static_cast<int32_t>(encoder_out_shape[0]),
... ...
... ... @@ -69,7 +69,7 @@ class OnlineTransducerModel {
* This has to be called before GetEncoderInitStates(), so the `encoder_embed`
* init state has the correct `embed_dim` of its output.
*/
virtual void SetFeatureDim(int32_t feature_dim) {}
virtual void SetFeatureDim(int32_t /*feature_dim*/) {}
/** Run the encoder.
*
... ...
... ... @@ -17,7 +17,7 @@ namespace sherpa_onnx {
static void UseCachedDecoderOut(
const std::vector<int32_t> &hyps_row_splits,
const std::vector<OnlineTransducerDecoderResult> &results,
int32_t context_size, Ort::Value *decoder_out) {
Ort::Value *decoder_out) {
std::vector<int64_t> shape =
decoder_out->GetTensorTypeAndShapeInfo().GetShape();
... ... @@ -80,7 +80,8 @@ void OnlineTransducerModifiedBeamSearchDecoder::Decode(
std::vector<int64_t> encoder_out_shape =
encoder_out.GetTensorTypeAndShapeInfo().GetShape();
if (encoder_out_shape[0] != result->size()) {
if (static_cast<int32_t>(encoder_out_shape[0]) !=
static_cast<int32_t>(result->size())) {
SHERPA_ONNX_LOGE(
"Size mismatch! encoder_out.size(0) %d, result.size(0): %d\n",
static_cast<int32_t>(encoder_out_shape[0]),
... ... @@ -117,8 +118,7 @@ void OnlineTransducerModifiedBeamSearchDecoder::Decode(
Ort::Value decoder_input = model_->BuildDecoderInput(prev);
Ort::Value decoder_out = model_->RunDecoder(std::move(decoder_input));
if (t == 0) {
UseCachedDecoderOut(hyps_row_splits, *result, model_->ContextSize(),
&decoder_out);
UseCachedDecoderOut(hyps_row_splits, *result, &decoder_out);
}
Ort::Value cur_encoder_out =
... ... @@ -136,10 +136,9 @@ void OnlineTransducerModifiedBeamSearchDecoder::Decode(
int32_t p_logit_items = vocab_size * num_hyps;
std::vector<float> logit_with_temperature(p_logit_items);
{
std::copy(p_logit,
p_logit + p_logit_items,
std::copy(p_logit, p_logit + p_logit_items,
logit_with_temperature.begin());
for (float& elem : logit_with_temperature) {
for (float &elem : logit_with_temperature) {
elem /= temperature_scale_;
}
LogSoftmax(logit_with_temperature.data(), vocab_size, num_hyps);
... ... @@ -226,7 +225,7 @@ void OnlineTransducerModifiedBeamSearchDecoder::Decode(
cur.push_back(std::move(hyps));
p_logprob += (end - start) * vocab_size;
} // for (int32_t b = 0; b != batch_size; ++b)
} // for (int32_t t = 0; t != num_frames; ++t)
} // for (int32_t t = 0; t != num_frames; ++t)
for (int32_t b = 0; b != batch_size; ++b) {
auto &hyps = cur[b];
... ... @@ -242,7 +241,7 @@ void OnlineTransducerModifiedBeamSearchDecoder::Decode(
void OnlineTransducerModifiedBeamSearchDecoder::UpdateDecoderOut(
OnlineTransducerDecoderResult *result) {
if (result->tokens.size() == model_->ContextSize()) {
if (static_cast<int32_t>(result->tokens.size()) == model_->ContextSize()) {
result->decoder_out = Ort::Value{nullptr};
return;
}
... ...
... ... @@ -51,7 +51,7 @@ class Client {
c_.init_asio(&io_);
c_.set_open_handler([this](connection_hdl hdl) { OnOpen(hdl); });
c_.set_close_handler(
[this](connection_hdl /*hdl*/) { SHERPA_ONNX_LOGE("Disconnected"); });
[](connection_hdl /*hdl*/) { SHERPA_ONNX_LOGE("Disconnected"); });
c_.set_message_handler(
[this](connection_hdl hdl, message_ptr msg) { OnMessage(hdl, msg); });
... ...
... ... @@ -34,8 +34,8 @@ namespace sherpa_onnx {
OnlineZipformer2TransducerModel::OnlineZipformer2TransducerModel(
const OnlineModelConfig &config)
: env_(ORT_LOGGING_LEVEL_WARNING),
config_(config),
sess_opts_(GetSessionOptions(config)),
config_(config),
allocator_{} {
{
auto buf = ReadFile(config.transducer.encoder);
... ... @@ -179,7 +179,6 @@ void OnlineZipformer2TransducerModel::InitJoiner(void *model_data,
std::vector<Ort::Value> OnlineZipformer2TransducerModel::StackStates(
const std::vector<std::vector<Ort::Value>> &states) const {
int32_t batch_size = static_cast<int32_t>(states.size());
int32_t num_encoders = static_cast<int32_t>(num_encoder_layers_.size());
std::vector<const Ort::Value *> buf(batch_size);
... ... @@ -255,10 +254,9 @@ OnlineZipformer2TransducerModel::UnStackStates(
const std::vector<Ort::Value> &states) const {
int32_t m = std::accumulate(num_encoder_layers_.begin(),
num_encoder_layers_.end(), 0);
assert(states.size() == m * 6 + 2);
assert(static_cast<int32_t>(states.size()) == m * 6 + 2);
int32_t batch_size = states[0].GetTensorTypeAndShapeInfo().GetShape()[1];
int32_t num_encoders = num_encoder_layers_.size();
std::vector<std::vector<Ort::Value>> ans;
ans.resize(batch_size);
... ... @@ -266,7 +264,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
for (int32_t i = 0; i != m; ++i) {
{
auto v = Unbind(allocator_, &states[i * 6], 1);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ... @@ -274,7 +272,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
}
{
auto v = Unbind(allocator_, &states[i * 6 + 1], 1);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ... @@ -282,7 +280,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
}
{
auto v = Unbind(allocator_, &states[i * 6 + 2], 1);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ... @@ -290,7 +288,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
}
{
auto v = Unbind(allocator_, &states[i * 6 + 3], 1);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ... @@ -298,7 +296,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
}
{
auto v = Unbind(allocator_, &states[i * 6 + 4], 0);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ... @@ -306,7 +304,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
}
{
auto v = Unbind(allocator_, &states[i * 6 + 5], 0);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ... @@ -316,7 +314,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
{
auto v = Unbind(allocator_, &states[m * 6], 0);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ... @@ -324,7 +322,7 @@ OnlineZipformer2TransducerModel::UnStackStates(
}
{
auto v = Unbind<int64_t>(allocator_, &states[m * 6 + 1], 0);
assert(v.size() == batch_size);
assert(static_cast<int32_t>(v.size()) == batch_size);
for (int32_t n = 0; n != batch_size; ++n) {
ans[n].push_back(std::move(v[n]));
... ...
... ... @@ -246,7 +246,7 @@ static bool MustBeQuoted(const std::string &str, ShellType st) {
// Our aim is to print out the command line in such a way that if it's
// pasted into a shell of ShellType "st" (only bash for now), it
// will get passed to the program in the same way.
static std::string QuoteAndEscape(const std::string &str, ShellType st) {
static std::string QuoteAndEscape(const std::string &str, ShellType /*st*/) {
// Only Bash is supported (for the moment).
SHERPA_ONNX_CHECK_EQ(st, kBash) << "Invalid shell type.";
... ...
... ... @@ -31,7 +31,7 @@ static int32_t RecordCallback(const void *input_buffer,
return stop ? paComplete : paContinue;
}
static void Handler(int32_t sig) {
static void Handler(int32_t /*sig*/) {
stop = true;
fprintf(stderr, "\nCaught Ctrl + C. Exiting...\n");
}
... ... @@ -124,8 +124,6 @@ for a list of pre-trained models to download.
mic_sample_rate = atof(pSampleRateStr);
}
float sample_rate = 16000;
PaStream *stream;
PaError err =
Pa_OpenStream(&stream, &param, nullptr, /* &outputParameters, */
... ...
... ... @@ -62,7 +62,7 @@ static int32_t RecordCallback(const void *input_buffer,
unsigned long frames_per_buffer, // NOLINT
const PaStreamCallbackTimeInfo * /*time_info*/,
PaStreamCallbackFlags /*status_flags*/,
void *user_data) {
void * /*user_data*/) {
std::lock_guard<std::mutex> lock(samples_mutex);
auto p = reinterpret_cast<const float *>(input_buffer);
... ... @@ -71,7 +71,7 @@ static int32_t RecordCallback(const void *input_buffer,
return stop ? paComplete : paContinue;
}
static void Handler(int32_t sig) {
static void Handler(int32_t /*sig*/) {
stop = true;
fprintf(stderr, "\nCaught Ctrl + C. Press Enter to exit\n");
}
... ... @@ -165,8 +165,6 @@ for more models.
mic_sample_rate = atof(pSampleRateStr);
}
float sample_rate = 16000;
PaStream *stream;
PaError err =
Pa_OpenStream(&stream, &param, nullptr, /* &outputParameters, */
... ...
... ... @@ -62,7 +62,7 @@ static int32_t RecordCallback(const void *input_buffer,
unsigned long frames_per_buffer, // NOLINT
const PaStreamCallbackTimeInfo * /*time_info*/,
PaStreamCallbackFlags /*status_flags*/,
void *user_data) {
void * /*user_data*/) {
std::lock_guard<std::mutex> lock(samples_mutex);
auto p = reinterpret_cast<const float *>(input_buffer);
... ... @@ -71,7 +71,7 @@ static int32_t RecordCallback(const void *input_buffer,
return stop ? paComplete : paContinue;
}
static void Handler(int32_t sig) {
static void Handler(int32_t /*sig*/) {
stop = true;
fprintf(stderr, "\nCaught Ctrl + C. Press Enter to exit\n");
}
... ... @@ -180,7 +180,6 @@ for a list of pre-trained models to download.
fprintf(stderr, "Use sample rate %f for mic\n", mic_sample_rate);
mic_sample_rate = atof(pSampleRateStr);
}
float sample_rate = 16000;
PaStream *stream;
PaError err =
... ...
... ... @@ -33,7 +33,7 @@ static int32_t RecordCallback(const void *input_buffer,
return stop ? paComplete : paContinue;
}
static void Handler(int32_t sig) {
static void Handler(int32_t /*sig*/) {
stop = true;
fprintf(stderr, "\nCaught Ctrl + C. Exiting...\n");
}
... ...
... ... @@ -85,7 +85,7 @@ void AsrInference(const std::vector<std::vector<std::string>> &chunk_wav_paths,
while (true) {
int chunk = wav_index.fetch_add(1);
if (chunk >= chunk_wav_paths.size()) {
if (chunk >= static_cast<int32_t>(chunk_wav_paths.size())) {
break;
}
const auto &wav_paths = chunk_wav_paths[chunk];
... ...
... ... @@ -85,7 +85,7 @@ static int PlayCallback(const void * /*in*/, void *out,
}
int32_t k = 0;
for (; k < n && !g_buffer.samples.empty();) {
for (; k < static_cast<int32_t>(n) && !g_buffer.samples.empty();) {
int32_t this_block = n - k;
auto &p = g_buffer.samples.front();
... ... @@ -99,7 +99,7 @@ static int PlayCallback(const void * /*in*/, void *out,
k = n;
if (p.consumed == p.data.size()) {
if (p.consumed == static_cast<int32_t>(p.data.size())) {
g_buffer.samples.pop();
}
break;
... ... @@ -110,7 +110,7 @@ static int PlayCallback(const void * /*in*/, void *out,
g_buffer.samples.pop();
}
if (k < n) {
if (k < static_cast<int32_t>(n)) {
std::fill_n(pout + k, n - k, 0);
}
... ... @@ -121,7 +121,7 @@ static int PlayCallback(const void * /*in*/, void *out,
return paContinue;
}
static void PlayCallbackFinished(void *userData) { g_cv.notify_all(); }
static void PlayCallbackFinished(void * /*userData*/) { g_cv.notify_all(); }
static void StartPlayback(int32_t sample_rate) {
int32_t frames_per_buffer = 1024;
... ...
... ... @@ -9,7 +9,7 @@
#include "sherpa-onnx/csrc/parse-options.h"
#include "sherpa-onnx/csrc/wave-writer.h"
void audioCallback(const float *samples, int32_t n, float progress) {
void audioCallback(const float * /*samples*/, int32_t n, float progress) {
printf("sample=%d, progress=%f\n", n, progress);
}
... ...
... ... @@ -25,14 +25,14 @@ static int32_t RecordCallback(const void *input_buffer,
unsigned long frames_per_buffer, // NOLINT
const PaStreamCallbackTimeInfo * /*time_info*/,
PaStreamCallbackFlags /*status_flags*/,
void *user_data) {
void * /*user_data*/) {
std::lock_guard<std::mutex> lock(mutex);
buffer.Push(reinterpret_cast<const float *>(input_buffer), frames_per_buffer);
return stop ? paComplete : paContinue;
}
static void Handler(int32_t sig) {
static void Handler(int32_t /*sig*/) {
stop = true;
fprintf(stderr, "\nCaught Ctrl + C. Exiting...\n");
}
... ...
... ... @@ -25,14 +25,14 @@ static int32_t RecordCallback(const void *input_buffer,
unsigned long frames_per_buffer, // NOLINT
const PaStreamCallbackTimeInfo * /*time_info*/,
PaStreamCallbackFlags /*status_flags*/,
void *user_data) {
void * /*user_data*/) {
std::lock_guard<std::mutex> lock(mutex);
buffer.Push(reinterpret_cast<const float *>(input_buffer), frames_per_buffer);
return stop ? paComplete : paContinue;
}
static void Handler(int32_t sig) {
static void Handler(int32_t /*sig*/) {
stop = true;
fprintf(stderr, "\nCaught Ctrl + C. Exiting...\n");
}
... ...
... ... @@ -26,8 +26,6 @@ Ort::Value Slice(OrtAllocator *allocator, const Ort::Value *v,
assert(dim1_start < dim1_end);
assert(dim1_end <= shape[1]);
const T *src = v->GetTensorData<T>();
std::array<int64_t, 3> ans_shape{dim0_end - dim0_start, dim1_end - dim1_start,
shape[2]};
... ...
... ... @@ -50,7 +50,7 @@ class SpeakerEmbeddingManager::Impl {
}
for (const auto &x : embedding_list) {
if (x.size() != dim_) {
if (static_cast<int32_t>(x.size()) != dim_) {
SHERPA_ONNX_LOGE("Given dim: %d, expected dim: %d",
static_cast<int32_t>(x.size()), dim_);
return false;
... ... @@ -224,7 +224,7 @@ bool SpeakerEmbeddingManager::Verify(const std::string &name, const float *p,
}
float SpeakerEmbeddingManager::Score(const std::string &name,
const float *p) const {
const float *p) const {
return impl_->Score(name, p);
}
... ...
... ... @@ -58,7 +58,7 @@ std::vector<Ort::Value> Unbind(OrtAllocator *allocator, const Ort::Value *value,
}
}
return std::move(ans);
return ans;
}
template std::vector<Ort::Value> Unbind<float>(OrtAllocator *allocator,
... ...
... ... @@ -161,7 +161,7 @@ std::vector<float> ReadWaveImpl(std::istream &is, int32_t *sampling_rate,
}
std::vector<float> ans(samples.size());
for (int32_t i = 0; i != ans.size(); ++i) {
for (int32_t i = 0; i != static_cast<int32_t>(ans.size()); ++i) {
ans[i] = samples[i] / 32768.;
}
... ...
... ... @@ -146,13 +146,13 @@ JNIEXPORT jlong JNICALL Java_com_k2fsa_sherpa_onnx_KeywordSpotter_newFromFile(
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_KeywordSpotter_delete(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
delete reinterpret_cast<sherpa_onnx::KeywordSpotter *>(ptr);
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_KeywordSpotter_decode(
JNIEnv *env, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
auto kws = reinterpret_cast<sherpa_onnx::KeywordSpotter *>(ptr);
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(stream_ptr);
... ... @@ -185,7 +185,7 @@ JNIEXPORT jlong JNICALL Java_com_k2fsa_sherpa_onnx_KeywordSpotter_createStream(
SHERPA_ONNX_EXTERN_C
JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_KeywordSpotter_isReady(
JNIEnv *env, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
auto kws = reinterpret_cast<sherpa_onnx::KeywordSpotter *>(ptr);
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(stream_ptr);
... ...
... ... @@ -86,7 +86,7 @@ Java_com_k2fsa_sherpa_onnx_OfflinePunctuation_newFromFile(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OfflinePunctuation_delete(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
delete reinterpret_cast<sherpa_onnx::OfflinePunctuation *>(ptr);
}
... ...
... ... @@ -220,13 +220,13 @@ Java_com_k2fsa_sherpa_onnx_OfflineRecognizer_newFromFile(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OfflineRecognizer_delete(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
delete reinterpret_cast<sherpa_onnx::OfflineRecognizer *>(ptr);
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT jlong JNICALL
Java_com_k2fsa_sherpa_onnx_OfflineRecognizer_createStream(JNIEnv *env,
Java_com_k2fsa_sherpa_onnx_OfflineRecognizer_createStream(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
auto recognizer = reinterpret_cast<sherpa_onnx::OfflineRecognizer *>(ptr);
... ... @@ -242,7 +242,7 @@ Java_com_k2fsa_sherpa_onnx_OfflineRecognizer_createStream(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OfflineRecognizer_decode(
JNIEnv *env, jobject /*obj*/, jlong ptr, jlong streamPtr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr, jlong streamPtr) {
auto recognizer = reinterpret_cast<sherpa_onnx::OfflineRecognizer *>(ptr);
auto stream = reinterpret_cast<sherpa_onnx::OfflineStream *>(streamPtr);
... ...
... ... @@ -8,7 +8,7 @@
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OfflineStream_delete(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
delete reinterpret_cast<sherpa_onnx::OfflineStream *>(ptr);
}
... ...
... ... @@ -136,19 +136,19 @@ JNIEXPORT jlong JNICALL Java_com_k2fsa_sherpa_onnx_OfflineTts_newFromFile(
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OfflineTts_delete(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
delete reinterpret_cast<sherpa_onnx::OfflineTts *>(ptr);
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT jint JNICALL Java_com_k2fsa_sherpa_onnx_OfflineTts_getSampleRate(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
return reinterpret_cast<sherpa_onnx::OfflineTts *>(ptr)->SampleRate();
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT jint JNICALL Java_com_k2fsa_sherpa_onnx_OfflineTts_getNumSpeakers(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
return reinterpret_cast<sherpa_onnx::OfflineTts *>(ptr)->NumSpeakers();
}
... ...
... ... @@ -284,13 +284,13 @@ JNIEXPORT jlong JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_newFromFile(
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_delete(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
delete reinterpret_cast<sherpa_onnx::OnlineRecognizer *>(ptr);
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_reset(
JNIEnv *env, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
auto recognizer = reinterpret_cast<sherpa_onnx::OnlineRecognizer *>(ptr);
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(stream_ptr);
recognizer->Reset(stream);
... ... @@ -298,7 +298,7 @@ JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_reset(
SHERPA_ONNX_EXTERN_C
JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_isReady(
JNIEnv *env, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
auto recognizer = reinterpret_cast<sherpa_onnx::OnlineRecognizer *>(ptr);
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(stream_ptr);
... ... @@ -307,7 +307,7 @@ JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_isReady(
SHERPA_ONNX_EXTERN_C
JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_isEndpoint(
JNIEnv *env, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
auto recognizer = reinterpret_cast<sherpa_onnx::OnlineRecognizer *>(ptr);
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(stream_ptr);
... ... @@ -316,7 +316,7 @@ JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_isEndpoint(
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OnlineRecognizer_decode(
JNIEnv *env, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr, jlong stream_ptr) {
auto recognizer = reinterpret_cast<sherpa_onnx::OnlineRecognizer *>(ptr);
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(stream_ptr);
... ...
... ... @@ -8,7 +8,7 @@
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OnlineStream_delete(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
delete reinterpret_cast<sherpa_onnx::OnlineStream *>(ptr);
}
... ... @@ -26,7 +26,7 @@ JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OnlineStream_acceptWaveform(
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_OnlineStream_inputFinished(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(ptr);
stream->InputFinished();
}
... ...
... ... @@ -77,7 +77,7 @@ Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_newFromFile(
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_delete(JNIEnv *env,
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_delete(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
delete reinterpret_cast<sherpa_onnx::SpeakerEmbeddingExtractor *>(ptr);
... ... @@ -86,7 +86,7 @@ Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_delete(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT jlong JNICALL
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_createStream(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
std::unique_ptr<sherpa_onnx::OnlineStream> s =
reinterpret_cast<sherpa_onnx::SpeakerEmbeddingExtractor *>(ptr)
->CreateStream();
... ... @@ -101,7 +101,7 @@ Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_createStream(
SHERPA_ONNX_EXTERN_C
JNIEXPORT jboolean JNICALL
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_isReady(JNIEnv *env,
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_isReady(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr,
jlong stream_ptr) {
... ... @@ -130,7 +130,7 @@ Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_compute(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT jint JNICALL Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingExtractor_dim(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
auto extractor =
reinterpret_cast<sherpa_onnx::SpeakerEmbeddingExtractor *>(ptr);
return extractor->Dim();
... ...
... ... @@ -17,7 +17,7 @@ Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingManager_create(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingManager_delete(JNIEnv *env,
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingManager_delete(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
auto manager = reinterpret_cast<sherpa_onnx::SpeakerEmbeddingManager *>(ptr);
... ... @@ -178,7 +178,7 @@ Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingManager_contains(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT jint JNICALL
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingManager_numSpeakers(JNIEnv *env,
Java_com_k2fsa_sherpa_onnx_SpeakerEmbeddingManager_numSpeakers(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
auto manager = reinterpret_cast<sherpa_onnx::SpeakerEmbeddingManager *>(ptr);
... ...
... ... @@ -101,7 +101,7 @@ Java_com_k2fsa_sherpa_onnx_SpokenLanguageIdentification_newFromFile(
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL
Java_com_k2fsa_sherpa_onnx_SpokenLanguageIdentification_delete(JNIEnv *env,
Java_com_k2fsa_sherpa_onnx_SpokenLanguageIdentification_delete(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
delete reinterpret_cast<sherpa_onnx::SpokenLanguageIdentification *>(ptr);
... ... @@ -110,7 +110,7 @@ Java_com_k2fsa_sherpa_onnx_SpokenLanguageIdentification_delete(JNIEnv *env,
SHERPA_ONNX_EXTERN_C
JNIEXPORT jlong JNICALL
Java_com_k2fsa_sherpa_onnx_SpokenLanguageIdentification_createStream(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
auto slid =
reinterpret_cast<sherpa_onnx::SpokenLanguageIdentification *>(ptr);
std::unique_ptr<sherpa_onnx::OfflineStream> s = slid->CreateStream();
... ...
... ... @@ -97,7 +97,7 @@ JNIEXPORT jlong JNICALL Java_com_k2fsa_sherpa_onnx_Vad_newFromFile(
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_delete(JNIEnv *env,
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_delete(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
delete reinterpret_cast<sherpa_onnx::VoiceActivityDetector *>(ptr);
... ... @@ -117,7 +117,7 @@ JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_acceptWaveform(
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_Vad_empty(JNIEnv *env,
JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_Vad_empty(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
auto model = reinterpret_cast<sherpa_onnx::VoiceActivityDetector *>(ptr);
... ... @@ -125,7 +125,7 @@ JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_Vad_empty(JNIEnv *env,
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_pop(JNIEnv *env,
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_pop(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
auto model = reinterpret_cast<sherpa_onnx::VoiceActivityDetector *>(ptr);
... ... @@ -133,7 +133,7 @@ JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_pop(JNIEnv *env,
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_clear(JNIEnv *env,
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_clear(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
auto model = reinterpret_cast<sherpa_onnx::VoiceActivityDetector *>(ptr);
... ... @@ -161,13 +161,13 @@ Java_com_k2fsa_sherpa_onnx_Vad_front(JNIEnv *env, jobject /*obj*/, jlong ptr) {
SHERPA_ONNX_EXTERN_C
JNIEXPORT bool JNICALL Java_com_k2fsa_sherpa_onnx_Vad_isSpeechDetected(
JNIEnv *env, jobject /*obj*/, jlong ptr) {
JNIEnv * /*env*/, jobject /*obj*/, jlong ptr) {
auto model = reinterpret_cast<sherpa_onnx::VoiceActivityDetector *>(ptr);
return model->IsSpeechDetected();
}
SHERPA_ONNX_EXTERN_C
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_reset(JNIEnv *env,
JNIEXPORT void JNICALL Java_com_k2fsa_sherpa_onnx_Vad_reset(JNIEnv * /*env*/,
jobject /*obj*/,
jlong ptr) {
auto model = reinterpret_cast<sherpa_onnx::VoiceActivityDetector *>(ptr);
... ...
... ... @@ -63,7 +63,11 @@ if(APPLE)
OUTPUT_VARIABLE PYTHON_SITE_PACKAGE_DIR
)
message(STATUS "PYTHON_SITE_PACKAGE_DIR: ${PYTHON_SITE_PACKAGE_DIR}")
target_link_libraries(_sherpa_onnx PRIVATE "-Wl,-rpath,${PYTHON_SITE_PACKAGE_DIR}")
if(PYTHON_SITE_PACKAGE_DIR STREQUAL "")
message(WARNING "PYTHON_SITE_PACKAGE_DIR is empty!")
else()
target_link_libraries(_sherpa_onnx PRIVATE "-Wl,-rpath,${PYTHON_SITE_PACKAGE_DIR}")
endif()
endif()
if(NOT WIN32)
... ...