// sherpa-onnx/csrc/provider-config.h
//
// Copyright (c) 2024 Uniphore (Author: Manickavela)
#ifndef SHERPA_ONNX_CSRC_PROVIDER_CONFIG_H_
#define SHERPA_ONNX_CSRC_PROVIDER_CONFIG_H_
#include <string>
#include "sherpa-onnx/csrc/parse-options.h"
#include "sherpa-onnx/csrc/macros.h"
#include "onnxruntime_cxx_api.h" // NOLINT
namespace sherpa_onnx {
// Configuration for the ONNX Runtime CUDA execution provider.
struct CudaConfig {
// cuDNN convolution algorithm search strategy. Defaults to
// OrtCudnnConvAlgoSearchHeuristic (lightweight heuristic search); other
// values of onnxruntime's OrtCudnnConvAlgoSearch enum select exhaustive
// or default search. Stored as int32_t so it can be set from the
// command line via Register().
int32_t cudnn_conv_algo_search = OrtCudnnConvAlgoSearchHeuristic;
CudaConfig() = default;
explicit CudaConfig(int32_t cudnn_conv_algo_search)
: cudnn_conv_algo_search(cudnn_conv_algo_search) {}
// Registers this struct's options with the command-line parser.
// Defined in the corresponding .cc file.
void Register(ParseOptions *po);
// Returns true if the current option values are acceptable.
bool Validate() const;
// Returns a human-readable dump of the configuration.
std::string ToString() const;
};
// Configuration for the ONNX Runtime TensorRT execution provider.
// Field names mirror the ONNX Runtime TensorRT EP option names
// (trt_*); presumably each value is forwarded verbatim to the EP —
// see the Register()/Validate() definitions in the .cc for details.
struct TensorrtConfig {
// Maximum GPU memory (bytes) TensorRT may use for its workspace.
// 2147483647 == 2^31 - 1, i.e. just under 2 GiB.
int64_t trt_max_workspace_size = 2147483647;
// Upper bound on partitioning iterations when splitting the model
// into TensorRT-compatible subgraphs.
int32_t trt_max_partition_iterations = 10;
// Subgraphs with fewer nodes than this are not offloaded to TensorRT.
int32_t trt_min_subgraph_size = 5;
// Enable FP16 precision in TensorRT kernels (on by default).
bool trt_fp16_enable = true;
// Emit verbose logs while TensorRT builds engines.
bool trt_detailed_build_log = false;
// Cache built TensorRT engines on disk to avoid rebuild on restart.
bool trt_engine_cache_enable = true;
// Cache kernel timing results used during engine building.
bool trt_timing_cache_enable = true;
// Directories for the engine/timing caches; "." = current directory.
std::string trt_engine_cache_path = ".";
std::string trt_timing_cache_path = ".";
// Dump the subgraphs handed to TensorRT (debugging aid).
bool trt_dump_subgraphs = false;
TensorrtConfig() = default;
// All-fields constructor; parameter order matches declaration order above.
TensorrtConfig(int64_t trt_max_workspace_size,
int32_t trt_max_partition_iterations,
int32_t trt_min_subgraph_size,
bool trt_fp16_enable,
bool trt_detailed_build_log,
bool trt_engine_cache_enable,
bool trt_timing_cache_enable,
const std::string &trt_engine_cache_path,
const std::string &trt_timing_cache_path,
bool trt_dump_subgraphs)
: trt_max_workspace_size(trt_max_workspace_size),
trt_max_partition_iterations(trt_max_partition_iterations),
trt_min_subgraph_size(trt_min_subgraph_size),
trt_fp16_enable(trt_fp16_enable),
trt_detailed_build_log(trt_detailed_build_log),
trt_engine_cache_enable(trt_engine_cache_enable),
trt_timing_cache_enable(trt_timing_cache_enable),
trt_engine_cache_path(trt_engine_cache_path),
trt_timing_cache_path(trt_timing_cache_path),
trt_dump_subgraphs(trt_dump_subgraphs) {}
// Registers this struct's options with the command-line parser.
void Register(ParseOptions *po);
// Returns true if the current option values are acceptable.
bool Validate() const;
// Returns a human-readable dump of the configuration.
std::string ToString() const;
};
// Top-level execution-provider selection plus per-provider settings.
struct ProviderConfig {
// TensorRT-specific options; presumably consulted only when the
// selected provider is TensorRT — confirm in the .cc.
TensorrtConfig trt_config;
// CUDA-specific options; presumably consulted only for CUDA/TensorRT.
CudaConfig cuda_config;
// Which execution provider to use; defaults to "cpu".
std::string provider = "cpu";
// GPU device index; only meaningful for the cuda and trt providers.
int32_t device = 0;
// device only used for cuda and trt
ProviderConfig() = default;
// Convenience constructor: choose provider/device, keep default
// CUDA/TensorRT sub-configs.
ProviderConfig(const std::string &provider,
int32_t device)
: provider(provider), device(device) {}
// Full constructor: supply all sub-configs explicitly.
ProviderConfig(const TensorrtConfig &trt_config,
const CudaConfig &cuda_config,
const std::string &provider,
int32_t device)
: trt_config(trt_config), cuda_config(cuda_config),
provider(provider), device(device) {}
// Registers all options (including sub-configs) with the parser.
void Register(ParseOptions *po);
// Returns true if the current option values are acceptable.
bool Validate() const;
// Returns a human-readable dump of the configuration.
std::string ToString() const;
};
} // namespace sherpa_onnx
#endif // SHERPA_ONNX_CSRC_PROVIDER_CONFIG_H_