mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-07 09:31:35 +08:00
[lite] Add threads and power_mode option support (#298)
* [cmake] support Android arm64-v8a & armeabi-v7a native C++ SDK
* [cmake] fix patchelf download on macOS and Android
* [lite] add threads and power_mode option support
* [pybind] update runtime pybind for Lite power mode
* [python] add set_lite_power_mode API to runtime
This commit is contained in:
@@ -84,6 +84,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
   // set size of cached shape while enable mkldnn with paddle inference backend
   void SetPaddleMKLDNNCacheSize(int size);

+  // set the power mode of paddle lite backend.
+  void SetLitePowerMode(int mode);
+
   // set tensorrt shape while the inputs of model contain dynamic shape
   // min_shape: the minimum shape
   // opt_shape: the most common shape while inference, default be empty
@@ -126,6 +129,12 @@ struct FASTDEPLOY_DECL RuntimeOption {
   int pd_mkldnn_cache_size = 1;
   std::vector<std::string> pd_delete_pass_names;

+  // ======Only for Paddle-Lite Backend=====
+  // 0: LITE_POWER_HIGH 1: LITE_POWER_LOW 2: LITE_POWER_FULL
+  // 3: LITE_POWER_NO_BIND 4: LITE_POWER_RAND_HIGH
+  // 5: LITE_POWER_RAND_LOW
+  int lite_power_mode = 0;
+
   // ======Only for Trt Backend=======
   std::map<std::string, std::vector<int32_t>> trt_max_shape;
   std::map<std::string, std::vector<int32_t>> trt_min_shape;
Reference in New Issue
Block a user