[lite] Add threads and power_mode option support (#298)

* [cmake] support Android arm64-v8a & armeabi-v7a native c++ sdk

* [cmake] fixed patchelf download on mac and android

* [lite] Add threads and power_mode option support

* [pybind] update runtime pybind for lite power mode

* [python] Add set_lite_power_mode api to runtime
Author:    DefTruth
Date:      2022-09-28 18:09:35 +08:00
Committed by: GitHub
Parent:    5e9a5755fd
Commit:    c5f85de356

6 changed files with 66 additions and 16 deletions
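
For context, a minimal sketch of how the new options are intended to be used from the Python runtime API. Only set_model_path and set_lite_power_mode are touched by this commit; the import name fastdeploy, the model file names, and the use_lite_backend()/set_cpu_thread_num() helpers are assumptions for illustration and are not part of this diff.

import fastdeploy as fd  # assumed package name

option = fd.RuntimeOption()
# Paddle-Lite only accepts Paddle-format models (see the FDASSERT in CreateLiteBackend below).
option.set_model_path("model.pdmodel", "model.pdiparams")  # placeholder paths
option.use_lite_backend()      # assumed helper: select the Paddle-Lite backend
option.set_cpu_thread_num(4)   # assumed helper: feeds LiteBackendOption.threads
option.set_lite_power_mode(3)  # new in this commit: 3 == LITE_POWER_NO_BIND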

View File

@@ -13,6 +13,7 @@
 // limitations under the License.
 #include "fastdeploy/backends/lite/lite_backend.h"
+
 #include <cstring>

 namespace fastdeploy {
@@ -40,8 +41,16 @@ FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
 void LiteBackend::BuildOption(const LiteBackendOption& option) {
   std::vector<paddle::lite_api::Place> valid_places;
-  valid_places.push_back(paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
+  valid_places.push_back(
+      paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
   config_.set_valid_places(valid_places);
+  if (option.threads > 0) {
+    config_.set_threads(option.threads);
+  }
+  if (option.power_mode > 0) {
+    config_.set_power_mode(
+        static_cast<paddle::lite_api::PowerMode>(option.power_mode));
+  }
 }

 bool LiteBackend::InitFromPaddle(const std::string& model_file,
@@ -56,7 +65,9 @@ bool LiteBackend::InitFromPaddle(const std::string& model_file,
   config_.set_model_file(model_file);
   config_.set_param_file(params_file);
   BuildOption(option);
-  predictor_ = paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::CxxConfig>(config_);
+  predictor_ =
+      paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::CxxConfig>(
+          config_);

   inputs_desc_.clear();
   outputs_desc_.clear();
@@ -103,9 +114,7 @@ TensorInfo LiteBackend::GetOutputInfo(int index) {
   return outputs_desc_[index];
 }

-std::vector<TensorInfo> LiteBackend::GetOutputInfos() {
-  return outputs_desc_;
-}
+std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }

 bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
                         std::vector<FDTensor>* outputs) {
@@ -119,12 +128,15 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
   for (size_t i = 0; i < inputs.size(); ++i) {
     auto iter = inputs_order_.find(inputs[i].name);
     if (iter == inputs_order_.end()) {
-      FDERROR << "Cannot find input with name:" << inputs[i].name << " in loaded model." << std::endl;
+      FDERROR << "Cannot find input with name:" << inputs[i].name
+              << " in loaded model." << std::endl;
       return false;
     }
     auto tensor = predictor_->GetInput(iter->second);
     tensor->Resize(inputs[i].shape);
-    tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()), inputs[i].Nbytes(), paddle::lite_api::TargetType::kARM);
+    tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()),
+                                inputs[i].Nbytes(),
+                                paddle::lite_api::TargetType::kARM);
   }

   predictor_->Run();
@@ -132,8 +144,10 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
   outputs->resize(outputs_desc_.size());
   for (size_t i = 0; i < outputs_desc_.size(); ++i) {
     auto tensor = predictor_->GetOutput(i);
-    (*outputs)[i].Resize(tensor->shape(), outputs_desc_[i].dtype, outputs_desc_[i].name);
-    memcpy((*outputs)[i].MutableData(), tensor->data<void>(), (*outputs)[i].Nbytes());
+    (*outputs)[i].Resize(tensor->shape(), outputs_desc_[i].dtype,
+                         outputs_desc_[i].name);
+    memcpy((*outputs)[i].MutableData(), tensor->data<void>(),
+           (*outputs)[i].Nbytes());
   }
   return true;
 }

View File

@@ -25,6 +25,18 @@
 namespace fastdeploy {

 struct LiteBackendOption {
+  // cpu num threads
+  int threads = 1;
+  // lite power mode
+  // 0: LITE_POWER_HIGH
+  // 1: LITE_POWER_LOW
+  // 2: LITE_POWER_FULL
+  // 3: LITE_POWER_NO_BIND
+  // 4: LITE_POWER_RAND_HIGH
+  // 5: LITE_POWER_RAND_LOW
+  int power_mode = 0;
+  // TODO(qiuyanjun): support more options for lite backend.
+  // Such as fp16, different device target (kARM/kXPU/kNPU/...)
 };

 // Convert data type from paddle lite to fastdeploy

View File

@@ -34,6 +34,7 @@ void BindRuntime(pybind11::module& m) {
       .def("disable_paddle_log_info", &RuntimeOption::DisablePaddleLogInfo)
       .def("set_paddle_mkldnn_cache_size",
           &RuntimeOption::SetPaddleMKLDNNCacheSize)
+      .def("set_lite_power_mode", &RuntimeOption::SetLitePowerMode)
       .def("set_trt_input_shape", &RuntimeOption::SetTrtInputShape)
       .def("enable_trt_fp16", &RuntimeOption::EnableTrtFP16)
       .def("disable_trt_fp16", &RuntimeOption::DisableTrtFP16)

View File

@@ -148,7 +148,9 @@ void RuntimeOption::SetModelPath(const std::string& model_path,
     model_file = model_path;
     model_format = ModelFormat::ONNX;
   } else {
-    FDASSERT(false, "The model format only can be ModelFormat::PADDLE/ModelFormat::ONNX.");
+    FDASSERT(
+        false,
+        "The model format only can be ModelFormat::PADDLE/ModelFormat::ONNX.");
   }
 }
@@ -228,6 +230,11 @@ void RuntimeOption::SetPaddleMKLDNNCacheSize(int size) {
   pd_mkldnn_cache_size = size;
 }

+void RuntimeOption::SetLitePowerMode(int mode) {
+  FDASSERT(mode > -1, "Parameter mode must greater than -1.");
+  lite_power_mode = mode;
+}
+
 void RuntimeOption::SetTrtInputShape(const std::string& input_name,
                                      const std::vector<int32_t>& min_shape,
                                      const std::vector<int32_t>& opt_shape,
@@ -465,6 +472,8 @@ void Runtime::CreateTrtBackend() {
 void Runtime::CreateLiteBackend() {
 #ifdef ENABLE_LITE_BACKEND
   auto lite_option = LiteBackendOption();
+  lite_option.threads = option.cpu_thread_num;
+  lite_option.power_mode = option.lite_power_mode;
   FDASSERT(option.model_format == ModelFormat::PADDLE,
            "LiteBackend only support model format of ModelFormat::PADDLE");
   backend_ = utils::make_unique<LiteBackend>();

View File

@@ -84,6 +84,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
   // set size of cached shape while enable mkldnn with paddle inference backend
   void SetPaddleMKLDNNCacheSize(int size);

+  // set the power mode of paddle lite backend.
+  void SetLitePowerMode(int mode);
+
   // set tensorrt shape while the inputs of model contain dynamic shape
   // min_shape: the minimum shape
   // opt_shape: the most common shape while inference, default be empty
@@ -126,6 +129,12 @@ struct FASTDEPLOY_DECL RuntimeOption {
   int pd_mkldnn_cache_size = 1;
   std::vector<std::string> pd_delete_pass_names;

+  // ======Only for Paddle-Lite Backend=====
+  // 0: LITE_POWER_HIGH 1: LITE_POWER_LOW 2: LITE_POWER_FULL
+  // 3: LITE_POWER_NO_BIND 4: LITE_POWER_RAND_HIGH
+  // 5: LITE_POWER_RAND_LOW
+  int lite_power_mode = 0;
+
   // ======Only for Trt Backend=======
   std::map<std::string, std::vector<int32_t>> trt_max_shape;
   std::map<std::string, std::vector<int32_t>> trt_min_shape;

View File

@@ -54,7 +54,9 @@ class RuntimeOption:
     def __init__(self):
         self._option = C.RuntimeOption()

-    def set_model_path(self, model_path, params_path="",
+    def set_model_path(self,
+                       model_path,
+                       params_path="",
                        model_format=C.ModelFormat.PADDLE):
         return self._option.set_model_path(model_path, params_path,
                                            model_format)
@@ -98,6 +100,9 @@ class RuntimeOption:
     def set_paddle_mkldnn_cache_size(self, cache_size):
         return self._option.set_paddle_mkldnn_cache_size(cache_size)

+    def set_lite_power_mode(self, mode):
+        return self._option.set_lite_power_mode(mode)
+
     def set_trt_input_shape(self,
                             tensor_name,
                             min_shape,
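
As a usage note, the integer passed to set_lite_power_mode() follows the numbering documented in LiteBackendOption above. A small, hypothetical name map (not an API added by this commit) keeps call sites readable:

# Hypothetical helper mirroring the LiteBackendOption comments (0..5).
LITE_POWER_MODE = {
    "LITE_POWER_HIGH": 0,
    "LITE_POWER_LOW": 1,
    "LITE_POWER_FULL": 2,
    "LITE_POWER_NO_BIND": 3,
    "LITE_POWER_RAND_HIGH": 4,
    "LITE_POWER_RAND_LOW": 5,
}

option = RuntimeOption()  # the class defined above
option.set_lite_power_mode(LITE_POWER_MODE["LITE_POWER_NO_BIND"])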