[Example] Update runtime examples (#1542)

* Add notes for tensors

* Optimize some APIs

* move some warnings
This commit is contained in:
Jason
2023-03-08 16:56:04 +08:00
committed by GitHub
parent 3d31834193
commit 6be2c0367b
17 changed files with 425 additions and 277 deletions

View File

@@ -54,6 +54,8 @@ struct PaddleBackendOption {
bool enable_mkldnn = true;
/// Use Paddle Inference + TensorRT to inference model on GPU
bool enable_trt = false;
/// Whether to enable memory optimization, defaults to true
bool enable_memory_optimize = true;
/*
* @brief IPU option; this configures the IPU hardware when running model inference on IPU

View File

@@ -41,6 +41,7 @@ void BindPaddleOption(pybind11::module& m) {
.def_readwrite("enable_log_info", &PaddleBackendOption::enable_log_info)
.def_readwrite("enable_mkldnn", &PaddleBackendOption::enable_mkldnn)
.def_readwrite("enable_trt", &PaddleBackendOption::enable_trt)
.def_readwrite("enable_memory_optimize", &PaddleBackendOption::enable_memory_optimize)
.def_readwrite("ipu_option", &PaddleBackendOption::ipu_option)
.def_readwrite("collect_trt_shape",
&PaddleBackendOption::collect_trt_shape)

View File

@@ -147,7 +147,9 @@ bool PaddleBackend::InitFromPaddle(const std::string& model_buffer,
}
config_.SetModelBuffer(model_buffer.c_str(), model_buffer.size(),
params_buffer.c_str(), params_buffer.size());
config_.EnableMemoryOptim();
if (option.enable_memory_optimize) {
config_.EnableMemoryOptim();
}
BuildOption(option);
// The input/output information get from predictor is not right, use