Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[Example] Update runtime examples (#1542)
* Add notes for tensors
* Optimize some APIs
* Move some warnings
@@ -54,6 +54,8 @@ struct PaddleBackendOption {
   bool enable_mkldnn = true;
   /// Use Paddle Inference + TensorRT to inference model on GPU
   bool enable_trt = false;
+  /// Whether enable memory optimize, default true
+  bool enable_memory_optimize = true;
 
   /*
    * @brief IPU option, this will configure the IPU hardware, if inference model in IPU
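The hunk above adds a new public flag to PaddleBackendOption, on by default. A minimal sketch of how a caller reads and clears it; the struct below is a stand-in mirroring only the fields visible in the diff, not FastDeploy's full option header:

#include <cassert>

// Stand-in for the fields shown in the diff; the real struct is
// FastDeploy's PaddleBackendOption in the Paddle backend option header.
struct PaddleBackendOption {
  bool enable_mkldnn = true;
  bool enable_trt = false;
  bool enable_memory_optimize = true;  // added by this commit, default true
};

int main() {
  PaddleBackendOption opt;
  assert(opt.enable_memory_optimize);  // default is true, as documented
  opt.enable_memory_optimize = false;  // opt out of Paddle's memory reuse pass
  return 0;
}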
@@ -41,6 +41,7 @@ void BindPaddleOption(pybind11::module& m) {
       .def_readwrite("enable_log_info", &PaddleBackendOption::enable_log_info)
       .def_readwrite("enable_mkldnn", &PaddleBackendOption::enable_mkldnn)
       .def_readwrite("enable_trt", &PaddleBackendOption::enable_trt)
+      .def_readwrite("enable_memory_optimize", &PaddleBackendOption::enable_memory_optimize)
       .def_readwrite("ipu_option", &PaddleBackendOption::ipu_option)
       .def_readwrite("collect_trt_shape",
                      &PaddleBackendOption::collect_trt_shape)
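For context, def_readwrite exposes a public C++ data member as a read/write Python attribute, which is how the new flag reaches the Python API. A self-contained sketch of the same pattern, using a hypothetical module name "example" and only the new field (the real binding lives in FastDeploy's pybind sources):

#include <pybind11/pybind11.h>

// Trimmed stand-in for the real option struct.
struct PaddleBackendOption {
  bool enable_memory_optimize = true;
};

// Hypothetical module name. After import, Python code can toggle the flag
// with: opt = example.PaddleBackendOption(); opt.enable_memory_optimize = False
PYBIND11_MODULE(example, m) {
  pybind11::class_<PaddleBackendOption>(m, "PaddleBackendOption")
      .def(pybind11::init<>())
      .def_readwrite("enable_memory_optimize",
                     &PaddleBackendOption::enable_memory_optimize);
}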
@@ -147,7 +147,9 @@ bool PaddleBackend::InitFromPaddle(const std::string& model_buffer,
   }
   config_.SetModelBuffer(model_buffer.c_str(), model_buffer.size(),
                          params_buffer.c_str(), params_buffer.size());
-  config_.EnableMemoryOptim();
+  if (option.enable_memory_optimize) {
+    config_.EnableMemoryOptim();
+  }
   BuildOption(option);
 
   // The input/output information get from predictor is not right, use
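Before this hunk, EnableMemoryOptim() ran unconditionally; it is now guarded by the new flag, so the three hunks together form one feature: declare the option, bind it to Python, and honor it at backend init. A runnable sketch of the end-to-end behavior, with Config as a stand-in for Paddle Inference's real config class (only the one call used above is modeled):

#include <iostream>

// Stand-in for paddle_infer::Config.
struct Config {
  void EnableMemoryOptim() { std::cout << "memory optimize enabled\n"; }
};

struct PaddleBackendOption {
  bool enable_memory_optimize = true;
};

// Mirrors the guarded call introduced by the diff.
void InitFromPaddle(const PaddleBackendOption& option, Config& config) {
  if (option.enable_memory_optimize) {
    config.EnableMemoryOptim();
  }
}

int main() {
  Config config;
  PaddleBackendOption option;
  option.enable_memory_optimize = false;  // opt out: prints nothing
  InitFromPaddle(option, config);
  return 0;
}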