diff --git a/fastdeploy/runtime/backends/paddle/paddle_backend.cc b/fastdeploy/runtime/backends/paddle/paddle_backend.cc
old mode 100644
new mode 100755
index 09dbe812a..dc804e926
--- a/fastdeploy/runtime/backends/paddle/paddle_backend.cc
+++ b/fastdeploy/runtime/backends/paddle/paddle_backend.cc
@@ -29,6 +29,10 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
     config_.SetExecStream(option_.external_stream_);
   }
   if (option.enable_trt) {
+    if (!option.trt_option.enable_fp16) {
+      FDINFO << "Will try to use tensorrt inference with Paddle Backend."
+             << std::endl;
+    }
     config_.Exp_DisableTensorRtOPs(option.trt_disabled_ops_);
     auto precision = paddle_infer::PrecisionType::kFloat32;
     if (option.trt_option.enable_fp16) {
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
old mode 100644
new mode 100755
index 6be764ea3..4980db52e
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -532,9 +532,10 @@ class RuntimeOption:
         logging.warning(" option = fd.RuntimeOption()")
         logging.warning(" option.use_gpu(0)")
         logging.warning(" option.use_paddle_infer_backend()")
-        logging.warning(" option.paddle_infer_option.enabel_trt = True")
+        logging.warning(" option.paddle_infer_option.enable_trt = True")
         logging.warning(" ==============================================")
-        return self._option.enable_paddle_to_trt()
+        self._option.use_paddle_backend()
+        self._option.paddle_infer_option.enable_trt = True
 
     def set_trt_max_workspace_size(self, trt_max_workspace_size):
         """Set max workspace size while using TensorRT backend.
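Usage note: this patch routes the deprecated enable_paddle_to_trt() through the new per-backend option API. Below is a minimal sketch of the recommended configuration, assembled from the deprecation warning printed above (fd is the conventional import alias for fastdeploy; nothing here beyond the warning text is taken from the patch):

    import fastdeploy as fd

    option = fd.RuntimeOption()
    option.use_gpu(0)                             # run inference on GPU device 0
    option.use_paddle_infer_backend()             # select the Paddle Inference backend
    option.paddle_infer_option.enable_trt = True  # enable the TensorRT subgraph engine

With this configuration, the C++ change above also logs "Will try to use tensorrt inference with Paddle Backend." when FP16 is not enabled, so users can confirm the TensorRT path was taken.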