From c25d1cc1bce44ef56fc7c86ae9bd7f3f931b9b65 Mon Sep 17 00:00:00 2001
From: WJJ1995
Date: Tue, 14 Feb 2023 17:51:39 +0800
Subject: [PATCH] [Backend]Fixed enable_paddle_to_trt() bug (#1320)

* add GPL lisence
* add GPL-3.0 lisence
* add GPL-3.0 lisence
* add GPL-3.0 lisence
* support yolov8
* add pybind for yolov8
* add yolov8 readme
* add cpp benchmark
* add cpu and gpu mem
* public part split
* add runtime mode
* fixed bugs
* add cpu_thread_nums
* deal with comments
* deal with comments
* deal with comments
* rm useless code
* add FASTDEPLOY_DECL
* add FASTDEPLOY_DECL
* fixed for windows
* mv rss to pss
* mv rss to pss
* Update utils.cc
* use thread to collect mem
* Add ResourceUsageMonitor
* rm useless code
* fixed bug
* fixed typo
* update ResourceUsageMonitor
* fixed bug
* fixed bug
* add note for ResourceUsageMonitor
* deal with comments
* add macros
* deal with comments
* deal with comments
* deal with comments
* re-lint
* rm pmap and use mem api
* rm pmap and use mem api
* add mem api
* Add PrintBenchmarkInfo func
* Add PrintBenchmarkInfo func
* Add PrintBenchmarkInfo func
* deal with comments
* fixed enable_paddle_to_trt
* add log for paddle_trt

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
---
 fastdeploy/runtime/backends/paddle/paddle_backend.cc | 4 ++++
 python/fastdeploy/runtime.py                         | 5 +++--
 2 files changed, 7 insertions(+), 2 deletions(-)
 mode change 100644 => 100755 fastdeploy/runtime/backends/paddle/paddle_backend.cc
 mode change 100644 => 100755 python/fastdeploy/runtime.py

diff --git a/fastdeploy/runtime/backends/paddle/paddle_backend.cc b/fastdeploy/runtime/backends/paddle/paddle_backend.cc
old mode 100644
new mode 100755
index 09dbe812a..dc804e926
--- a/fastdeploy/runtime/backends/paddle/paddle_backend.cc
+++ b/fastdeploy/runtime/backends/paddle/paddle_backend.cc
@@ -29,6 +29,10 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
     config_.SetExecStream(option_.external_stream_);
   }
   if (option.enable_trt) {
+    if (!option.trt_option.enable_fp16) {
+      FDINFO << "Will try to use tensorrt inference with Paddle Backend."
+             << std::endl;
+    }
     config_.Exp_DisableTensorRtOPs(option.trt_disabled_ops_);
     auto precision = paddle_infer::PrecisionType::kFloat32;
     if (option.trt_option.enable_fp16) {
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
old mode 100644
new mode 100755
index 6be764ea3..4980db52e
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -532,9 +532,10 @@ class RuntimeOption:
         logging.warning("    option = fd.RuntimeOption()")
         logging.warning("    option.use_gpu(0)")
         logging.warning("    option.use_paddle_infer_backend()")
-        logging.warning("    option.paddle_infer_option.enabel_trt = True")
+        logging.warning("    option.paddle_infer_option.enable_trt = True")
         logging.warning("    ==============================================")
-        return self._option.enable_paddle_to_trt()
+        self._option.use_paddle_backend()
+        self._option.paddle_infer_option.enable_trt = True
 
     def set_trt_max_workspace_size(self, trt_max_workspace_size):
         """Set max workspace size while using TensorRT backend.
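
Usage note (not part of the patch): with this fix, enable_paddle_to_trt() routes through the Paddle Inference backend with its TensorRT subgraph engine enabled, which is exactly the configuration the updated warning text recommends. Below is a minimal sketch of that recommended configuration, assuming the fastdeploy Python package is installed; the model and parameter file names are placeholders for illustration only.

    import fastdeploy as fd

    # Configure the runtime the way the deprecation warning suggests:
    # GPU device 0, Paddle Inference backend, TensorRT subgraph engine on.
    option = fd.RuntimeOption()
    option.use_gpu(0)
    option.use_paddle_infer_backend()
    option.paddle_infer_option.enable_trt = True

    # Placeholder model paths, for illustration only.
    option.set_model_path("model.pdmodel", "model.pdiparams")
    runtime = fd.Runtime(option)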