Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05)
[Backend] Fixed enable_paddle_to_trt() bug (#1320)
* add GPL license
* add GPL-3.0 license
* add GPL-3.0 license
* add GPL-3.0 license
* support yolov8
* add pybind for yolov8
* add yolov8 readme
* add cpp benchmark
* add cpu and gpu mem
* public part split
* add runtime mode
* fixed bugs
* add cpu_thread_nums
* deal with comments
* deal with comments
* deal with comments
* rm useless code
* add FASTDEPLOY_DECL
* add FASTDEPLOY_DECL
* fixed for windows
* mv rss to pss
* mv rss to pss
* Update utils.cc
* use thread to collect mem
* Add ResourceUsageMonitor
* rm useless code
* fixed bug
* fixed typo
* update ResourceUsageMonitor
* fixed bug
* fixed bug
* add note for ResourceUsageMonitor
* deal with comments
* add macros
* deal with comments
* deal with comments
* deal with comments
* re-lint
* rm pmap and use mem api
* rm pmap and use mem api
* add mem api
* Add PrintBenchmarkInfo func
* Add PrintBenchmarkInfo func
* Add PrintBenchmarkInfo func
* deal with comments
* fixed enable_paddle_to_trt
* add log for paddle_trt

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
fastdeploy/runtime/backends/paddle/paddle_backend.cc (4 changes; Normal file → Executable file)
@@ -29,6 +29,10 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
       config_.SetExecStream(option_.external_stream_);
     }
     if (option.enable_trt) {
+      if (!option.trt_option.enable_fp16) {
+        FDINFO << "Will try to use tensorrt inference with Paddle Backend."
+               << std::endl;
+      }
       config_.Exp_DisableTensorRtOPs(option.trt_disabled_ops_);
       auto precision = paddle_infer::PrecisionType::kFloat32;
       if (option.trt_option.enable_fp16) {
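For orientation, a hedged Python sketch of how the logged branch above is reached. The option names (use_paddle_infer_backend, paddle_infer_option.enable_trt, trt_option.enable_fp16) match identifiers visible in the hunks of this commit; treat the snippet as an illustration under those assumptions, not the canonical setup.

import fastdeploy as fd

option = fd.RuntimeOption()
option.use_gpu(0)
option.use_paddle_infer_backend()             # Paddle Inference backend
option.paddle_infer_option.enable_trt = True  # hand subgraphs to TensorRT

# With FP16 left at its default (off), BuildOption() takes the newly added
# branch and logs "Will try to use tensorrt inference with Paddle Backend."
# Turning FP16 on instead selects PrecisionType::kHalf in the hunk above:
# option.trt_option.enable_fp16 = True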
python/fastdeploy/runtime.py (5 changes; Normal file → Executable file)
@@ -532,9 +532,10 @@ class RuntimeOption:
         logging.warning(" option = fd.RuntimeOption()")
         logging.warning(" option.use_gpu(0)")
         logging.warning(" option.use_paddle_infer_backend()")
-        logging.warning(" option.paddle_infer_option.enabel_trt = True")
+        logging.warning(" option.paddle_infer_option.enable_trt = True")
         logging.warning(" ==============================================")
-        return self._option.enable_paddle_to_trt()
+        self._option.use_paddle_backend()
+        self._option.paddle_infer_option.enable_trt = True
 
     def set_trt_max_workspace_size(self, trt_max_workspace_size):
         """Set max workspace size while using TensorRT backend.
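Net effect of this hunk: the deprecated enable_paddle_to_trt() no longer returns the result of the C++ binding call (the path this commit fixes) and instead performs the equivalent two-step configuration itself, so older scripts keep working while the warnings steer users toward the explicit form. A minimal usage sketch, with placeholder model paths:

import fastdeploy as fd

option = fd.RuntimeOption()
option.set_model_path("model.pdmodel", "model.pdiparams")  # placeholder paths
option.use_gpu(0)

# Deprecated shim, still functional after this fix; equivalent to:
#   option.use_paddle_infer_backend()
#   option.paddle_infer_option.enable_trt = True
option.enable_paddle_to_trt()

runtime = fd.Runtime(option)  # builds the Paddle Inference + TensorRT runtime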
|