Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-06 00:57:33 +08:00.
[Other] Optimize paddle backend (#1265)
* Optimize paddle backend * optimize paddle backend * add version support
This commit is contained in:
@@ -364,7 +364,10 @@ class RuntimeOption:
|
||||
def set_paddle_mkldnn(self, use_mkldnn=True):
    """Enable/Disable MKLDNN while using Paddle Inference backend, mkldnn is enabled by default.

    Deprecated: use `RuntimeOption.paddle_infer_option.enable_mkldnn` instead.

    :param use_mkldnn: (bool) True to enable MKLDNN, False to disable it.
    """
    logging.warning(
        "`RuntimeOption.set_paddle_mkldnn` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.enable_mkldnn = True` instead."
    )
    # Forward the caller's flag instead of hard-coding True, so
    # set_paddle_mkldnn(False) actually disables MKLDNN as documented.
    self._option.paddle_infer_option.enable_mkldnn = use_mkldnn
def set_openvino_device(self, name="CPU"):
|
||||
"""Set device name for OpenVINO, default 'CPU', can also be 'AUTO', 'GPU', 'GPU.1'....
|
||||
@@ -400,17 +403,26 @@ class RuntimeOption:
|
||||
def enable_paddle_log_info(self):
    """Enable print out the debug log information while using Paddle Inference backend, the log information is disabled by default.

    Deprecated: use `RuntimeOption.paddle_infer_option.enable_log_info` instead.
    """
    # Warn first, then apply the setting through the new option object;
    # message now has the opening backtick, matching the sibling warnings.
    logging.warning(
        "`RuntimeOption.enable_paddle_log_info` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.enable_log_info = True` instead."
    )
    self._option.paddle_infer_option.enable_log_info = True
def disable_paddle_log_info(self):
    """Disable print out the debug log information while using Paddle Inference backend, the log information is disabled by default.

    Deprecated: use `RuntimeOption.paddle_infer_option.enable_log_info` instead.
    """
    # Warn first, then apply the setting through the new option object;
    # message now has the opening backtick, matching the sibling warnings.
    logging.warning(
        "`RuntimeOption.disable_paddle_log_info` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.enable_log_info = False` instead."
    )
    self._option.paddle_infer_option.enable_log_info = False
def set_paddle_mkldnn_cache_size(self, cache_size):
    """Set size of shape cache while using Paddle Inference backend with MKLDNN enabled, default will cache all the dynamic shape.

    Deprecated: use `RuntimeOption.paddle_infer_option.mkldnn_cache_size` instead.

    :param cache_size: (int) size of the MKLDNN shape cache.
    """
    # Warn first, then apply the setting through the new option object;
    # message now has the opening backtick, matching the sibling warnings.
    logging.warning(
        "`RuntimeOption.set_paddle_mkldnn_cache_size` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.mkldnn_cache_size = {}` instead.".
        format(cache_size))
    self._option.paddle_infer_option.mkldnn_cache_size = cache_size
def enable_lite_fp16(self):
|
||||
"""Enable half precision inference while using Paddle Lite backend on ARM CPU, fp16 is disabled by default.
|
||||
@@ -498,6 +510,16 @@ class RuntimeOption:
|
||||
def enable_paddle_to_trt(self):
    """While using TensorRT backend, enable_paddle_to_trt() will change to use Paddle Inference backend, and use its integrated TensorRT instead.

    Deprecated: configure Paddle Inference with TensorRT directly (see the logged example).
    """
    # Fixed typos in the user-facing migration snippet: "v1.2.l0" -> "v1.2.0"
    # and "enabel_trt" -> "enable_trt", so users copying it get working code.
    logging.warning(
        "`RuntimeOption.enable_paddle_to_trt` will be deprecated in v1.2.0, if you want to run tensorrt with Paddle Inference backend, please use the following method, "
    )
    logging.warning(" ==============================================")
    logging.warning(" import fastdeploy as fd")
    logging.warning(" option = fd.RuntimeOption()")
    logging.warning(" option.use_gpu(0)")
    logging.warning(" option.use_paddle_infer_backend()")
    logging.warning(" option.paddle_infer_option.enable_trt = True")
    logging.warning(" ==============================================")
    return self._option.enable_paddle_to_trt()
def set_trt_max_workspace_size(self, trt_max_workspace_size):
|
||||
@@ -519,22 +541,34 @@ class RuntimeOption:
|
||||
def enable_paddle_trt_collect_shape(self):
    """Enable collect subgraph shape information while using Paddle Inference with TensorRT

    Deprecated: use `RuntimeOption.paddle_infer_option.collect_trt_shape` instead.
    """
    # Warning and new-API assignment were unreachable behind an early return;
    # they are the intended deprecation behavior, so run them.
    logging.warning(
        "`RuntimeOption.enable_paddle_trt_collect_shape` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.collect_trt_shape = True` instead."
    )
    self._option.paddle_infer_option.collect_trt_shape = True
def disable_paddle_trt_collect_shape(self):
    """Disable collect subgraph shape information while using Paddle Inference with TensorRT

    Deprecated: use `RuntimeOption.paddle_infer_option.collect_trt_shape` instead.
    """
    # Warning and new-API assignment were unreachable behind an early return;
    # they are the intended deprecation behavior, so run them.
    logging.warning(
        "`RuntimeOption.disable_paddle_trt_collect_shape` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.collect_trt_shape = False` instead."
    )
    self._option.paddle_infer_option.collect_trt_shape = False
def delete_paddle_backend_pass(self, pass_name):
    """Delete pass by name in paddle backend

    Deprecated: use `RuntimeOption.paddle_infer_option.delete_pass` instead.

    :param pass_name: (str) name of the pass to delete.
    """
    # Warning and new-API call were unreachable behind an early return;
    # they are the intended deprecation behavior, so run them.
    logging.warning(
        "`RuntimeOption.delete_paddle_backend_pass` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.delete_pass` instead."
    )
    self._option.paddle_infer_option.delete_pass(pass_name)
def disable_paddle_trt_ops(self, ops):
    """Disable some ops in paddle trt backend

    Deprecated: use `RuntimeOption.paddle_infer_option.disable_trt_ops()` instead.

    :param ops: ops to disable in the Paddle-TRT backend.
    """
    logging.warning(
        "`RuntimeOption.disable_paddle_trt_ops` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.disable_trt_ops()` instead."
    )
    # Route through paddle_infer_option, matching the warning text and the
    # sibling delete_paddle_backend_pass; the original targeted
    # self._option.disable_trt_ops, which the deprecation message contradicts.
    self._option.paddle_infer_option.disable_trt_ops(ops)
def use_ipu(self,
|
||||
device_num=1,
|
||||
@@ -593,6 +627,14 @@ class RuntimeOption:
|
||||
"""
|
||||
return self._option.trt_option
|
||||
|
||||
@property
def paddle_infer_option(self):
    """Accessor for the Paddle Inference backend configuration object.

    :return PaddleBackendOption
    """
    backend_option = self._option.paddle_infer_option
    return backend_option
def enable_profiling(self, inclue_h2d_d2h=False, repeat=100, warmup=50):
|
||||
"""Set the profile mode as 'true'.
|
||||
:param inclue_h2d_d2h Whether to include time of H2D_D2H for time of runtime.
|
||||
|
Reference in New Issue
Block a user