From f2e492c55bccd388534de68f49054e04f687b65b Mon Sep 17 00:00:00 2001
From: Jason
Date: Sat, 12 Nov 2022 14:53:39 +0800
Subject: [PATCH] [Other] Add function alias (#571)

Add function alias
---
 fastdeploy/runtime.h         | 10 ++++++++++
 python/fastdeploy/runtime.py | 16 ++++++++++++----
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/fastdeploy/runtime.h b/fastdeploy/runtime.h
index dc9732624..8822308ec 100755
--- a/fastdeploy/runtime.h
+++ b/fastdeploy/runtime.h
@@ -115,6 +115,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// Set Paddle Inference as inference backend, support CPU/GPU
   void UsePaddleBackend();
 
+  /// Wrapper function of UsePaddleBackend()
+  void UsePaddleInferBackend() {
+    return UsePaddleBackend();
+  }
+
   /// Set ONNX Runtime as inference backend, support CPU/GPU
   void UseOrtBackend();
 
@@ -130,6 +135,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// Set Paddle Lite as inference backend, only support arm cpu
   void UseLiteBackend();
 
+  /// Wrapper function of UseLiteBackend()
+  void UsePaddleLiteBackend() {
+    return UseLiteBackend();
+  }
+
   /// Set mkldnn switch while using Paddle Inference as inference backend
   void SetPaddleMKLDNN(bool pd_mkldnn = true);
 
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
index 4d0311d4b..d6a16adaa 100755
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -172,8 +172,7 @@ class RuntimeOption:
     @long_to_int.setter
     def long_to_int(self, value):
         assert isinstance(
-            value,
-            bool), "The value to set `long_to_int` must be type of bool."
+            value, bool), "The value to set `long_to_int` must be type of bool."
         self._option.long_to_int = value
 
     @use_nvidia_tf32.setter
@@ -230,6 +229,11 @@ class RuntimeOption:
         """
         return self._option.use_paddle_backend()
 
+    def use_paddle_infer_backend(self):
+        """Wrapper function of use_paddle_backend(), use Paddle Inference backend, support inference Paddle model on CPU/Nvidia GPU.
+        """
+        return self.use_paddle_backend()
+
     def use_poros_backend(self):
         """Use Poros backend, support inference TorchScript model on CPU/Nvidia GPU.
         """
@@ -255,6 +259,11 @@ class RuntimeOption:
         """
         return self._option.use_lite_backend()
 
+    def use_paddle_lite_backend(self):
+        """Wrapper function of use_lite_backend(), use Paddle Lite backend, support inference Paddle model on ARM CPU.
+        """
+        return self.use_lite_backend()
+
     def set_paddle_mkldnn(self, use_mkldnn=True):
         """Enable/Disable MKLDNN while using Paddle Inference backend, mkldnn is enabled by default.
         """
@@ -383,8 +392,7 @@ class RuntimeOption:
                 continue
             if hasattr(getattr(self._option, attr), "__call__"):
                 continue
-            message += " {} : {}\t\n".format(attr,
-                                             getattr(self._option, attr))
+            message += " {} : {}\t\n".format(attr, getattr(self._option, attr))
         message.strip("\n")
         message += ")"
         return message
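
Usage note (reviewer sketch, not part of the patch): the new names are thin aliases, so they should behave exactly like the functions they wrap. Below is a minimal Python example, assuming a FastDeploy build that contains this change; the model/params paths and the final Runtime construction are illustrative only.

    import fastdeploy as fd

    option = fd.RuntimeOption()

    # New alias added by this patch; identical to option.use_paddle_backend()
    option.use_paddle_infer_backend()

    # On ARM CPU the other new alias would be used instead;
    # identical to option.use_lite_backend()
    # option.use_paddle_lite_backend()

    # The option object is then consumed as usual, e.g. (hypothetical paths):
    # option.set_model_path("model.pdmodel", "model.pdiparams")
    # runtime = fd.Runtime(option)

The C++ aliases are symmetric: RuntimeOption::UsePaddleInferBackend() forwards to UsePaddleBackend(), and RuntimeOption::UsePaddleLiteBackend() forwards to UseLiteBackend().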