Mirror of https://github.com/PaddlePaddle/FastDeploy.git
@@ -115,6 +115,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// Set Paddle Inference as inference backend, support CPU/GPU
   void UsePaddleBackend();
 
+  /// Wrapper function of UsePaddleBackend()
+  void UsePaddleInferBackend() {
+    return UsePaddleBackend();
+  }
+
   /// Set ONNX Runtime as inference backend, support CPU/GPU
   void UseOrtBackend();
 
@@ -130,6 +135,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// Set Paddle Lite as inference backend, only support arm cpu
   void UseLiteBackend();
 
+  /// Wrapper function of UseLiteBackend()
+  void UsePaddleLiteBackend() {
+    return UseLiteBackend();
+  }
+
   /// Set mkldnn switch while using Paddle Inference as inference backend
   void SetPaddleMKLDNN(bool pd_mkldnn = true);
 
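Both additions are thin forwarding wrappers, so the old and new names remain interchangeable; the matching Python aliases are added in the hunks below. A minimal usage sketch through the Python API (a sketch only, assuming the package imports as `fastdeploy` and relying solely on method names visible in this diff):

    import fastdeploy as fd

    opt = fd.RuntimeOption()

    # New alias; forwards to use_paddle_backend(), i.e. Paddle Inference.
    opt.use_paddle_infer_backend()

    # The original name still works and selects the same backend:
    # opt.use_paddle_backend()

    # Likewise for Paddle Lite on ARM CPU:
    # opt.use_paddle_lite_backend()  # forwards to use_lite_backend()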
@@ -172,8 +172,7 @@ class RuntimeOption:
     @long_to_int.setter
     def long_to_int(self, value):
         assert isinstance(
-            value,
-            bool), "The value to set `long_to_int` must be type of bool."
+            value, bool), "The value to set `long_to_int` must be type of bool."
         self._option.long_to_int = value
 
     @use_nvidia_tf32.setter
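The reflowed assert is purely cosmetic; the setter still rejects any non-bool value before writing to the underlying option. A small sketch of the resulting behavior from the caller's side (same assumed `fastdeploy` import as above):

    import fastdeploy as fd

    opt = fd.RuntimeOption()
    opt.long_to_int = True    # accepted: value is a bool

    try:
        opt.long_to_int = 1   # rejected: isinstance(1, bool) is False
    except AssertionError as err:
        print(err)  # The value to set `long_to_int` must be type of bool.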
@@ -230,6 +229,11 @@ class RuntimeOption:
         """
         return self._option.use_paddle_backend()
 
+    def use_paddle_infer_backend(self):
+        """Wrapper function of use_paddle_backend(), use Paddle Inference backend, support inference Paddle model on CPU/Nvidia GPU.
+        """
+        return self.use_paddle_backend()
+
     def use_poros_backend(self):
         """Use Poros backend, support inference TorchScript model on CPU/Nvidia GPU.
         """
@@ -255,6 +259,11 @@ class RuntimeOption:
         """
         return self._option.use_lite_backend()
 
+    def use_paddle_lite_backend(self):
+        """Wrapper function of use_lite_backend(), use Paddle Lite backend, support inference Paddle model on ARM CPU.
+        """
+        return self.use_lite_backend()
+
     def set_paddle_mkldnn(self, use_mkldnn=True):
         """Enable/Disable MKLDNN while using Paddle Inference backend, mkldnn is enabled by default.
         """
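The trailing context shows `set_paddle_mkldnn`, which applies only while Paddle Inference is the active backend (MKLDNN is enabled by default, per the docstring). A hedged sketch combining the new aliases with that switch:

    import fastdeploy as fd

    # ARM CPU deployment via the new Paddle Lite alias:
    arm_opt = fd.RuntimeOption()
    arm_opt.use_paddle_lite_backend()   # forwards to use_lite_backend()

    # x86 CPU deployment via Paddle Inference, toggling MKLDNN explicitly:
    x86_opt = fd.RuntimeOption()
    x86_opt.use_paddle_infer_backend()
    x86_opt.set_paddle_mkldnn(True)     # already the default; shown for clarity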
@@ -383,8 +392,7 @@ class RuntimeOption:
                 continue
             if hasattr(getattr(self._option, attr), "__call__"):
                 continue
-            message += " {} : {}\t\n".format(attr,
-                                             getattr(self._option, attr))
+            message += " {} : {}\t\n".format(attr, getattr(self._option, attr))
         message.strip("\n")
         message += ")"
         return message
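One pre-existing quirk in the surrounding context (untouched by this diff): `message.strip("\n")` is a no-op, since Python strings are immutable and `str.strip` returns a new string instead of modifying `message`. If the trailing newline is really meant to be removed, the result would have to be reassigned, e.g.:

    # As written above -- the stripped copy is discarded:
    message.strip("\n")

    # Presumably intended -- keep the stripped copy:
    message = message.strip("\n")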