mirror of https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 08:37:06 +08:00
@@ -172,8 +172,7 @@ class RuntimeOption:
     @long_to_int.setter
     def long_to_int(self, value):
         assert isinstance(
-            value,
-            bool), "The value to set `long_to_int` must be type of bool."
+            value, bool), "The value to set `long_to_int` must be type of bool."
         self._option.long_to_int = value
 
     @use_nvidia_tf32.setter
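The reflowed assert above keeps the setter's behavior unchanged: any non-bool assignment to `long_to_int` still trips the isinstance check. A minimal sketch of how the guard behaves (assuming a FastDeploy build that exposes this property on RuntimeOption):

import fastdeploy as fd

option = fd.RuntimeOption()
option.long_to_int = True    # bool passes the isinstance guard

try:
    option.long_to_int = 1   # int is not bool, so the assert fires
except AssertionError as err:
    print(err)  # -> The value to set `long_to_int` must be type of bool.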
@@ -230,6 +229,11 @@ class RuntimeOption:
         """
         return self._option.use_paddle_backend()
 
+    def use_paddle_infer_backend(self):
+        """Wrapper function of use_paddle_backend(), use Paddle Inference backend, support inference Paddle model on CPU/Nvidia GPU.
+        """
+        return self.use_paddle_backend()
+
     def use_poros_backend(self):
         """Use Poros backend, support inference TorchScript model on CPU/Nvidia GPU.
         """
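The added `use_paddle_infer_backend()` is a thin alias that forwards to the existing `use_paddle_backend()`, so either spelling selects the Paddle Inference backend. A short usage sketch (assuming a FastDeploy build with Paddle Inference compiled in):

import fastdeploy as fd

option = fd.RuntimeOption()
option.use_paddle_infer_backend()  # new, self-describing name
# identical in effect to the original call:
# option.use_paddle_backend()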
@@ -255,6 +259,11 @@ class RuntimeOption:
         """
         return self._option.use_lite_backend()
 
+    def use_paddle_lite_backend(self):
+        """Wrapper function of use_lite_backend(), use Paddle Lite backend, support inference Paddle model on ARM CPU.
+        """
+        return self.use_lite_backend()
+
     def set_paddle_mkldnn(self, use_mkldnn=True):
         """Enable/Disable MKLDNN while using Paddle Inference backend, mkldnn is enabled by default.
         """
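`use_paddle_lite_backend()` is the same kind of alias, forwarding to `use_lite_backend()` for Paddle Lite on ARM CPU. A sketch combining it with device selection (`use_cpu()` is assumed to be the standard RuntimeOption device call):

import fastdeploy as fd

option = fd.RuntimeOption()
option.use_cpu()                   # Paddle Lite targets ARM CPU
option.use_paddle_lite_backend()   # forwards to option.use_lite_backend()

# set_paddle_mkldnn() from the context above applies to the Paddle
# Inference backend, where MKLDNN is enabled by default:
# option.use_paddle_infer_backend()
# option.set_paddle_mkldnn(False)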
@@ -383,8 +392,7 @@ class RuntimeOption:
                 continue
             if hasattr(getattr(self._option, attr), "__call__"):
                 continue
-            message += " {} : {}\t\n".format(attr,
-                                             getattr(self._option, attr))
+            message += " {} : {}\t\n".format(attr, getattr(self._option, attr))
         message.strip("\n")
         message += ")"
         return message
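The reflowed `format` call sits in the string-representation loop, which walks the attributes of the underlying option object, skips callables, and emits one `name : value` pair per line. The first `continue`'s condition lies outside the hunk; the sketch below assumes it filters dunder attributes, and uses a hypothetical `_DemoOption` in place of the pybind option object (the original's bare `message.strip("\n")` discards its return value, so the sketch leaves it out):

class _DemoOption:
    # Hypothetical stand-in for self._option in RuntimeOption.
    def __init__(self):
        self.cpu_thread_num = 8
        self.device_id = 0

    def use_cpu(self):
        pass

def dump(option):
    message = "RuntimeOption(\n"
    for attr in dir(option):
        if attr.startswith("__"):
            continue  # assumed guard: skip dunder attributes
        if hasattr(getattr(option, attr), "__call__"):
            continue  # skip bound methods such as use_cpu
        message += " {} : {}\t\n".format(attr, getattr(option, attr))
    message += ")"
    return message

print(dump(_DemoOption()))
# RuntimeOption(
#  cpu_thread_num : 8
#  device_id : 0
# )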