Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-06 00:57:33 +08:00)
Fix set_paddle_mkldnn python interface (#328)
* fd serving add dockerfile
* fix enable_paddle_mkldnn
* delete disable_paddle_mkldnn
* fix python set_paddle_mkldnn

Co-authored-by: Jason <jiangjiajun@baidu.com>
@@ -66,7 +66,7 @@ use_openvino_backend()
 Inference with the OpenVINO backend (CPU supported; Paddle/ONNX model formats supported)
 
 ```
-set_paddle_mkldnn()
+set_paddle_mkldnn(pd_mkldnn=True)
 ```
 When using the Paddle Inference backend, this switch enables or disables MKLDNN inference acceleration on the CPU; the backend enables it by default.
 
@@ -73,7 +73,7 @@ use_openvino_backend()
 Inference with OpenVINO backend (CPU supported, Paddle/ONNX model format supported)
 
 ```
-set_paddle_mkldnn()
+set_paddle_mkldnn(pd_mkldnn=True)
 ```
 
 When using the Paddle Inference backend, this parameter determines whether the MKLDNN inference acceleration on the CPU is on or off. It is on by default.
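A minimal usage sketch of the corrected interface follows. It assumes the `fastdeploy` Python package of this era, where `RuntimeOption` also exposes `use_cpu()` and `use_paddle_backend()`; the surrounding setup is illustrative:

```python
import fastdeploy as fd

# Configure a CPU runtime that uses the Paddle Inference backend.
option = fd.RuntimeOption()
option.use_cpu()
option.use_paddle_backend()

# MKLDNN acceleration is on by default; after this fix the flag is
# actually forwarded, so passing False now disables it.
option.set_paddle_mkldnn(pd_mkldnn=False)
```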
@@ -85,8 +85,8 @@ class RuntimeOption:
     def use_lite_backend(self):
         return self._option.use_lite_backend()
 
-    def set_paddle_mkldnn(self):
-        return self._option.set_paddle_mkldnn()
+    def set_paddle_mkldnn(self, pd_mkldnn=True):
+        return self._option.set_paddle_mkldnn(pd_mkldnn)
 
     def enable_paddle_log_info(self):
         return self._option.enable_paddle_log_info()
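For context, the Python class is a thin wrapper that forwards each call to the pybind11-bound C++ option object. A self-contained sketch of the pattern and of the bug this hunk fixes; `_CxxRuntimeOption` is a hypothetical stand-in for the bound object:

```python
class _CxxRuntimeOption:
    """Hypothetical stand-in for the pybind11-bound C++ RuntimeOption."""

    def set_paddle_mkldnn(self, pd_mkldnn=True):
        print(f"MKLDNN enabled: {pd_mkldnn}")


class RuntimeOption:
    def __init__(self):
        self._option = _CxxRuntimeOption()

    def set_paddle_mkldnn(self, pd_mkldnn=True):
        # Pre-fix, this wrapper took no parameter, so Python callers had
        # no way to pass False down to the C++ side. Forwarding the flag
        # restores the disable path.
        return self._option.set_paddle_mkldnn(pd_mkldnn)


RuntimeOption().set_paddle_mkldnn(False)  # prints "MKLDNN enabled: False"
```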