diff --git a/docs/api/runtime/runtime_option.md b/docs/api/runtime/runtime_option.md
index 11ed76bfa..9e713aabe 100644
--- a/docs/api/runtime/runtime_option.md
+++ b/docs/api/runtime/runtime_option.md
@@ -66,7 +66,7 @@ use_openvino_backend()
 Inference with the OpenVINO backend (CPU supported, Paddle/ONNX model formats supported)
 
 ```
-set_paddle_mkldnn()
+set_paddle_mkldnn(pd_mkldnn=True)
 ```
 When using the Paddle Inference backend, this switch enables or disables MKLDNN inference acceleration on the CPU; it is enabled by default.
diff --git a/docs/docs_en/api/runtime/runtime_option.md b/docs/docs_en/api/runtime/runtime_option.md
index 003ad1823..4c0af6a92 100644
--- a/docs/docs_en/api/runtime/runtime_option.md
+++ b/docs/docs_en/api/runtime/runtime_option.md
@@ -73,7 +73,7 @@ use_openvino_backend()
 Inference with the OpenVINO backend (CPU supported, Paddle/ONNX model formats supported)
 
 ```
-set_paddle_mkldnn()
+set_paddle_mkldnn(pd_mkldnn=True)
 ```
 When using the Paddle Inference backend, this parameter determines whether MKLDNN inference acceleration on the CPU is on or off. It is on by default.
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
index 3d156ef62..0e11f5f5e 100644
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -85,8 +85,8 @@ class RuntimeOption:
     def use_lite_backend(self):
         return self._option.use_lite_backend()
 
-    def set_paddle_mkldnn(self):
-        return self._option.set_paddle_mkldnn()
+    def set_paddle_mkldnn(self, pd_mkldnn=True):
+        return self._option.set_paddle_mkldnn(pd_mkldnn)
 
     def enable_paddle_log_info(self):
         return self._option.enable_paddle_log_info()
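
For reviewers, a minimal usage sketch of the new `pd_mkldnn` argument. It assumes the public `fastdeploy` package import and a `use_paddle_backend()` selector for the Paddle Inference backend (neither appears in this diff); model loading and the rest of the deployment flow are omitted.

```python
import fastdeploy as fd

# Assumption: use_paddle_backend() selects the Paddle Inference backend
# that set_paddle_mkldnn() applies to; it is not part of this diff.
option = fd.RuntimeOption()
option.use_paddle_backend()

# New in this change: MKLDNN CPU acceleration can now be disabled explicitly.
option.set_paddle_mkldnn(pd_mkldnn=False)

# Calling with no argument keeps the previous behavior (MKLDNN enabled),
# since pd_mkldnn defaults to True.
option.set_paddle_mkldnn()
```

Defaulting `pd_mkldnn` to `True` keeps existing call sites working unchanged while exposing the off switch that both docs pages describe.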