diff --git a/benchmark/cpp/option.h b/benchmark/cpp/option.h
index 3bc6a94ca..18054587c 100755
--- a/benchmark/cpp/option.h
+++ b/benchmark/cpp/option.h
@@ -34,6 +34,7 @@ static void UpdateBaseCustomFlags(
   if (FLAGS_xpu_l3_cache >= 0) {
     config_info["xpu_l3_cache"] = std::to_string(FLAGS_xpu_l3_cache);
   }
+  // update custom options for paddle backend
   if (FLAGS_enable_log_info) {
     config_info["enable_log_info"] = "true";
   } else {
diff --git a/fastdeploy/runtime/backends/paddle/option.h b/fastdeploy/runtime/backends/paddle/option.h
index c6f48ff0c..993d5ea3c 100755
--- a/fastdeploy/runtime/backends/paddle/option.h
+++ b/fastdeploy/runtime/backends/paddle/option.h
@@ -85,6 +85,8 @@ struct PaddleBackendOption {
   bool enable_memory_optimize = true;
   /// Whether enable ir debug, default false
   bool switch_ir_debug = false;
+  /// Whether enable ir optimize, default true
+  bool switch_ir_optimize = true;
   /*
    * @brief IPU option, this will configure the IPU hardware, if inference model in IPU
diff --git a/fastdeploy/runtime/backends/paddle/paddle_backend.cc b/fastdeploy/runtime/backends/paddle/paddle_backend.cc
index 3db73a00a..9e945d86f 100644
--- a/fastdeploy/runtime/backends/paddle/paddle_backend.cc
+++ b/fastdeploy/runtime/backends/paddle/paddle_backend.cc
@@ -84,6 +84,9 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
 #endif
   } else if (option.device == Device::KUNLUNXIN) {
 #ifdef WITH_KUNLUNXIN
+    // Note(qiuyanjun): For Paddle XPU L3 Cache, please set
+    // export XPU_PADDLE_L3_SIZE=67104768 (XPU R200)
+    // export FLAGS_fuse_multi_transformer_quant_type="float"
     config_.EnableXpu(option.xpu_option.kunlunxin_l3_workspace_size,
                       option.xpu_option.kunlunxin_locked,
                       option.xpu_option.kunlunxin_autotune,
@@ -117,6 +120,9 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
   } else {
     config_.SetCpuMathLibraryNumThreads(option.cpu_thread_num);
   }
+  // Note: SwitchIrOptim is enabled by default for paddle inference
+  // backend. So, we don't need to set it manually.
+  // config_.SwitchIrOptim(option.switch_ir_optimize);
 }
 
 bool PaddleBackend::Init(const RuntimeOption& runtime_option) {
diff --git a/serving/docs/zh_CN/xpu.md b/serving/docs/zh_CN/xpu.md
index 2f4b2982a..ef8f4a439 100644
--- a/serving/docs/zh_CN/xpu.md
+++ b/serving/docs/zh_CN/xpu.md
@@ -36,7 +36,12 @@ docker run -itd --name fd_xpu_server -v `pwd`/:/serving --net=host --privileged
 ```bash
 docker exec -it fd_xpu_server /bin/bash
 cd /opt/fastdeploy/benchmark/cpp/build
+
+# 设置XPU L3 Cache (R200是63Mb)
+export XPU_PADDLE_L3_SIZE=67104768
+# 运行benchmark验证
 ./benchmark --model ResNet50_infer --config_path ../config/config.xpu.paddle.fp32.txt --enable_log_info
+
 cd /serving
 ```
 输出为: