[XPU] Support XPU via Paddle Inference backend (#1987)

* [backend] Support XPU via Paddle Inference backend

* [XPU] support XPU benchmark via paddle inference

* [benchmark] add xpu paddle h2d config files
DefTruth committed 2023-05-25 14:13:40 +08:00 (committed by GitHub)
parent 24f32d10a7
commit 49c033a828
16 changed files with 262 additions and 57 deletions
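For context, the options this PR wires into the benchmark tool can also be set directly on a RuntimeOption. A minimal sketch, assuming the usual FastDeploy C++ runtime API; the model file names are placeholders, and the 16 MB L3 cache value is only an illustration:

#include "fastdeploy/runtime.h"

int main() {
  fastdeploy::RuntimeOption option;
  // KunlunXin XPU device 0 with a 16 MB L3 cache, mirroring
  // config_info["device_id"] / config_info["xpu_l3_cache"] below.
  option.UseKunlunXin(0, 16 * 1024 * 1024);
  // Paddle Inference backend; per the note in the diff, an fp16
  // model automatically runs at fp16 precision on XPU.
  option.UsePaddleInferBackend();
  // Same effect as passing --enable_log_info to the benchmark tool.
  option.paddle_infer_option.enable_log_info = true;
  // Placeholder model files.
  option.SetModelPath("model.pdmodel", "model.pdiparams");
  fastdeploy::Runtime runtime;
  return runtime.Init(option) ? 0 : 1;
}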


@@ -18,6 +18,7 @@
 static void UpdateBaseCustomFlags(
     std::unordered_map<std::string, std::string>& config_info) {
+  // see benchmark/cpp/flags.h
   if (FLAGS_warmup > -1) {
     config_info["warmup"] = std::to_string(FLAGS_warmup);
   }
@@ -30,6 +31,14 @@ static void UpdateBaseCustomFlags(
   if (FLAGS_use_fp16) {
     config_info["use_fp16"] = "true";
   }
+  if (FLAGS_xpu_l3_cache >= 0) {
+    config_info["xpu_l3_cache"] = std::to_string(FLAGS_xpu_l3_cache);
+  }
+  if (FLAGS_enable_log_info) {
+    config_info["enable_log_info"] = "true";
+  } else {
+    config_info["enable_log_info"] = "false";
+  }
 }

 static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
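The two flags read above are declared gflags-style alongside the existing benchmark flags. A hedged sketch of what the declarations in benchmark/cpp/flags.h presumably look like; only the flag names and the "negative means unset" convention are visible in the diff, so the defaults and help strings here are assumptions:

#include "gflags/gflags.h"

// Assumed declarations, reconstructed from the FLAGS_ usages above.
DEFINE_int32(xpu_l3_cache, -1,
             "XPU L3 cache size in bytes; < 0 keeps the configured default.");
DEFINE_bool(enable_log_info, false,
            "Enable Paddle Inference runtime log info.");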
@@ -47,6 +56,9 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
     option->EnableProfiling(config_info["include_h2d_d2h"] == "true",
                             repeat, warmup);
   }
+  if (config_info["enable_log_info"] == "true") {
+    option->paddle_infer_option.enable_log_info = true;
+  }
   if (config_info["device"] == "gpu") {
     option->UseGpu(std::stoi(config_info["device_id"]));
     if (config_info["backend"] == "ort") {
@@ -104,16 +116,14 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
       return false;
     }
   } else if (config_info["device"] == "xpu") {
-    if (FLAGS_xpu_l3_cache >= 0) {
-      option->UseKunlunXin(std::stoi(config_info["device_id"]),
-                           FLAGS_xpu_l3_cache);
-    } else {
-      option->UseKunlunXin(std::stoi(config_info["device_id"]),
-                           std::stoi(config_info["xpu_l3_cache"]));
-    }
+    option->UseKunlunXin(std::stoi(config_info["device_id"]),
+                         std::stoi(config_info["xpu_l3_cache"]));
     if (config_info["backend"] == "ort") {
       option->UseOrtBackend();
     } else if (config_info["backend"] == "paddle") {
+      // Note: for Paddle Inference on XPU with fp16, as long as
+      // the model itself is fp16, it will automatically run at
+      // fp16 precision.
       option->UsePaddleInferBackend();
     } else if (config_info["backend"] == "lite") {
       option->UsePaddleLiteBackend();
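Net effect of the last hunk: CreateRuntimeOption no longer consults FLAGS_xpu_l3_cache directly. UpdateBaseCustomFlags copies the flag into config_info["xpu_l3_cache"] once, and the XPU branch always reads it from there, which removes the duplicated UseKunlunXin call.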