[XPU] Add XPU option for benchmark example (#1287)

* [XPU] Add xpu option for benchmark example

* [XPU] Add fp16 flags
DefTruth
2023-02-10 11:24:40 +08:00
committed by GitHub
parent 1476f15d80
commit ce424c313e
3 changed files with 105 additions and 7 deletions


@@ -20,8 +20,8 @@
 DEFINE_string(model, "", "Directory of the inference model.");
 DEFINE_string(image, "", "Path of the image file.");
 DEFINE_string(device, "cpu",
-              "Type of inference device, support 'cpu' or 'gpu'.");
-DEFINE_int32(device_id, 0, "device(gpu) id.");
+              "Type of inference device, support 'cpu/gpu/xpu'.");
+DEFINE_int32(device_id, 0, "device(gpu/xpu/...) id.");
 DEFINE_int32(warmup, 200, "Number of warmup for profiling.");
 DEFINE_int32(repeat, 1000, "Number of repeats for profiling.");
 DEFINE_string(profile_mode, "runtime", "runtime or end2end.");
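
For context, a minimal sketch (not part of this commit) of how these flag definitions are consumed, assuming the standard gflags API; the infer_demo name and the flag values below are illustrative:

    #include <iostream>

    #include "gflags/gflags.h"

    DEFINE_string(device, "cpu",
                  "Type of inference device, support 'cpu/gpu/xpu'.");
    DEFINE_int32(device_id, 0, "device(gpu/xpu/...) id.");

    int main(int argc, char* argv[]) {
      // Fills FLAGS_device / FLAGS_device_id from argv, e.g.
      //   ./infer_demo --device xpu --device_id 0
      google::ParseCommandLineFlags(&argc, &argv, true);
      std::cout << "device=" << FLAGS_device << ", id=" << FLAGS_device_id
                << std::endl;
      return 0;
    }
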
@@ -41,8 +41,8 @@ DEFINE_int32(dump_period, 100, "How often to collect memory info.");
 void PrintUsage() {
   std::cout << "Usage: infer_demo --model model_path --image img_path --device "
-               "[cpu|gpu] --backend "
-               "[default|ort|paddle|ov|trt|paddle_trt] "
+               "[cpu|gpu|xpu] --backend "
+               "[default|ort|paddle|ov|trt|paddle_trt|lite] "
                "--use_fp16 false"
             << std::endl;
   std::cout << "Default value of device: cpu" << std::endl;
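
The usage string now advertises 'xpu' as a device and 'lite' (Paddle Lite) as a backend. An illustrative invocation matching it (model and image paths are placeholders, not from this commit; gflags boolean flags take the = form):

    ./infer_demo --model ./model_dir --image test.jpg --device xpu --device_id 0 --backend lite --use_fp16=true
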
@@ -52,7 +52,7 @@ void PrintUsage() {
 bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
   if (FLAGS_device == "gpu") {
-    option->UseGpu();
+    option->UseGpu(FLAGS_device_id);
     if (FLAGS_backend == "ort") {
       option->UseOrtBackend();
     } else if (FLAGS_backend == "paddle") {
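
Beyond enabling XPU, this hunk fixes a real bug on the GPU path: the parameterless UseGpu() falls back to the default device (0), so --device_id was silently ignored. A standalone sketch of the difference, assuming RuntimeOption is declared in fastdeploy/runtime.h:

    #include "fastdeploy/runtime.h"

    int main() {
      fastdeploy::RuntimeOption option;
      int device_id = 1;           // stand-in for FLAGS_device_id
      // option.UseGpu();          // before: always ran on GPU 0
      option.UseGpu(device_id);    // after: honors the requested id
      return 0;
    }
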
@@ -94,8 +94,27 @@ bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
                 << FLAGS_backend << " is not supported." << std::endl;
       return false;
     }
+  } else if (FLAGS_device == "xpu") {
+    option->UseKunlunXin(FLAGS_device_id);
+    if (FLAGS_backend == "ort") {
+      option->UseOrtBackend();
+    } else if (FLAGS_backend == "paddle") {
+      option->UsePaddleInferBackend();
+    } else if (FLAGS_backend == "lite") {
+      option->UsePaddleLiteBackend();
+      if (FLAGS_use_fp16) {
+        option->EnableLiteFP16();
+      }
+    } else if (FLAGS_backend == "default") {
+      return true;
+    } else {
+      std::cout << "While inference with XPU, only support "
+                   "default/ort/paddle/lite now, "
+                << FLAGS_backend << " is not supported." << std::endl;
+      return false;
+    }
   } else {
-    std::cerr << "Only support device CPU/GPU now, " << FLAGS_device
+    std::cerr << "Only support device CPU/GPU/XPU now, " << FLAGS_device
               << " is not supported." << std::endl;
     return false;
   }
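
Stripped of the flag plumbing, the new XPU branch boils down to the following RuntimeOption setup. A sketch using only calls that appear in this diff (the header path is an assumption), shown for the 'lite' backend case:

    #include "fastdeploy/runtime.h"

    int main() {
      fastdeploy::RuntimeOption option;
      option.UseKunlunXin(0);         // KunlunXin XPU; id comes from --device_id
      option.UsePaddleLiteBackend();  // backend == "lite"
      option.EnableLiteFP16();        // only reached when --use_fp16=true
      return 0;
    }

Note the asymmetry: EnableLiteFP16() is called only in the Lite branch, so --use_fp16 has no effect when the ort or paddle backend is selected on XPU.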