mirror of https://github.com/PaddlePaddle/FastDeploy.git
[XPU] Add XPU option for benchmark example (#1287)
* [XPU] Add xpu option for benchmark example
* [XPU] Add fp16 flags
@@ -20,8 +20,8 @@
 DEFINE_string(model, "", "Directory of the inference model.");
 DEFINE_string(image, "", "Path of the image file.");
 DEFINE_string(device, "cpu",
-              "Type of inference device, support 'cpu' or 'gpu'.");
-DEFINE_int32(device_id, 0, "device(gpu) id.");
+              "Type of inference device, support 'cpu/gpu/xpu'.");
+DEFINE_int32(device_id, 0, "device(gpu/xpu/...) id.");
 DEFINE_int32(warmup, 200, "Number of warmup for profiling.");
 DEFINE_int32(repeat, 1000, "Number of repeats for profiling.");
 DEFINE_string(profile_mode, "runtime", "runtime or end2end.");
@@ -41,8 +41,8 @@ DEFINE_int32(dump_period, 100, "How often to collect memory info.");
 
 void PrintUsage() {
   std::cout << "Usage: infer_demo --model model_path --image img_path --device "
-               "[cpu|gpu] --backend "
-               "[default|ort|paddle|ov|trt|paddle_trt] "
+               "[cpu|gpu|xpu] --backend "
+               "[default|ort|paddle|ov|trt|paddle_trt|lite] "
                "--use_fp16 false"
             << std::endl;
   std::cout << "Default value of device: cpu" << std::endl;
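With the updated usage string, the benchmark can target a KunlunXin XPU and, with the lite backend, request FP16 execution. A minimal sketch of an invocation, assuming the demo binary is named infer_demo as in the usage string; the model directory and image path below are placeholders:

./infer_demo --model ./model_dir --image test.jpg --device xpu --backend lite --use_fp16 true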
@@ -52,7 +52,7 @@ void PrintUsage() {
 
 bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
   if (FLAGS_device == "gpu") {
-    option->UseGpu();
+    option->UseGpu(FLAGS_device_id);
     if (FLAGS_backend == "ort") {
       option->UseOrtBackend();
     } else if (FLAGS_backend == "paddle") {
@@ -94,8 +94,27 @@ bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
                 << FLAGS_backend << " is not supported." << std::endl;
       return false;
     }
+  } else if (FLAGS_device == "xpu") {
+    option->UseKunlunXin(FLAGS_device_id);
+    if (FLAGS_backend == "ort") {
+      option->UseOrtBackend();
+    } else if (FLAGS_backend == "paddle") {
+      option->UsePaddleInferBackend();
+    } else if (FLAGS_backend == "lite") {
+      option->UsePaddleLiteBackend();
+      if (FLAGS_use_fp16) {
+        option->EnableLiteFP16();
+      }
+    } else if (FLAGS_backend == "default") {
+      return true;
+    } else {
+      std::cout << "While inference with XPU, only support "
+                   "default/ort/paddle/lite now, "
+                << FLAGS_backend << " is not supported." << std::endl;
+      return false;
+    }
   } else {
-    std::cerr << "Only support device CPU/GPU now, " << FLAGS_device
+    std::cerr << "Only support device CPU/GPU/XPU now, " << FLAGS_device
               << " is not supported." << std::endl;
     return false;
   }
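For readers who want the XPU path in isolation, here is a minimal sketch of the configuration logic this commit adds to CreateRuntimeOption, assuming the usual fastdeploy/runtime.h header; the ConfigureXpuOption helper and its parameters are hypothetical stand-ins for the gflags used by the demo:

#include <iostream>
#include <string>
#include "fastdeploy/runtime.h"

// Hypothetical helper mirroring the XPU branch added in this commit.
bool ConfigureXpuOption(fastdeploy::RuntimeOption* option, int device_id,
                        const std::string& backend, bool use_fp16) {
  option->UseKunlunXin(device_id);  // select the KunlunXin XPU device
  if (backend == "ort") {
    option->UseOrtBackend();
  } else if (backend == "paddle") {
    option->UsePaddleInferBackend();
  } else if (backend == "lite") {
    option->UsePaddleLiteBackend();
    if (use_fp16) {
      option->EnableLiteFP16();  // FP16 is only wired up for the lite backend
    }
  } else if (backend != "default") {
    std::cout << "While inference with XPU, only support default/ort/paddle/lite now, "
              << backend << " is not supported." << std::endl;
    return false;
  }
  return true;
}

Note that EnableLiteFP16() is reached only through the lite branch, so --use_fp16 has no effect when combined with the ort, paddle, or default backends on XPU.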