diff --git a/benchmark/benchmark_ppcls.py b/benchmark/benchmark_ppcls.py index 8eeeb8cfc..b4cbcd8c6 100755 --- a/benchmark/benchmark_ppcls.py +++ b/benchmark/benchmark_ppcls.py @@ -75,6 +75,11 @@ def build_option(args): option.use_ort_backend() elif backend == "paddle": option.use_paddle_backend() + elif backend == "ov": + option.use_openvino_backend() + option.set_openvino_device(name="GPU") + # Change the input tensor name and shape to match your model + option.set_openvino_shape_info({"x": [1, 3, 224, 224]}) elif backend in ["trt", "paddle_trt"]: option.use_trt_backend() if backend == "paddle_trt": diff --git a/benchmark/benchmark_ppdet.py b/benchmark/benchmark_ppdet.py index 6d08aafb8..1a2297b4f 100755 --- a/benchmark/benchmark_ppdet.py +++ b/benchmark/benchmark_ppdet.py @@ -75,6 +75,17 @@ def build_option(args): option.use_ort_backend() elif backend == "paddle": option.use_paddle_backend() + elif backend == "ov": + option.use_openvino_backend() + # Use GPU/CPU heterogeneous execution mode + option.set_openvino_device("HETERO:GPU,CPU") + # Change the input tensor names and shapes to match your model + option.set_openvino_shape_info({ + "image": [1, 3, 320, 320], + "scale_factor": [1, 2] + }) + # Force the listed operators to run on CPU + option.set_openvino_cpu_operators(["MulticlassNms"]) elif backend in ["trt", "paddle_trt"]: option.use_trt_backend() if backend == "paddle_trt": diff --git a/benchmark/benchmark_ppseg.py b/benchmark/benchmark_ppseg.py index 7d9df9f07..b146510d6 100755 --- a/benchmark/benchmark_ppseg.py +++ b/benchmark/benchmark_ppseg.py @@ -75,6 +75,11 @@ def build_option(args): option.use_ort_backend() elif backend == "paddle": option.use_paddle_backend() + elif backend == "ov": + option.use_openvino_backend() + option.set_openvino_device(name="GPU") # use gpu + # Change the input tensor name and shape to match your model + option.set_openvino_shape_info({"x": [1, 3, 512, 512]}) elif backend in ["trt", "paddle_trt"]: option.use_trt_backend() if backend == "paddle_trt": diff --git a/benchmark/benchmark_yolo.py 
b/benchmark/benchmark_yolo.py index dd63cefb6..a90bcab3d 100755 --- a/benchmark/benchmark_yolo.py +++ b/benchmark/benchmark_yolo.py @@ -75,6 +75,11 @@ def build_option(args): option.use_ort_backend() elif backend == "paddle": option.use_paddle_backend() + elif backend == "ov": + option.use_openvino_backend() + option.set_openvino_device(name="GPU") + # Change the input tensor name and shape to match your model + option.set_openvino_shape_info({"images": [1, 3, 640, 640]}) elif backend in ["trt", "paddle_trt"]: option.use_trt_backend() if backend == "paddle_trt": diff --git a/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc b/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc old mode 100644 new mode 100755 diff --git a/fastdeploy/backends/openvino/ov_backend.cc b/fastdeploy/backends/openvino/ov_backend.cc old mode 100644 new mode 100755 index 9e8c2571a..6858f8547 --- a/fastdeploy/backends/openvino/ov_backend.cc +++ b/fastdeploy/backends/openvino/ov_backend.cc @@ -176,7 +176,7 @@ bool OpenVINOBackend::InitFromPaddle(const std::string& model_file, } ov::AnyMap properties; - if (option_.cpu_thread_num > 0) { + if (option_.device == "CPU" && option_.cpu_thread_num > 0) { properties["INFERENCE_NUM_THREADS"] = option_.cpu_thread_num; } if (option_.device == "CPU") { @@ -306,7 +306,7 @@ bool OpenVINOBackend::InitFromOnnx(const std::string& model_file, } ov::AnyMap properties; - if (option_.cpu_thread_num > 0) { + if (option_.device == "CPU" && option_.cpu_thread_num > 0) { properties["INFERENCE_NUM_THREADS"] = option_.cpu_thread_num; } if (option_.device == "CPU") {