[Backend] Support Intel GPU with OpenVINO (#472)

* Update ov_backend.cc

* Update ov_backend.cc

* support set openvino device
Author: Jason
Date: 2022-11-17 17:08:49 +08:00
Committed by: GitHub
Parent: 3c5e8cd95d
Commit: dbe96cd049
7 changed files with 24 additions and 3 deletions

ov_backend.cc (the OpenVINO backend implementation, named in the commit message):

@@ -158,7 +158,8 @@ bool OpenVINOBackend::InitFromPaddle(const std::string& model_file,
   } else if (option_.ov_num_streams > 0) {
     properties["NUM_STREAMS"] = option_.ov_num_streams;
   }
-  compiled_model_ = core_.compile_model(model, "CPU", properties);
+  FDINFO << "Compile OpenVINO model on device_name:" << option.device << "." << std::endl;
+  compiled_model_ = core_.compile_model(model, option.device, properties);
   request_ = compiled_model_.create_infer_request();
   initialized_ = true;
@@ -255,7 +256,8 @@ bool OpenVINOBackend::InitFromOnnx(const std::string& model_file,
   } else if (option_.ov_num_streams > 0) {
     properties["NUM_STREAMS"] = option_.ov_num_streams;
   }
-  compiled_model_ = core_.compile_model(model, "CPU", properties);
+  FDINFO << "Compile OpenVINO model on device_name:" << option.device << "." << std::endl;
+  compiled_model_ = core_.compile_model(model, option.device, properties);
   request_ = compiled_model_.create_infer_request();

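Both hunks replace the hard-coded "CPU" target with the device string carried in the backend option, and log the chosen device. For orientation, this is how OpenVINO 2.x itself selects a device at compile time; a minimal standalone sketch, not part of this commit, with "model.xml" as a placeholder IR path:

    #include <openvino/openvino.hpp>
    #include <memory>

    int main() {
      ov::Core core;
      // Placeholder model path; any OpenVINO IR or ONNX file works here.
      std::shared_ptr<ov::Model> model = core.read_model("model.xml");
      ov::AnyMap properties;  // e.g. properties["NUM_STREAMS"] = 2;
      // "GPU" selects the Intel GPU plugin ("GPU.1" a second GPU, "AUTO"
      // lets OpenVINO pick); before this commit only "CPU" was reachable.
      ov::CompiledModel compiled = core.compile_model(model, "GPU", properties);
      ov::InferRequest request = compiled.create_infer_request();
      return 0;
    }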
The companion backend option header (ov_backend.h):

@@ -26,6 +26,7 @@
 namespace fastdeploy {

 struct OpenVINOBackendOption {
+  std::string device = "CPU";
   int cpu_thread_num = -1;
   int ov_num_streams = 1;
   std::map<std::string, std::vector<int64_t>> shape_infos;

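The new device field defaults to "CPU", so existing deployments are unaffected. A hypothetical direct use of the struct (normally Runtime::CreateOpenVINOBackend fills it in, as the runtime hunk further below shows; the include path is an assumption):

    #include "fastdeploy/backends/openvino/ov_backend.h"  // assumed path

    int main() {
      fastdeploy::OpenVINOBackendOption opt;
      opt.device = "GPU";      // first Intel GPU; "CPU" is the default
      opt.ov_num_streams = 2;  // number of parallel inference streams
      return 0;
    }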
The pybind11 bindings for RuntimeOption:

@@ -33,6 +33,7 @@ void BindRuntime(pybind11::module& m) {
.def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend) .def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend)
.def("use_lite_backend", &RuntimeOption::UseLiteBackend) .def("use_lite_backend", &RuntimeOption::UseLiteBackend)
.def("set_paddle_mkldnn", &RuntimeOption::SetPaddleMKLDNN) .def("set_paddle_mkldnn", &RuntimeOption::SetPaddleMKLDNN)
.def("set_openvino_device", &RuntimeOption::SetOpenVINODevice)
.def("enable_paddle_log_info", &RuntimeOption::EnablePaddleLogInfo) .def("enable_paddle_log_info", &RuntimeOption::EnablePaddleLogInfo)
.def("disable_paddle_log_info", &RuntimeOption::DisablePaddleLogInfo) .def("disable_paddle_log_info", &RuntimeOption::DisablePaddleLogInfo)
.def("set_paddle_mkldnn_cache_size", .def("set_paddle_mkldnn_cache_size",

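The binding follows the usual pybind11 pattern: a C++ member function exposed under a snake_case Python name. A self-contained sketch of that pattern (a generic example, not FastDeploy code):

    #include <pybind11/pybind11.h>
    #include <string>

    struct Option {
      std::string device = "CPU";
      void SetDevice(const std::string& name) { device = name; }
    };

    PYBIND11_MODULE(example, m) {
      // Callable from Python as: Option().set_device("GPU")
      pybind11::class_<Option>(m, "Option")
          .def(pybind11::init<>())
          .def("set_device", &Option::SetDevice);
    }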
The RuntimeOption and Runtime implementation:

@@ -332,6 +332,10 @@ void RuntimeOption::SetPaddleMKLDNNCacheSize(int size) {
   pd_mkldnn_cache_size = size;
 }

+void RuntimeOption::SetOpenVINODevice(const std::string& name) {
+  openvino_device = name;
+}
+
 void RuntimeOption::EnableLiteFP16() {
   lite_enable_fp16 = true;
 }
@@ -641,6 +645,7 @@ void Runtime::CreateOpenVINOBackend() {
 #ifdef ENABLE_OPENVINO_BACKEND
   auto ov_option = OpenVINOBackendOption();
   ov_option.cpu_thread_num = option.cpu_thread_num;
+  ov_option.device = option.openvino_device;
   ov_option.ov_num_streams = option.ov_num_streams;
   FDASSERT(option.model_format == ModelFormat::PADDLE ||
                option.model_format == ModelFormat::ONNX,

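With this plumbing, the device string flows from RuntimeOption::SetOpenVINODevice through RuntimeOption::openvino_device into OpenVINOBackendOption::device. A minimal end-to-end sketch of the C++ API (a sketch only: it assumes a build with ENABLE_OPENVINO_BACKEND, FastDeploy API names as of this commit, and placeholder model paths):

    #include "fastdeploy/runtime.h"

    int main() {
      fastdeploy::RuntimeOption option;
      option.SetModelPath("model.pdmodel", "model.pdiparams");  // placeholders
      option.UseOpenVINOBackend();
      option.SetOpenVINODevice("GPU");  // or "AUTO", "GPU.1", ...
      fastdeploy::Runtime runtime;
      if (!runtime.Init(option)) return -1;
      return 0;
    }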
The public RuntimeOption declaration:

@@ -168,6 +168,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
    */
   void SetPaddleMKLDNNCacheSize(int size);

+  /**
+   * @brief Set device name for OpenVINO, default 'CPU', can also be 'AUTO', 'GPU', 'GPU.1', ...
+   */
+  void SetOpenVINODevice(const std::string& name = "CPU");
+
   /**
    * @brief Set optimized model dir for Paddle Lite backend.
    */
@@ -344,6 +349,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
   size_t trt_max_batch_size = 32;
   size_t trt_max_workspace_size = 1 << 30;

+  // ======Only for OpenVINO Backend======
+  std::string openvino_device = "CPU";
+
   // ======Only for Poros Backend=======
   bool is_dynamic = false;
   bool long_to_int = true;

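On the naming convention in the doc comment: OpenVINO treats "GPU" as shorthand for "GPU.0", numeric suffixes address individual Intel GPUs when several are present, and "AUTO" delegates device selection to OpenVINO's automatic device plugin.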
A small comment cleanup in the vision preprocessing transforms header:

@@ -37,7 +37,6 @@ namespace fastdeploy {
 namespace vision {

 void FuseTransforms(std::vector<std::shared_ptr<Processor>>* processors);
-// Fuse Normalize + Cast(Float) to Normalize
 void FuseNormalizeCast(std::vector<std::shared_ptr<Processor>>* processors);
 // Fuse Normalize + HWC2CHW to NormalizeAndPermute

The Python RuntimeOption wrapper:

@@ -269,6 +269,11 @@ class RuntimeOption:
""" """
return self._option.set_paddle_mkldnn(use_mkldnn) return self._option.set_paddle_mkldnn(use_mkldnn)
def set_openvino_device(self, name="CPU"):
"""Set device name for OpenVINO, default 'CPU', can also be 'AUTO', 'GPU', 'GPU.1'....
"""
return self._option.set_openvino_device(name)
def enable_paddle_log_info(self): def enable_paddle_log_info(self):
"""Enable print out the debug log information while using Paddle Inference backend, the log information is disabled by default. """Enable print out the debug log information while using Paddle Inference backend, the log information is disabled by default.
""" """