[Backend] Add KunlunXin XPU deploy support (#747)

* add xpu support

* fix docs

* update code

* update doc

* update code

* update yolov5

* update cmake

* add int64_t data support

* fix

* update download links

* add en doc

* update code

* update xpu options

* update doc

* update doc

* update doc

* update lib links

* update doc

* update code

* update lite xpu link

* update xpu lib

* update doc

* update en doc
This commit is contained in:
yeliang2258
2022-12-15 21:17:14 +08:00
committed by GitHub
parent 6e79df40d9
commit 5be839b322
39 changed files with 870 additions and 58 deletions

View File

@@ -51,6 +51,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
bool use_ipu = (runtime_option.device == Device::IPU);
bool use_rknpu = (runtime_option.device == Device::RKNPU);
bool use_timvx = (runtime_option.device == Device::TIMVX);
bool use_xpu = (runtime_option.device == Device::XPU);
if (use_gpu) {
if (!IsSupported(valid_gpu_backends, runtime_option.backend)) {
@@ -67,6 +68,11 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
FDERROR << "The valid timvx backends of model " << ModelName() << " are " << Str(valid_timvx_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
} else if (use_xpu) {
if (!IsSupported(valid_xpu_backends, runtime_option.backend)) {
FDERROR << "The valid xpu backends of model " << ModelName() << " are " << Str(valid_xpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
} else if(use_ipu) {
if (!IsSupported(valid_ipu_backends, runtime_option.backend)) {
FDERROR << "The valid ipu backends of model " << ModelName() << " are " << Str(valid_ipu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
@@ -102,6 +108,8 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
return CreateRKNPUBackend();
} else if (runtime_option.device == Device::TIMVX) {
return CreateTimVXBackend();
} else if (runtime_option.device == Device::XPU) {
return CreateXPUBackend();
} else if (runtime_option.device == Device::IPU) {
#ifdef WITH_IPU
return CreateIpuBackend();
@@ -111,7 +119,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
return false;
#endif
}
FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX now." << std::endl;
FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/XPU now." << std::endl;
return false;
}
@@ -225,6 +233,29 @@ bool FastDeployModel::CreateTimVXBackend() {
return false;
}
// Selects and initializes the first available backend from
// valid_xpu_backends so this model can run on a KunlunXin XPU device.
// Returns true once a runtime is successfully created; returns false if
// the model registered no XPU backends, no registered backend is
// available in this build, or runtime initialization fails.
bool FastDeployModel::CreateXPUBackend() {
// The model never registered any backend for XPU — nothing to try.
if (valid_xpu_backends.size() == 0) {
FDERROR << "There's no valid xpu backends for model: " << ModelName()
<< std::endl;
return false;
}
// Walk the candidates in registration (priority) order, skipping any
// backend that was not compiled into this build.
for (size_t i = 0; i < valid_xpu_backends.size(); ++i) {
if (!IsBackendAvailable(valid_xpu_backends[i])) {
continue;
}
// Record the chosen backend on the option, then construct and
// initialize a fresh Runtime with it.
runtime_option.backend = valid_xpu_backends[i];
runtime_ = std::unique_ptr<Runtime>(new Runtime());
// NOTE(review): an Init() failure aborts immediately rather than
// falling through to the next candidate — presumably intentional to
// match the sibling Create*Backend helpers; confirm against the full
// file if fallback-on-init-failure is ever desired.
if (!runtime_->Init(runtime_option)) {
return false;
}
runtime_initialized_ = true;
return true;
}
// Every registered candidate was unavailable in this build.
FDERROR << "Found no valid backend for model: " << ModelName() << std::endl;
return false;
}
bool FastDeployModel::CreateIpuBackend() {
if (valid_ipu_backends.size() == 0) {
FDERROR << "There's no valid ipu backends for model: " << ModelName()