Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
Revert "[Backend] Add KunlunXin XPU deploy support" (#893)
Revert "[Backend] Add KunlunXin XPU deploy support (#747)"
This reverts commit 5be839b322.
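The diff below removes the KunlunXin XPU path from FastDeployModel: it drops the Device::XPU flag and branch from the backend check in InitRuntimeWithSpecifiedBackend(), removes the Device::XPU dispatch in InitRuntimeWithSpecifiedDevice(), trims XPU from the "Only support ..." error message, and deletes the CreateXPUBackend() helper. For readers unfamiliar with that dispatch, the following minimal C++ sketch illustrates the same pattern in isolation: a device flag selects a per-device list of valid backends, and IsSupported() rejects a requested backend that is not in the list. Device, Backend, the example backend lists, and the main() driver are simplified stand-ins for illustration, not FastDeploy's real types.

#include <algorithm>
#include <iostream>
#include <vector>

// Stand-in enums for illustration only; not FastDeploy's real definitions.
enum class Device { CPU, XPU };
enum class Backend { kOrt, kPaddle, kLite };

// Same guard idea as IsSupported(valid_*_backends, runtime_option.backend):
// a requested backend is accepted only if it appears in the device's list.
static bool IsSupported(const std::vector<Backend>& valid, Backend b) {
  return std::find(valid.begin(), valid.end(), b) != valid.end();
}

static bool InitWithSpecifiedBackend(Device device, Backend backend) {
  // Per-device valid-backend lists (hypothetical contents).
  const std::vector<Backend> valid_cpu_backends = {Backend::kOrt, Backend::kPaddle};
  const std::vector<Backend> valid_xpu_backends = {Backend::kLite};

  const bool use_xpu = (device == Device::XPU);  // the flag this revert removes
  const auto& valid = use_xpu ? valid_xpu_backends : valid_cpu_backends;
  if (!IsSupported(valid, backend)) {
    std::cerr << "Requested backend is not valid for this device." << std::endl;
    return false;
  }
  return true;
}

int main() {
  // kLite is in the XPU list above, so this request is accepted in the sketch.
  return InitWithSpecifiedBackend(Device::XPU, Backend::kLite) ? 0 : 1;
}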
@@ -51,7 +51,6 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
   bool use_ipu = (runtime_option.device == Device::IPU);
   bool use_rknpu = (runtime_option.device == Device::RKNPU);
   bool use_timvx = (runtime_option.device == Device::TIMVX);
-  bool use_xpu = (runtime_option.device == Device::XPU);
 
   if (use_gpu) {
     if (!IsSupported(valid_gpu_backends, runtime_option.backend)) {
@@ -68,11 +67,6 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
       FDERROR << "The valid timvx backends of model " << ModelName() << " are " << Str(valid_timvx_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
       return false;
     }
-  } else if (use_xpu) {
-    if (!IsSupported(valid_xpu_backends, runtime_option.backend)) {
-      FDERROR << "The valid xpu backends of model " << ModelName() << " are " << Str(valid_xpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
-      return false;
-    }
   } else if(use_ipu) {
     if (!IsSupported(valid_ipu_backends, runtime_option.backend)) {
       FDERROR << "The valid ipu backends of model " << ModelName() << " are " << Str(valid_ipu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
@@ -108,8 +102,6 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
     return CreateRKNPUBackend();
   } else if (runtime_option.device == Device::TIMVX) {
     return CreateTimVXBackend();
-  } else if (runtime_option.device == Device::XPU) {
-    return CreateXPUBackend();
   } else if (runtime_option.device == Device::IPU) {
 #ifdef WITH_IPU
     return CreateIpuBackend();
@@ -119,7 +111,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
     return false;
 #endif
   }
-  FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/XPU now." << std::endl;
+  FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX now." << std::endl;
   return false;
 }
 
@@ -233,29 +225,6 @@ bool FastDeployModel::CreateTimVXBackend() {
   return false;
 }
 
-bool FastDeployModel::CreateXPUBackend() {
-  if (valid_xpu_backends.size() == 0) {
-    FDERROR << "There's no valid xpu backends for model: " << ModelName()
-            << std::endl;
-    return false;
-  }
-
-  for (size_t i = 0; i < valid_xpu_backends.size(); ++i) {
-    if (!IsBackendAvailable(valid_xpu_backends[i])) {
-      continue;
-    }
-    runtime_option.backend = valid_xpu_backends[i];
-    runtime_ = std::unique_ptr<Runtime>(new Runtime());
-    if (!runtime_->Init(runtime_option)) {
-      return false;
-    }
-    runtime_initialized_ = true;
-    return true;
-  }
-  FDERROR << "Found no valid backend for model: " << ModelName() << std::endl;
-  return false;
-}
-
 bool FastDeployModel::CreateIpuBackend() {
   if (valid_ipu_backends.size() == 0) {
     FDERROR << "There's no valid ipu backends for model: " << ModelName()
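For context on the deleted function: CreateXPUBackend() followed the same shape as the other Create*Backend() helpers, walking the model's valid_xpu_backends list, skipping any backend that is not available in the current build, and initializing a Runtime with the first one that is. The self-contained sketch below reproduces that control flow; Backend, StubRuntime, IsBackendAvailable(), and the example list are illustrative stand-ins, not FastDeploy's real API.

#include <iostream>
#include <memory>
#include <vector>

// Illustrative stand-ins only; not FastDeploy's real types.
enum class Backend { kOrt, kPaddle, kLite };

struct StubRuntime {
  bool Init(Backend b) {
    std::cout << "Initialized runtime with backend " << static_cast<int>(b) << std::endl;
    return true;
  }
};

// Hypothetical availability check; the real one depends on compile-time flags.
static bool IsBackendAvailable(Backend b) { return b == Backend::kLite; }

// Same control flow as the removed CreateXPUBackend(): take the first
// available backend from the valid list, create a runtime, and initialize it.
static bool CreateBackendFromList(const std::vector<Backend>& valid_backends,
                                  std::unique_ptr<StubRuntime>* runtime) {
  if (valid_backends.empty()) {
    std::cerr << "There's no valid backend for this model." << std::endl;
    return false;
  }
  for (size_t i = 0; i < valid_backends.size(); ++i) {
    if (!IsBackendAvailable(valid_backends[i])) {
      continue;  // skip backends not compiled into this build
    }
    runtime->reset(new StubRuntime());
    return (*runtime)->Init(valid_backends[i]);  // stop at the first available backend
  }
  std::cerr << "Found no valid backend for this model." << std::endl;
  return false;
}

int main() {
  std::unique_ptr<StubRuntime> runtime;
  return CreateBackendFromList({Backend::kOrt, Backend::kLite}, &runtime) ? 0 : 1;
}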