Rename CANN to Ascend

Author: yunyaoXYY
Date: 2022-12-19 12:47:13 +00:00
Parent: f920096e7c
Commit: ae08bc8c89
30 changed files with 141 additions and 123 deletions


@@ -65,8 +65,8 @@ option(ENABLE_VISION "Whether to enable vision models usage." OFF)
 option(ENABLE_TEXT "Whether to enable text models usage." OFF)
 option(ENABLE_FLYCV "Whether to enable flycv to boost image preprocess." OFF)
 option(ENABLE_TIMVX "Whether to compile for TIMVX deploy." OFF)
-option(WITH_CANN "Whether to compile for Huawei Ascend deploy with CANN." OFF)
-option(WITH_CANN_PY "Whether to compile for Huawei Ascend deploy with CANN using python." OFF)
+option(WITH_ASCEND "Whether to compile for Huawei Ascend deploy." OFF)
+option(WITH_ASCEND_PYTHON "Whether to compile for Huawei Ascend deploy using python." OFF)
 option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
 option(WITH_XPU "Whether to compile for KunlunXin XPU deploy." OFF)
 option(WITH_TESTING "Whether to compile with unittest." OFF)
@@ -146,7 +146,7 @@ if (WITH_TIMVX)
   include(${PROJECT_SOURCE_DIR}/cmake/timvx.cmake)
 endif()
-if (WITH_CANN)
+if (WITH_ASCEND)
   if(NOT ${ENABLE_LITE_BACKEND})
     set(ENABLE_LITE_BACKEND ON)
   endif()
@@ -158,8 +158,8 @@ if (WITH_CANN)
   endif()
 endif()
-if (WITH_CANN_PY)
-  message(WARNING "This is only for CANN python version")
+if (WITH_ASCEND_PYTHON)
+  message(WARNING "This is only for Ascend python version")
   if(NOT ${ENABLE_LITE_BACKEND})
     set(ENABLE_LITE_BACKEND ON)
   endif()
@@ -175,7 +175,7 @@ if (WITH_CANN_PY)
                   OUTPUT_VARIABLE curr_out
                   ERROR_VARIABLE curr_out)
   if(ret EQUAL "1")
-    message(FATAL_ERROR "Failed to patchelf CANN libraries.")
+    message(FATAL_ERROR "Failed to patchelf Paddle Lite libraries when using Ascend.")
   endif()
   message(STATUS "result:${result} out:${curr_out}")
 endif()


@@ -37,8 +37,8 @@ function(fastdeploy_summary)
   message(STATUS "  ENABLE_POROS_BACKEND    : ${ENABLE_POROS_BACKEND}")
   message(STATUS "  ENABLE_TRT_BACKEND      : ${ENABLE_TRT_BACKEND}")
   message(STATUS "  ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
-  message(STATUS "  WITH_CANN               : ${WITH_CANN}")
-  message(STATUS "  WITH_CANN_PY            : ${WITH_CANN_PY}")
+  message(STATUS "  WITH_ASCEND             : ${WITH_ASCEND}")
+  message(STATUS "  WITH_ASCEND_PYTHON      : ${WITH_ASCEND_PYTHON}")
   message(STATUS "  WITH_TIMVX              : ${WITH_TIMVX}")
   message(STATUS "  WITH_XPU                : ${WITH_XPU}")
   if(ENABLE_ORT_BACKEND)


@@ -24,8 +24,8 @@
 | ENABLE_PADDLE_BACKEND | Default OFF; whether to build with the Paddle Inference backend (recommended ON for CPU/GPU) |
 | ENABLE_LITE_BACKEND | Default OFF; whether to build with the Paddle Lite backend (must be ON when building Android libraries) |
 | ENABLE_RKNPU2_BACKEND | Default OFF; whether to build with the RKNPU2 backend (recommended ON for RK3588/RK3568/RK3566) |
-| WITH_CANN | Default OFF; set to ON when deploying on Huawei Ascend NPUs |
-| WITH_CANN_PY | Default OFF; set to ON when deploying on Huawei Ascend NPUs with Python |
+| WITH_ASCEND | Default OFF; set to ON when deploying on Huawei Ascend NPUs |
+| WITH_ASCEND_PYTHON | Default OFF; set to ON when deploying on Huawei Ascend NPUs with Python |
 | ENABLE_TIMVX | Default OFF; set to ON when deploying on RV1126/RV1109 |
 | WITH_XPU | Default OFF; set to ON when deploying on KunlunXin XPUs |
 | WITH_TIMVX | Default OFF; set to ON when deploying on RV1126/RV1109/A311D |


@@ -69,8 +69,8 @@ cd FastDeploy
 mkdir build && cd build
 # CMake configuration with CANN
-cmake -DWITH_CANN=ON \
-      -DCMAKE_INSTALL_PREFIX=fastdeploy-cann \
+cmake -DWITH_ASCEND=ON \
+      -DCMAKE_INSTALL_PREFIX=fastdeploy-ascend \
       -DENABLE_VISION=ON \
       ..
@@ -78,7 +78,7 @@ cmake -DWITH_CANN=ON \
 make -j8
 make install
 ```
-After the build finishes, a fastdeploy-cann directory is generated under the build directory, which means the CANN-based PaddleLite FastDeploy library has been built.
+After the build finishes, a fastdeploy-ascend directory is generated under the build directory, which means the PaddleLite-based FastDeploy library has been built.
 ## 4. Building the Python FastDeploy Library Based on PaddleLite
 After setting up the build environment, the build commands are as follows:
@@ -86,7 +86,7 @@ make install
 # Download the latest source code
 git clone https://github.com/PaddlePaddle/FastDeploy.git
 cd FastDeploy/python
-export WITH_CANN_PY=ON
+export WITH_ASCEND_PYTHON=ON
 export ENABLE_VISION=ON
 python setup.py build
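
Once the wheel produced by `python setup.py build` is installed, a quick smoke test can confirm the renamed flag took effect. This sketch is not part of the commit; it assumes a wheel built with WITH_ASCEND_PYTHON=ON on a host with an Ascend NPU:

```python
import fastdeploy as fd

# use_ascend() replaces the pre-rename use_cann(); it routes inference
# through the Paddle Lite backend on the Huawei Ascend NPU.
option = fd.RuntimeOption()
option.use_ascend()
```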


@@ -2,7 +2,7 @@ PROJECT(infer_demo C CXX)
 CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
 # Specify the path of the downloaded and extracted fastdeploy library
-option(FASTDEPLOY_INSTALL_DIR "Path of fastdeploy-cann sdk")
+option(FASTDEPLOY_INSTALL_DIR "Path of fastdeploy-ascend sdk")
 include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)


@@ -15,7 +15,7 @@
 # Build this demo
 mkdir build
 cd build
-cmake .. -DFASTDEPLOY_INSTALL_DIR=../../../../../../build/fastdeploy-cann
+cmake .. -DFASTDEPLOY_INSTALL_DIR=../../../../../../build/fastdeploy-ascend
 make -j8
 cd ..


@@ -25,9 +25,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
   auto config_file = model_dir + sep + "inference_cls.yaml";
   fastdeploy::RuntimeOption option;
-  option.UseCANN();
-  option.SetNNAdapterDeviceNames({"huawei_ascend_npu"});
-  option.SetNNAdapterContextProperties("HUAWEI_ASCEND_NPU_SELECTED_DEVICE_IDS=0");
+  option.UseAscend();
   auto model = fastdeploy::vision::classification::PaddleClasModel(
       model_file, params_file, config_file, option);


@@ -2,8 +2,8 @@
 export GLOG_v=5
 # Set environment variables for this demo
-# Set the correct install path of fastdeploy-cann
-FASTDEPLOY_INSTALL_DIR="../../../../../../build/fastdeploy-cann/"
+# Set the correct install path of fastdeploy-ascend
+FASTDEPLOY_INSTALL_DIR="../../../../../../build/fastdeploy-ascend/"
 # Set environment variables related to fastdeploy, opencv and paddlelite
 export LD_LIBRARY_PATH=$FASTDEPLOY_INSTALL_DIR/lib/:$FASTDEPLOY_INSTALL_DIR/third_libs/install/opencv/lib/:$FASTDEPLOY_INSTALL_DIR/third_libs/install/paddlelite/lib/:$LD_LIBRARY_PATH


@@ -18,10 +18,9 @@ def parse_arguments():
 def build_option():
     option = fd.RuntimeOption()
-    option.use_cann()
-    option.set_lite_nnadapter_device_names(["huawei_ascend_npu"])
+    option.use_ascend()
     return option
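
After the rename, the demo's option setup collapses to a single call; a minimal end-to-end sketch is shown below. The model directory and image name are illustrative placeholders, not paths from the commit:

```python
import cv2
import fastdeploy as fd

# Build the runtime option exactly as the updated demo does.
option = fd.RuntimeOption()
option.use_ascend()  # formerly use_cann() + set_lite_nnadapter_device_names([...])

# Load a PaddleClas-style model and run one prediction (placeholder paths).
model = fd.vision.classification.PaddleClasModel(
    "model/inference.pdmodel",
    "model/inference.pdiparams",
    "model/inference_cls.yaml",
    runtime_option=option)
print(model.predict(cv2.imread("test.jpg")))
```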


@@ -89,9 +89,34 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
     valid_places.push_back(
         paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kFloat)});
     valid_places.push_back(
         paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
-  } else if (option_.enable_cann) {
-    config_.set_nnadapter_device_names(option_.nnadapter_device_names);
-    config_.set_nnadapter_context_properties(option_.nnadapter_context_properties);
+  } else if (option_.enable_ascend) {
+    if (!option_.nnadapter_device_names.empty()) {
+      config_.set_nnadapter_device_names(option_.nnadapter_device_names);
+    } else {
+      config_.set_nnadapter_device_names({"huawei_ascend_npu"});
+    }
+    if (!option_.nnadapter_context_properties.empty()) {
+      config_.set_nnadapter_context_properties(option_.nnadapter_context_properties);
+    }
+    if (!option_.nnadapter_model_cache_dir.empty()) {
+      config_.set_nnadapter_model_cache_dir(option_.nnadapter_model_cache_dir);
+    }
+    if (!option_.nnadapter_mixed_precision_quantization_config_path.empty()) {
+      config_.set_nnadapter_mixed_precision_quantization_config_path(
+          option_.nnadapter_mixed_precision_quantization_config_path);
+    }
+    if (!option_.nnadapter_subgraph_partition_config_path.empty()) {
+      config_.set_nnadapter_subgraph_partition_config_path(
+          option_.nnadapter_subgraph_partition_config_path);
+    }
     valid_places.push_back(
         paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kInt8)});
     valid_places.push_back(


@@ -52,7 +52,7 @@ struct LiteBackendOption {
       nnadapter_dynamic_shape_info = {{" ", {{0}}}};
   std::string nnadapter_mixed_precision_quantization_config_path = "";
   bool enable_timvx = false;
-  bool enable_cann = false;
+  bool enable_ascend = false;
   bool enable_xpu = false;
   int device_id = 0;
   int xpu_l3_workspace_size = 0xfffc00;


@@ -62,8 +62,8 @@ std::string Str(const Device& d) {
     case Device::TIMVX:
       out = "Device::TIMVX";
       break;
-    case Device::CANN:
-      out = "Device::CANN";
+    case Device::ASCEND:
+      out = "Device::ASCEND";
       break;
     case Device::XPU:
       out = "Device::XPU";
@@ -91,8 +91,8 @@ std::ostream& operator<<(std::ostream& out,const Device& d){
     case Device::XPU:
       out << "Device::XPU";
       break;
-    case Device::CANN:
-      out << "Device::CANN";
+    case Device::ASCEND:
+      out << "Device::ASCEND";
       break;
     default:
       out << "Device::UNKOWN";


@@ -22,7 +22,7 @@
 namespace fastdeploy {
-enum FASTDEPLOY_DECL Device { CPU, GPU, RKNPU, IPU, TIMVX, XPU, CANN };
+enum FASTDEPLOY_DECL Device { CPU, GPU, RKNPU, IPU, TIMVX, XPU, ASCEND };
 FASTDEPLOY_DECL std::string Str(const Device& d);


@@ -51,7 +51,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
   bool use_ipu = (runtime_option.device == Device::IPU);
   bool use_rknpu = (runtime_option.device == Device::RKNPU);
   bool use_timvx = (runtime_option.device == Device::TIMVX);
-  bool use_cann = (runtime_option.device == Device::CANN);
+  bool use_ascend = (runtime_option.device == Device::ASCEND);
   bool use_xpu = (runtime_option.device == Device::XPU);
   if (use_gpu) {
@@ -69,9 +69,9 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
FDERROR << "The valid timvx backends of model " << ModelName() << " are " << Str(valid_timvx_backends) << ", " << runtime_option.backend << " is not supported." << std::endl; FDERROR << "The valid timvx backends of model " << ModelName() << " are " << Str(valid_timvx_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false; return false;
} }
} else if (use_cann) { } else if (use_ascend) {
if (!IsSupported(valid_cann_backends, runtime_option.backend)) { if (!IsSupported(valid_ascend_backends, runtime_option.backend)) {
FDERROR << "The valid cann backends of model " << ModelName() << " are " << Str(valid_cann_backends) << ", " << runtime_option.backend << " is not supported." << std::endl; FDERROR << "The valid ascend backends of model " << ModelName() << " are " << Str(valid_ascend_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false; return false;
} }
} else if (use_xpu) { } else if (use_xpu) {
@@ -114,8 +114,8 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
     return CreateRKNPUBackend();
   } else if (runtime_option.device == Device::TIMVX) {
     return CreateTimVXBackend();
-  } else if (runtime_option.device == Device::CANN) {
-    return CreateCANNBackend();
+  } else if (runtime_option.device == Device::ASCEND) {
+    return CreateASCENDBackend();
   } else if (runtime_option.device == Device::XPU) {
     return CreateXPUBackend();
   } else if (runtime_option.device == Device::IPU) {
@@ -127,7 +127,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
     return false;
 #endif
   }
-  FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/XPU/CANN now." << std::endl;
+  FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/XPU/ASCEND now." << std::endl;
   return false;
 }
@@ -265,18 +265,18 @@ bool FastDeployModel::CreateXPUBackend() {
 }
-bool FastDeployModel::CreateCANNBackend() {
-  if (valid_cann_backends.size() == 0) {
-    FDERROR << "There's no valid cann backends for model: " << ModelName()
+bool FastDeployModel::CreateASCENDBackend() {
+  if (valid_ascend_backends.size() == 0) {
+    FDERROR << "There's no valid ascend backends for model: " << ModelName()
             << std::endl;
     return false;
   }
-  for (size_t i = 0; i < valid_cann_backends.size(); ++i) {
-    if (!IsBackendAvailable(valid_cann_backends[i])) {
+  for (size_t i = 0; i < valid_ascend_backends.size(); ++i) {
+    if (!IsBackendAvailable(valid_ascend_backends[i])) {
       continue;
     }
-    runtime_option.backend = valid_cann_backends[i];
+    runtime_option.backend = valid_ascend_backends[i];
     runtime_ = std::unique_ptr<Runtime>(new Runtime());
     if (!runtime_->Init(runtime_option)) {
       return false;


@@ -45,9 +45,9 @@ class FASTDEPLOY_DECL FastDeployModel {
   /** Model's valid timvx backends. This member defined all the timvx backends have successfully tested for the model
    */
   std::vector<Backend> valid_timvx_backends = {};
-  /** Model's valid cann backends. This member defined all the cann backends have successfully tested for the model
+  /** Model's valid ascend backends. This member defined all the ascend backends have successfully tested for the model
    */
-  std::vector<Backend> valid_cann_backends = {};
+  std::vector<Backend> valid_ascend_backends = {};
   /** Model's valid KunlunXin xpu backends. This member defined all the KunlunXin xpu backends have successfully tested for the model
    */
   std::vector<Backend> valid_xpu_backends = {};
@@ -150,7 +150,7 @@ class FASTDEPLOY_DECL FastDeployModel {
   bool CreateRKNPUBackend();
   bool CreateTimVXBackend();
   bool CreateXPUBackend();
-  bool CreateCANNBackend();
+  bool CreateASCENDBackend();
   std::shared_ptr<Runtime> runtime_;
   bool runtime_initialized_ = false;


@@ -23,7 +23,7 @@ void BindRuntime(pybind11::module& m) {
.def("use_gpu", &RuntimeOption::UseGpu) .def("use_gpu", &RuntimeOption::UseGpu)
.def("use_cpu", &RuntimeOption::UseCpu) .def("use_cpu", &RuntimeOption::UseCpu)
.def("use_rknpu2", &RuntimeOption::UseRKNPU2) .def("use_rknpu2", &RuntimeOption::UseRKNPU2)
.def("use_cann", &RuntimeOption::UseCANN) .def("use_ascend", &RuntimeOption::UseAscend)
.def("use_xpu", &RuntimeOption::UseXpu) .def("use_xpu", &RuntimeOption::UseXpu)
.def("set_external_stream", &RuntimeOption::SetExternalStream) .def("set_external_stream", &RuntimeOption::SetExternalStream)
.def("set_cpu_thread_num", &RuntimeOption::SetCpuThreadNum) .def("set_cpu_thread_num", &RuntimeOption::SetCpuThreadNum)
@@ -34,13 +34,13 @@ void BindRuntime(pybind11::module& m) {
.def("use_trt_backend", &RuntimeOption::UseTrtBackend) .def("use_trt_backend", &RuntimeOption::UseTrtBackend)
.def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend) .def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend)
.def("use_lite_backend", &RuntimeOption::UseLiteBackend) .def("use_lite_backend", &RuntimeOption::UseLiteBackend)
.def("set_lite_nnadapter_device_names", &RuntimeOption::SetLiteNNAdapterDeviceNames) .def("set_lite_device_names", &RuntimeOption::SetLiteDeviceNames)
.def("set_lite_nnadapter_context_properties", &RuntimeOption::SetLiteNNAdapterContextProperties) .def("set_lite_context_properties", &RuntimeOption::SetLiteContextProperties)
.def("set_lite_nnadapter_model_cache_dir", &RuntimeOption::SetLiteNNAdapterModelCacheDir) .def("set_lite_model_cache_dir", &RuntimeOption::SetLiteModelCacheDir)
.def("set_lite_nnadapter_dynamic_shape_info", &RuntimeOption::SetLiteNNAdapterDynamicShapeInfo) .def("set_lite_dynamic_shape_info", &RuntimeOption::SetLiteDynamicShapeInfo)
.def("set_lite_nnadapter_subgraph_partition_path", &RuntimeOption::SetLiteNNAdapterSubgraphPartitionPath) .def("set_lite_subgraph_partition_path", &RuntimeOption::SetLiteSubgraphPartitionPath)
.def("set_lite_nnadapter_mixed_precision_quantization_config_path", &RuntimeOption::SetLiteNNAdapterMixedPrecisionQuantizationConfigPath) .def("set_lite_mixed_precision_quantization_config_path", &RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath)
.def("set_lite_nnadapter_subgraph_partition_config_buffer", &RuntimeOption::SetLiteNNAdapterSubgraphPartitionConfigBuffer) .def("set_lite_subgraph_partition_config_buffer", &RuntimeOption::SetLiteSubgraphPartitionConfigBuffer)
.def("set_paddle_mkldnn", &RuntimeOption::SetPaddleMKLDNN) .def("set_paddle_mkldnn", &RuntimeOption::SetPaddleMKLDNN)
.def("set_openvino_device", &RuntimeOption::SetOpenVINODevice) .def("set_openvino_device", &RuntimeOption::SetOpenVINODevice)
.def("set_openvino_shape_info", &RuntimeOption::SetOpenVINOShapeInfo) .def("set_openvino_shape_info", &RuntimeOption::SetOpenVINOShapeInfo)


@@ -258,9 +258,9 @@ void RuntimeOption::UseXpu(int xpu_id,
   device = Device::XPU;
 }
-void RuntimeOption::UseCANN() {
-  enable_cann = true;
-  device = Device::CANN;
+void RuntimeOption::UseAscend() {
+  enable_ascend = true;
+  device = Device::ASCEND;
 }
 void RuntimeOption::SetExternalStream(void* external_stream) {
@@ -382,37 +382,37 @@ void RuntimeOption::SetLiteOptimizedModelDir(
   lite_optimized_model_dir = optimized_model_dir;
 }
-void RuntimeOption::SetLiteNNAdapterSubgraphPartitionPath(
+void RuntimeOption::SetLiteSubgraphPartitionPath(
     const std::string& nnadapter_subgraph_partition_config_path) {
   lite_nnadapter_subgraph_partition_config_path =
       nnadapter_subgraph_partition_config_path;
 }
-void RuntimeOption::SetLiteNNAdapterSubgraphPartitionConfigBuffer(
+void RuntimeOption::SetLiteSubgraphPartitionConfigBuffer(
     const std::string& nnadapter_subgraph_partition_config_buffer) {
   lite_nnadapter_subgraph_partition_config_buffer = nnadapter_subgraph_partition_config_buffer;
 }
-void RuntimeOption::SetLiteNNAdapterDeviceNames(const std::vector<std::string>& nnadapter_device_names) {
+void RuntimeOption::SetLiteDeviceNames(const std::vector<std::string>& nnadapter_device_names) {
   lite_nnadapter_device_names = nnadapter_device_names;
 }
-void RuntimeOption::SetLiteNNAdapterContextProperties(const std::string& nnadapter_context_properties) {
+void RuntimeOption::SetLiteContextProperties(const std::string& nnadapter_context_properties) {
   lite_nnadapter_context_properties = nnadapter_context_properties;
 }
-void RuntimeOption::SetLiteNNAdapterModelCacheDir(const std::string& nnadapter_model_cache_dir) {
+void RuntimeOption::SetLiteModelCacheDir(const std::string& nnadapter_model_cache_dir) {
   lite_nnadapter_model_cache_dir = nnadapter_model_cache_dir;
 }
-void RuntimeOption::SetLiteNNAdapterDynamicShapeInfo(
+void RuntimeOption::SetLiteDynamicShapeInfo(
     const std::map<std::string, std::vector<std::vector<int64_t>>>&
         nnadapter_dynamic_shape_info) {
   lite_nnadapter_dynamic_shape_info = nnadapter_dynamic_shape_info;
 }
-void RuntimeOption::SetLiteNNAdapterMixedPrecisionQuantizationConfigPath(
+void RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath(
     const std::string& nnadapter_mixed_precision_quantization_config_path) {
   lite_nnadapter_mixed_precision_quantization_config_path = nnadapter_mixed_precision_quantization_config_path;
 }
@@ -586,7 +586,7 @@ bool Runtime::Init(const RuntimeOption& _option) {
FDINFO << "Runtime initialized with Backend::OPENVINO in " FDINFO << "Runtime initialized with Backend::OPENVINO in "
<< Str(option.device) << "." << std::endl; << Str(option.device) << "." << std::endl;
} else if (option.backend == Backend::LITE) { } else if (option.backend == Backend::LITE) {
FDASSERT(option.device == Device::CPU || option.device == Device::TIMVX || option.device == Device::XPU || option.device == Device::CANN, FDASSERT(option.device == Device::CPU || option.device == Device::TIMVX || option.device == Device::XPU || option.device == Device::ASCEND,
"Backend::LITE only supports Device::CPU/Device::TIMVX/Device::XPU."); "Backend::LITE only supports Device::CPU/Device::TIMVX/Device::XPU.");
CreateLiteBackend(); CreateLiteBackend();
FDINFO << "Runtime initialized with Backend::LITE in " << Str(option.device) FDINFO << "Runtime initialized with Backend::LITE in " << Str(option.device)
@@ -843,7 +843,7 @@ void Runtime::CreateLiteBackend() {
   lite_option.nnadapter_dynamic_shape_info = option.lite_nnadapter_dynamic_shape_info;
   lite_option.nnadapter_mixed_precision_quantization_config_path = option.lite_nnadapter_mixed_precision_quantization_config_path;
   lite_option.enable_timvx = option.enable_timvx;
-  lite_option.enable_cann = option.enable_cann;
+  lite_option.enable_ascend = option.enable_ascend;
   lite_option.enable_xpu = option.enable_xpu;
   lite_option.device_id = option.device_id;
   lite_option.xpu_l3_workspace_size = option.xpu_l3_workspace_size;


@@ -102,8 +102,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// Use TimVX to inference
   void UseTimVX();
-  /// Use CANN to inference
-  void UseCANN();
+  /// Use Huawei Ascend to inference
+  void UseAscend();
   ///
   /// \brief Turn on XPU.
@@ -224,46 +224,46 @@ struct FASTDEPLOY_DECL RuntimeOption {
   void SetLiteOptimizedModelDir(const std::string& optimized_model_dir);
   /**
-   * @brief Set nnadapter subgraph partition path for Paddle Lite backend.
+   * @brief Set subgraph partition path for Paddle Lite backend.
    */
-  void SetLiteNNAdapterSubgraphPartitionPath(
+  void SetLiteSubgraphPartitionPath(
       const std::string& nnadapter_subgraph_partition_config_path);
   /**
-   * @brief Set nnadapter subgraph partition path for Paddle Lite backend.
+   * @brief Set subgraph partition config buffer for Paddle Lite backend.
    */
-  void SetLiteNNAdapterSubgraphPartitionConfigBuffer(
+  void SetLiteSubgraphPartitionConfigBuffer(
       const std::string& nnadapter_subgraph_partition_config_buffer);
   /**
-   * @brief Set nnadapter device name for Paddle Lite backend.
+   * @brief Set device name for Paddle Lite backend.
    */
-  void SetLiteNNAdapterDeviceNames(
+  void SetLiteDeviceNames(
       const std::vector<std::string>& nnadapter_device_names);
   /**
-   * @brief Set nnadapter context properties for Paddle Lite backend.
+   * @brief Set context properties for Paddle Lite backend.
    */
-  void SetLiteNNAdapterContextProperties(
+  void SetLiteContextProperties(
       const std::string& nnadapter_context_properties);
   /**
-   * @brief Set nnadapter model cache dir for Paddle Lite backend.
+   * @brief Set model cache dir for Paddle Lite backend.
    */
-  void SetLiteNNAdapterModelCacheDir(
+  void SetLiteModelCacheDir(
       const std::string& nnadapter_model_cache_dir);
   /**
-   * @brief Set nnadapter dynamic shape info for Paddle Lite backend.
+   * @brief Set dynamic shape info for Paddle Lite backend.
    */
-  void SetLiteNNAdapterDynamicShapeInfo(
+  void SetLiteDynamicShapeInfo(
       const std::map<std::string, std::vector<std::vector<int64_t>>>&
           nnadapter_dynamic_shape_info);
   /**
-   * @brief Set nnadapter mixed precision quantization config path for Paddle Lite backend.
+   * @brief Set mixed precision quantization config path for Paddle Lite backend.
    */
-  void SetLiteNNAdapterMixedPrecisionQuantizationConfigPath(
+  void SetLiteMixedPrecisionQuantizationConfigPath(
       const std::string& nnadapter_mixed_precision_quantization_config_path);
   /**
@@ -434,7 +434,7 @@ struct FASTDEPLOY_DECL RuntimeOption {
   std::string lite_nnadapter_mixed_precision_quantization_config_path = "";
   bool enable_timvx = false;
-  bool enable_cann = false;
+  bool enable_ascend = false;
   bool enable_xpu = false;
   // ======Only for Trt Backend=======


@@ -29,7 +29,7 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
                           Backend::LITE};
     valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
     valid_timvx_backends = {Backend::LITE};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     valid_xpu_backends = {Backend::LITE};
     valid_ipu_backends = {Backend::PDINFER};
   } else if (model_format == ModelFormat::ONNX) {


@@ -29,7 +29,7 @@ YOLOv5::YOLOv5(const std::string& model_file, const std::string& params_file,
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
     valid_xpu_backends = {Backend::LITE};
     valid_timvx_backends = {Backend::LITE};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
   }
   runtime_option = custom_option;
   runtime_option.model_format = model_format;


@@ -72,7 +72,7 @@ YOLOv6::YOLOv6(const std::string& model_file, const std::string& params_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
   }
   runtime_option = custom_option;
   runtime_option.model_format = model_format;


@@ -27,7 +27,7 @@ YOLOv7::YOLOv7(const std::string& model_file, const std::string& params_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
   }
   runtime_option = custom_option;
   runtime_option.model_format = model_format;


@@ -39,7 +39,7 @@ class FASTDEPLOY_DECL PicoDet : public PPDetBase {
                           Backend::PDINFER, Backend::LITE};
     valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
     valid_rknpu_backends = {Backend::RKNPU2};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     initialized = Initialize();
   }
@@ -66,7 +66,7 @@ class FASTDEPLOY_DECL PPYOLOE : public PPDetBase {
                           Backend::PDINFER, Backend::LITE};
     valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
     valid_timvx_backends = {Backend::LITE};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     initialized = Initialize();
   }
@@ -91,7 +91,7 @@ class FASTDEPLOY_DECL PPYOLO : public PPDetBase {
                  model_format) {
     valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     initialized = Initialize();
   }
@@ -109,7 +109,7 @@ class FASTDEPLOY_DECL YOLOv3 : public PPDetBase {
     valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER,
                           Backend::LITE};
     valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     initialized = Initialize();
   }
@@ -127,7 +127,7 @@ class FASTDEPLOY_DECL PaddleYOLOX : public PPDetBase {
     valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER,
                           Backend::LITE};
     valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     initialized = Initialize();
   }
@@ -176,7 +176,7 @@ class FASTDEPLOY_DECL SSD : public PPDetBase {
                  model_format) {
     valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     initialized = Initialize();
   }


@@ -32,7 +32,7 @@ Classifier::Classifier(const std::string& model_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
   }
   runtime_option = custom_option;
   runtime_option.model_format = model_format;


@@ -32,7 +32,7 @@ DBDetector::DBDetector(const std::string& model_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
   }
   runtime_option = custom_option;


@@ -91,7 +91,7 @@ bool RecognizerPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTenso
       real_index = indices[i];
     }
     FDMat* mat = &(images->at(real_index));
-#if defined(WITH_CANN) || defined(WITH_CANN_PY)
+#if defined(WITH_ASCEND) || defined(WITH_ASCEND_PYTHON)
     OcrRecognizerResizeImageOnAscend(mat, rec_image_shape_);
 #else
     OcrRecognizerResizeImage(mat, max_wh_ratio, rec_image_shape_);


@@ -34,7 +34,7 @@ Recognizer::Recognizer(const std::string& model_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
   }
   runtime_option = custom_option;


@@ -29,7 +29,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
     valid_rknpu_backends = {Backend::RKNPU2};
     valid_timvx_backends = {Backend::LITE};
-    valid_cann_backends = {Backend::LITE};
+    valid_ascend_backends = {Backend::LITE};
     runtime_option = custom_option;
     runtime_option.model_format = model_format;
     runtime_option.model_file = model_file;


@@ -283,10 +283,10 @@ class RuntimeOption:
                    rknpu2_core=rknpu2.CoreMask.RKNN_NPU_CORE_0):
         return self._option.use_rknpu2(rknpu2_name, rknpu2_core)
-    def use_cann(self):
+    def use_ascend(self):
         """Inference with Huawei Ascend NPU
         """
-        return self._option.use_cann()
+        return self._option.use_ascend()
     def set_cpu_thread_num(self, thread_num=-1):
         """Set number of threads if inference with CPU
@@ -342,47 +342,44 @@ class RuntimeOption:
""" """
return self.use_lite_backend() return self.use_lite_backend()
def set_lite_nnadapter_device_names(self, device_names): def set_lite_device_names(self, device_names):
"""Set nnadapter device name for Paddle Lite backend. """Set nnadapter device name for Paddle Lite backend.
""" """
return self._option.set_lite_nnadapter_device_names(device_names) return self._option.set_lite_device_names(device_names)
def set_lite_nnadapter_context_properties(self, context_properties): def set_lite_context_properties(self, context_properties):
"""Set nnadapter context properties for Paddle Lite backend. """Set nnadapter context properties for Paddle Lite backend.
""" """
return self._option.set_lite_nnadapter_context_properties( return self._option.set_lite_context_properties(context_properties)
context_properties)
def set_lite_nnadapter_model_cache_dir(self, model_cache_dir): def set_lite_model_cache_dir(self, model_cache_dir):
"""Set nnadapter model cache dir for Paddle Lite backend. """Set nnadapter model cache dir for Paddle Lite backend.
""" """
return self._option.set_lite_nnadapter_model_cache_dir(model_cache_dir) return self._option.set_lite_model_cache_dir(model_cache_dir)
def set_lite_nnadapter_dynamic_shape_info(self, dynamic_shape_info): def set_lite_dynamic_shape_info(self, dynamic_shape_info):
""" Set nnadapter dynamic shape info for Paddle Lite backend. """ Set nnadapter dynamic shape info for Paddle Lite backend.
""" """
return self._option.set_lite_nnadapter_dynamic_shape_info( return self._option.set_lite_dynamic_shape_info(dynamic_shape_info)
dynamic_shape_info)
def set_lite_nnadapter_subgraph_partition_path(self, def set_lite_subgraph_partition_path(self, subgraph_partition_path):
subgraph_partition_path):
""" Set nnadapter subgraph partition path for Paddle Lite backend. """ Set nnadapter subgraph partition path for Paddle Lite backend.
""" """
return self._option.set_lite_nnadapter_subgraph_partition_path( return self._option.set_lite_subgraph_partition_path(
subgraph_partition_path) subgraph_partition_path)
def set_lite_nnadapter_subgraph_partition_config_buffer( def set_lite_subgraph_partition_config_buffer(self,
self, subgraph_partition_buffer): subgraph_partition_buffer):
""" Set nnadapter subgraph partition buffer for Paddle Lite backend. """ Set nnadapter subgraph partition buffer for Paddle Lite backend.
""" """
return self._option.set_lite_nnadapter_subgraph_partition_config_buffer( return self._option.set_lite_subgraph_partition_config_buffer(
subgraph_partition_buffer) subgraph_partition_buffer)
def set_lite_nnadapter_mixed_precision_quantization_config_path( def set_lite_mixed_precision_quantization_config_path(
self, mixed_precision_quantization_config_path): self, mixed_precision_quantization_config_path):
""" Set nnadapter mixed precision quantization config path for Paddle Lite backend.. """ Set nnadapter mixed precision quantization config path for Paddle Lite backend..
""" """
return self._option.set_lite_nnadapter_mixed_precision_quantization_config_path( return self._option.set_lite_mixed_precision_quantization_config_path(
mixed_precision_quantization_config_path) mixed_precision_quantization_config_path)
def set_paddle_mkldnn(self, use_mkldnn=True): def set_paddle_mkldnn(self, use_mkldnn=True):


@@ -56,8 +56,7 @@ if os.getenv("BUILD_ON_CPU", "OFF") == "ON":
 setup_configs = dict()
 setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND",
                                                    "OFF")
-setup_configs["WITH_CANN"] = os.getenv("WITH_CANN", "OFF")
-setup_configs["WITH_CANN_PY"] = os.getenv("WITH_CANN_PY", "OFF")
+setup_configs["WITH_ASCEND_PYTHON"] = os.getenv("WITH_ASCEND_PYTHON", "OFF")
 setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")
 setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND",
                                                      "OFF")