Rename CANN to Ascend

yunyaoXYY
2022-12-19 12:47:13 +00:00
parent f920096e7c
commit ae08bc8c89
30 changed files with 141 additions and 123 deletions

View File

@@ -65,8 +65,8 @@ option(ENABLE_VISION "Whether to enable vision models usage." OFF)
option(ENABLE_TEXT "Whether to enable text models usage." OFF)
option(ENABLE_FLYCV "Whether to enable flycv to boost image preprocess." OFF)
option(ENABLE_TIMVX "Whether to compile for TIMVX deploy." OFF)
option(WITH_CANN "Whether to compile for Huawei Ascend deploy with CANN." OFF)
option(WITH_CANN_PY "Whether to compile for Huawei Ascend deploy with CANN using python." OFF)
option(WITH_ASCEND "Whether to compile for Huawei Ascend deploy." OFF)
option(WITH_ASCEND_PYTHON "Whether to compile for Huawei Ascend deploy using python." OFF)
option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
option(WITH_XPU "Whether to compile for KunlunXin XPU deploy." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
@@ -146,7 +146,7 @@ if (WITH_TIMVX)
include(${PROJECT_SOURCE_DIR}/cmake/timvx.cmake)
endif()
if (WITH_CANN)
if (WITH_ASCEND)
if(NOT ${ENABLE_LITE_BACKEND})
set(ENABLE_LITE_BACKEND ON)
endif()
@@ -158,8 +158,8 @@ if (WITH_CANN)
endif()
endif()
if (WITH_CANN_PY)
message(WARNING "This is only for CANN python version")
if (WITH_ASCEND_PYTHON)
message(WARNING "This is only for Ascend python version")
if(NOT ${ENABLE_LITE_BACKEND})
set(ENABLE_LITE_BACKEND ON)
endif()
@@ -175,7 +175,7 @@ if (WITH_CANN_PY)
OUTPUT_VARIABLE curr_out
ERROR_VARIABLE curr_out)
if(ret EQUAL "1")
message(FATAL_ERROR "Failed to patchelf CANN libraries.")
message(FATAL_ERROR "Failed to patchelf Paddle Lite libraries when using Ascend.")
endif()
message(STATUS "result:${result} out:${curr_out}")
endif()

View File

@@ -37,8 +37,8 @@ function(fastdeploy_summary)
message(STATUS " ENABLE_POROS_BACKEND : ${ENABLE_POROS_BACKEND}")
message(STATUS " ENABLE_TRT_BACKEND : ${ENABLE_TRT_BACKEND}")
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
message(STATUS " WITH_CANN : ${WITH_CANN}")
message(STATUS " WITH_CANN_PY : ${WITH_CANN_PY}")
message(STATUS " WITH_ASCEND : ${WITH_ASCEND}")
message(STATUS " WITH_ASCEND_PYTHON : ${WITH_ASCEND_PYTHON}")
message(STATUS " WITH_TIMVX : ${WITH_TIMVX}")
message(STATUS " WITH_XPU : ${WITH_XPU}")
if(ENABLE_ORT_BACKEND)

View File

@@ -24,8 +24,8 @@
| ENABLE_PADDLE_BACKEND | Default OFF; whether to compile in the Paddle Inference backend (recommended ON for CPU/GPU) |
| ENABLE_LITE_BACKEND | Default OFF; whether to compile in the Paddle Lite backend (must be set to ON when building the Android library) |
| ENABLE_RKNPU2_BACKEND | Default OFF; whether to compile in the RKNPU2 backend (recommended ON for RK3588/RK3568/RK3566) |
| WITH_CANN | Default OFF; set to ON when deploying on Huawei Ascend NPU |
| WITH_CANN_PY | Default OFF; set to ON when deploying on Huawei Ascend NPU with Python |
| WITH_ASCEND | Default OFF; set to ON when deploying on Huawei Ascend NPU |
| WITH_ASCEND_PYTHON | Default OFF; set to ON when deploying on Huawei Ascend NPU with Python |
| ENABLE_TIMVX | Default OFF; set to ON when deploying on RV1126/RV1109 |
| WITH_XPU | Default OFF; set to ON when deploying on KunlunXin XPU |
| WITH_TIMVX | Default OFF; set to ON when deploying on RV1126/RV1109/A311D |

View File

@@ -69,8 +69,8 @@ cd FastDeploy
mkdir build && cd build
# CMake configuration for Huawei Ascend deployment
cmake -DWITH_CANN=ON \
-DCMAKE_INSTALL_PREFIX=fastdeploy-cann \
cmake -DWITH_ASCEND=ON \
-DCMAKE_INSTALL_PREFIX=fastdeploy-ascend \
-DENABLE_VISION=ON \
..
@@ -78,7 +78,7 @@ cmake -DWITH_CANN=ON \
make -j8
make install
```
After compilation, a fastdeploy-cann directory is generated under the current build directory, indicating that the Paddle Lite (CANN) based FastDeploy library has been built.
After compilation, a fastdeploy-ascend directory is generated under the current build directory, indicating that the Paddle Lite based FastDeploy library has been built.
## 4. Building the Python FastDeploy Library Based on Paddle Lite
Once the build environment is set up, the build commands are as follows:
@@ -86,7 +86,7 @@ make install
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
export WITH_CANN_PY=ON
export WITH_ASCEND_PYTHON=ON
export ENABLE_VISION=ON
python setup.py build

View File

@@ -2,7 +2,7 @@ PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Specify the path of the downloaded and extracted fastdeploy library
option(FASTDEPLOY_INSTALL_DIR "Path of fastdeploy-cann sdk")
option(FASTDEPLOY_INSTALL_DIR "Path of fastdeploy-ascend sdk")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

View File

@@ -15,7 +15,7 @@
# Build this demo
mkdir build
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=../../../../../../build/fastdeploy-cann
cmake .. -DFASTDEPLOY_INSTALL_DIR=../../../../../../build/fastdeploy-ascend
make -j8
cd ..

View File

@@ -25,9 +25,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto config_file = model_dir + sep + "inference_cls.yaml";
fastdeploy::RuntimeOption option;
option.UseCANN();
option.SetNNAdapterDeviceNames({"huawei_ascend_npu"});
option.SetNNAdapterContextProperties("HUAWEI_ASCEND_NPU_SELECTED_DEVICE_IDS=0");
option.UseAscend();
auto model = fastdeploy::vision::classification::PaddleClasModel(
model_file, params_file, config_file, option);
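
For context, the whole demo now reduces to the following shape. This is a minimal sketch assuming the standard PaddleClas demo layout (paths are placeholders; `Predict` and `ClassifyResult` are the classification API used elsewhere in this repository):

```cpp
#include <iostream>
#include "fastdeploy/vision.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseAscend();  // replaces UseCANN() plus the manual NNAdapter setup

  auto model = fastdeploy::vision::classification::PaddleClasModel(
      "model/inference.pdmodel", "model/inference.pdiparams",
      "model/inference_cls.yaml", option);

  cv::Mat im = cv::imread("test.jpg");
  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Prediction failed." << std::endl;
    return -1;
  }
  std::cout << res.Str() << std::endl;
  return 0;
}
```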

View File

@@ -2,8 +2,8 @@
export GLOG_v=5
# Set the environment variables for this demo
# Set the fastdeploy-cann install path correctly
FASTDEPLOY_INSTALL_DIR="../../../../../../build/fastdeploy-cann/"
# Set the fastdeploy-ascend install path correctly
FASTDEPLOY_INSTALL_DIR="../../../../../../build/fastdeploy-ascend/"
# Set the environment variables related to fastdeploy, opencv and paddlelite
export LD_LIBRARY_PATH=$FASTDEPLOY_INSTALL_DIR/lib/:$FASTDEPLOY_INSTALL_DIR/third_libs/install/opencv/lib/:$FASTDEPLOY_INSTALL_DIR/third_libs/install/paddlelite/lib/:$LD_LIBRARY_PATH

View File

@@ -18,10 +18,9 @@ def parse_arguments():
def build_option():
    option = fd.RuntimeOption()
    option.use_cann()
    option.set_lite_nnadapter_device_names(["huawei_ascend_npu"])
    option = fd.RuntimeOption()
    option.use_ascend()
    return option

View File

@@ -89,9 +89,34 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
        paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kFloat)});
    valid_places.push_back(
        paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
  } else if (option_.enable_cann) {
    config_.set_nnadapter_device_names(option_.nnadapter_device_names);
    config_.set_nnadapter_context_properties(
        option_.nnadapter_context_properties);
  } else if (option_.enable_ascend) {
    if (!option_.nnadapter_device_names.empty()) {
      config_.set_nnadapter_device_names(option_.nnadapter_device_names);
    } else {
      // Fall back to the Huawei Ascend NPU device when none is specified.
      config_.set_nnadapter_device_names({"huawei_ascend_npu"});
    }
    if (!option_.nnadapter_context_properties.empty()) {
      config_.set_nnadapter_context_properties(
          option_.nnadapter_context_properties);
    }
    if (!option_.nnadapter_model_cache_dir.empty()) {
      config_.set_nnadapter_model_cache_dir(option_.nnadapter_model_cache_dir);
    }
    if (!option_.nnadapter_mixed_precision_quantization_config_path.empty()) {
      config_.set_nnadapter_mixed_precision_quantization_config_path(
          option_.nnadapter_mixed_precision_quantization_config_path);
    }
    if (!option_.nnadapter_subgraph_partition_config_path.empty()) {
      config_.set_nnadapter_subgraph_partition_config_path(
          option_.nnadapter_subgraph_partition_config_path);
    }
    valid_places.push_back(
        paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kInt8)});
    valid_places.push_back(
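
The fallback above means `UseAscend()` alone is now sufficient; the NNAdapter fields only need to be set to override the defaults. A minimal sketch (the context-property string is the one the old demo passed explicitly; both setters are optional):

```cpp
#include "fastdeploy/runtime.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseAscend();  // BuildOption falls back to {"huawei_ascend_npu"}

  // Optional overrides, equivalent to what the old demo configured by hand:
  option.SetLiteDeviceNames({"huawei_ascend_npu"});
  option.SetLiteContextProperties("HUAWEI_ASCEND_NPU_SELECTED_DEVICE_IDS=0");
  return 0;
}
```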

View File

@@ -52,7 +52,7 @@ struct LiteBackendOption {
nnadapter_dynamic_shape_info = {{" ", {{0}}}};
std::string nnadapter_mixed_precision_quantization_config_path = "";
bool enable_timvx = false;
bool enable_cann = false;
bool enable_ascend = false;
bool enable_xpu = false;
int device_id = 0;
int xpu_l3_workspace_size = 0xfffc00;

View File

@@ -62,8 +62,8 @@ std::string Str(const Device& d) {
case Device::TIMVX:
out = "Device::TIMVX";
break;
case Device::CANN:
out = "Device::CANN";
case Device::ASCEND:
out = "Device::ASCEND";
break;
case Device::XPU:
out = "Device::XPU";
@@ -91,8 +91,8 @@ std::ostream& operator<<(std::ostream& out,const Device& d){
case Device::XPU:
out << "Device::XPU";
break;
case Device::CANN:
out << "Device::CANN";
case Device::ASCEND:
out << "Device::ASCEND";
break;
default:
out << "Device::UNKOWN";

View File

@@ -22,7 +22,7 @@
namespace fastdeploy {
enum FASTDEPLOY_DECL Device { CPU, GPU, RKNPU, IPU, TIMVX, XPU, CANN};
enum FASTDEPLOY_DECL Device { CPU, GPU, RKNPU, IPU, TIMVX, XPU, ASCEND};
FASTDEPLOY_DECL std::string Str(const Device& d);

View File

@@ -51,7 +51,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
bool use_ipu = (runtime_option.device == Device::IPU);
bool use_rknpu = (runtime_option.device == Device::RKNPU);
bool use_timvx = (runtime_option.device == Device::TIMVX);
bool use_cann = (runtime_option.device == Device::CANN);
bool use_ascend = (runtime_option.device == Device::ASCEND);
bool use_xpu = (runtime_option.device == Device::XPU);
if (use_gpu) {
@@ -69,9 +69,9 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
FDERROR << "The valid timvx backends of model " << ModelName() << " are " << Str(valid_timvx_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
} else if (use_cann) {
if (!IsSupported(valid_cann_backends, runtime_option.backend)) {
FDERROR << "The valid cann backends of model " << ModelName() << " are " << Str(valid_cann_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
} else if (use_ascend) {
if (!IsSupported(valid_ascend_backends, runtime_option.backend)) {
FDERROR << "The valid ascend backends of model " << ModelName() << " are " << Str(valid_ascend_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
} else if (use_xpu) {
@@ -114,8 +114,8 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
return CreateRKNPUBackend();
} else if (runtime_option.device == Device::TIMVX) {
return CreateTimVXBackend();
} else if (runtime_option.device == Device::CANN) {
return CreateCANNBackend();
} else if (runtime_option.device == Device::ASCEND) {
return CreateASCENDBackend();
} else if (runtime_option.device == Device::XPU) {
return CreateXPUBackend();
} else if (runtime_option.device == Device::IPU) {
@@ -127,7 +127,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
return false;
#endif
}
FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/XPU/CANN now." << std::endl;
FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/XPU/ASCEND now." << std::endl;
return false;
}
@@ -265,18 +265,18 @@ bool FastDeployModel::CreateXPUBackend() {
}
bool FastDeployModel::CreateCANNBackend() {
if (valid_cann_backends.size() == 0) {
FDERROR << "There's no valid cann backends for model: " << ModelName()
bool FastDeployModel::CreateASCENDBackend() {
if (valid_ascend_backends.size() == 0) {
FDERROR << "There's no valid ascend backends for model: " << ModelName()
<< std::endl;
return false;
}
for (size_t i = 0; i < valid_cann_backends.size(); ++i) {
if (!IsBackendAvailable(valid_cann_backends[i])) {
for (size_t i = 0; i < valid_ascend_backends.size(); ++i) {
if (!IsBackendAvailable(valid_ascend_backends[i])) {
continue;
}
runtime_option.backend = valid_cann_backends[i];
runtime_option.backend = valid_ascend_backends[i];
runtime_ = std::unique_ptr<Runtime>(new Runtime());
if (!runtime_->Init(runtime_option)) {
return false;

View File

@@ -45,9 +45,9 @@ class FASTDEPLOY_DECL FastDeployModel {
/** Model's valid timvx backends. This member defined all the timvx backends have successfully tested for the model
*/
std::vector<Backend> valid_timvx_backends = {};
/** Model's valid cann backends. This member defined all the cann backends have successfully tested for the model
/** Model's valid ascend backends. This member defines all the ascend backends that have been successfully tested for the model
*/
std::vector<Backend> valid_cann_backends = {};
std::vector<Backend> valid_ascend_backends = {};
/** Model's valid KunlunXin xpu backends. This member defined all the KunlunXin xpu backends have successfully tested for the model
*/
std::vector<Backend> valid_xpu_backends = {};
@@ -150,7 +150,7 @@ class FASTDEPLOY_DECL FastDeployModel {
bool CreateRKNPUBackend();
bool CreateTimVXBackend();
bool CreateXPUBackend();
bool CreateCANNBackend();
bool CreateASCENDBackend();
std::shared_ptr<Runtime> runtime_;
bool runtime_initialized_ = false;
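
To make the renamed member concrete: a model opts into Ascend by listing its validated backends, and `CreateASCENDBackend()` then picks the first available one. A sketch with a hypothetical `MyModel` (illustrative only; the real model classes later in this commit set the same field in their constructors):

```cpp
#include "fastdeploy/fastdeploy_model.h"

// Hypothetical model class, shown only to illustrate valid_ascend_backends.
class MyModel : public fastdeploy::FastDeployModel {
 public:
  MyModel() {
    valid_cpu_backends = {fastdeploy::Backend::LITE};
    // Renamed from valid_cann_backends; Paddle Lite is the backend
    // validated on Huawei Ascend NPUs throughout this commit.
    valid_ascend_backends = {fastdeploy::Backend::LITE};
  }
  std::string ModelName() const override { return "MyModel"; }
};
```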

View File

@@ -23,7 +23,7 @@ void BindRuntime(pybind11::module& m) {
.def("use_gpu", &RuntimeOption::UseGpu)
.def("use_cpu", &RuntimeOption::UseCpu)
.def("use_rknpu2", &RuntimeOption::UseRKNPU2)
.def("use_cann", &RuntimeOption::UseCANN)
.def("use_ascend", &RuntimeOption::UseAscend)
.def("use_xpu", &RuntimeOption::UseXpu)
.def("set_external_stream", &RuntimeOption::SetExternalStream)
.def("set_cpu_thread_num", &RuntimeOption::SetCpuThreadNum)
@@ -34,13 +34,13 @@ void BindRuntime(pybind11::module& m) {
.def("use_trt_backend", &RuntimeOption::UseTrtBackend)
.def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend)
.def("use_lite_backend", &RuntimeOption::UseLiteBackend)
.def("set_lite_nnadapter_device_names", &RuntimeOption::SetLiteNNAdapterDeviceNames)
.def("set_lite_nnadapter_context_properties", &RuntimeOption::SetLiteNNAdapterContextProperties)
.def("set_lite_nnadapter_model_cache_dir", &RuntimeOption::SetLiteNNAdapterModelCacheDir)
.def("set_lite_nnadapter_dynamic_shape_info", &RuntimeOption::SetLiteNNAdapterDynamicShapeInfo)
.def("set_lite_nnadapter_subgraph_partition_path", &RuntimeOption::SetLiteNNAdapterSubgraphPartitionPath)
.def("set_lite_nnadapter_mixed_precision_quantization_config_path", &RuntimeOption::SetLiteNNAdapterMixedPrecisionQuantizationConfigPath)
.def("set_lite_nnadapter_subgraph_partition_config_buffer", &RuntimeOption::SetLiteNNAdapterSubgraphPartitionConfigBuffer)
.def("set_lite_device_names", &RuntimeOption::SetLiteDeviceNames)
.def("set_lite_context_properties", &RuntimeOption::SetLiteContextProperties)
.def("set_lite_model_cache_dir", &RuntimeOption::SetLiteModelCacheDir)
.def("set_lite_dynamic_shape_info", &RuntimeOption::SetLiteDynamicShapeInfo)
.def("set_lite_subgraph_partition_path", &RuntimeOption::SetLiteSubgraphPartitionPath)
.def("set_lite_mixed_precision_quantization_config_path", &RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath)
.def("set_lite_subgraph_partition_config_buffer", &RuntimeOption::SetLiteSubgraphPartitionConfigBuffer)
.def("set_paddle_mkldnn", &RuntimeOption::SetPaddleMKLDNN)
.def("set_openvino_device", &RuntimeOption::SetOpenVINODevice)
.def("set_openvino_shape_info", &RuntimeOption::SetOpenVINOShapeInfo)

View File

@@ -258,9 +258,9 @@ void RuntimeOption::UseXpu(int xpu_id,
device = Device::XPU;
}
void RuntimeOption::UseCANN(){
enable_cann = true;
device = Device::CANN;
void RuntimeOption::UseAscend(){
enable_ascend = true;
device = Device::ASCEND;
}
void RuntimeOption::SetExternalStream(void* external_stream) {
@@ -382,37 +382,37 @@ void RuntimeOption::SetLiteOptimizedModelDir(
lite_optimized_model_dir = optimized_model_dir;
}
void RuntimeOption::SetLiteNNAdapterSubgraphPartitionPath(
void RuntimeOption::SetLiteSubgraphPartitionPath(
const std::string& nnadapter_subgraph_partition_config_path) {
lite_nnadapter_subgraph_partition_config_path =
nnadapter_subgraph_partition_config_path;
}
void RuntimeOption::SetLiteNNAdapterSubgraphPartitionConfigBuffer(
void RuntimeOption::SetLiteSubgraphPartitionConfigBuffer(
const std::string& nnadapter_subgraph_partition_config_buffer){
lite_nnadapter_subgraph_partition_config_buffer = nnadapter_subgraph_partition_config_buffer;
}
void RuntimeOption::SetLiteNNAdapterDeviceNames(const std::vector<std::string>& nnadapter_device_names){
void RuntimeOption::SetLiteDeviceNames(const std::vector<std::string>& nnadapter_device_names){
lite_nnadapter_device_names = nnadapter_device_names;
}
void RuntimeOption::SetLiteNNAdapterContextProperties(const std::string& nnadapter_context_properties){
void RuntimeOption::SetLiteContextProperties(const std::string& nnadapter_context_properties){
lite_nnadapter_context_properties = nnadapter_context_properties;
}
void RuntimeOption::SetLiteNNAdapterModelCacheDir(const std::string& nnadapter_model_cache_dir){
void RuntimeOption::SetLiteModelCacheDir(const std::string& nnadapter_model_cache_dir){
lite_nnadapter_model_cache_dir = nnadapter_model_cache_dir;
}
void RuntimeOption::SetLiteNNAdapterDynamicShapeInfo(
void RuntimeOption::SetLiteDynamicShapeInfo(
const std::map<std::string, std::vector<std::vector<int64_t>>>&
nnadapter_dynamic_shape_info){
lite_nnadapter_dynamic_shape_info = nnadapter_dynamic_shape_info;
}
void RuntimeOption::SetLiteNNAdapterMixedPrecisionQuantizationConfigPath(
void RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath(
const std::string& nnadapter_mixed_precision_quantization_config_path){
lite_nnadapter_mixed_precision_quantization_config_path = nnadapter_mixed_precision_quantization_config_path;
}
@@ -586,7 +586,7 @@ bool Runtime::Init(const RuntimeOption& _option) {
FDINFO << "Runtime initialized with Backend::OPENVINO in "
<< Str(option.device) << "." << std::endl;
} else if (option.backend == Backend::LITE) {
FDASSERT(option.device == Device::CPU || option.device == Device::TIMVX || option.device == Device::XPU || option.device == Device::CANN,
FDASSERT(option.device == Device::CPU || option.device == Device::TIMVX || option.device == Device::XPU || option.device == Device::ASCEND,
"Backend::LITE only supports Device::CPU/Device::TIMVX/Device::XPU.");
CreateLiteBackend();
FDINFO << "Runtime initialized with Backend::LITE in " << Str(option.device)
@@ -843,7 +843,7 @@ void Runtime::CreateLiteBackend() {
lite_option.nnadapter_dynamic_shape_info = option.lite_nnadapter_dynamic_shape_info;
lite_option.nnadapter_mixed_precision_quantization_config_path = option.lite_nnadapter_mixed_precision_quantization_config_path;
lite_option.enable_timvx = option.enable_timvx;
lite_option.enable_cann = option.enable_cann;
lite_option.enable_ascend = option.enable_ascend;
lite_option.enable_xpu = option.enable_xpu;
lite_option.device_id = option.device_id;
lite_option.xpu_l3_workspace_size = option.xpu_l3_workspace_size;

View File

@@ -102,8 +102,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
/// Use TimVX to inference
void UseTimVX();
/// Use CANN to inference
void UseCANN();
/// Use Huawei Ascend to inference
void UseAscend();
///
/// \brief Turn on XPU.
@@ -224,46 +224,46 @@ struct FASTDEPLOY_DECL RuntimeOption {
void SetLiteOptimizedModelDir(const std::string& optimized_model_dir);
/**
* @brief Set nnadapter subgraph partition path for Paddle Lite backend.
* @brief Set subgraph partition path for Paddle Lite backend.
*/
void SetLiteNNAdapterSubgraphPartitionPath(
void SetLiteSubgraphPartitionPath(
const std::string& nnadapter_subgraph_partition_config_path);
/**
* @brief Set nnadapter subgraph partition path for Paddle Lite backend.
* @brief Set subgraph partition config buffer for Paddle Lite backend.
*/
void SetLiteNNAdapterSubgraphPartitionConfigBuffer(
void SetLiteSubgraphPartitionConfigBuffer(
const std::string& nnadapter_subgraph_partition_config_buffer);
/**
* @brief Set nnadapter device name for Paddle Lite backend.
* @brief Set device name for Paddle Lite backend.
*/
void SetLiteNNAdapterDeviceNames(
void SetLiteDeviceNames(
const std::vector<std::string>& nnadapter_device_names);
/**
* @brief Set nnadapter context properties for Paddle Lite backend.
* @brief Set context properties for Paddle Lite backend.
*/
void SetLiteNNAdapterContextProperties(
void SetLiteContextProperties(
const std::string& nnadapter_context_properties);
/**
* @brief Set nnadapter model cache dir for Paddle Lite backend.
* @brief Set model cache dir for Paddle Lite backend.
*/
void SetLiteNNAdapterModelCacheDir(
void SetLiteModelCacheDir(
const std::string& nnadapter_model_cache_dir);
/**
* @brief Set nnadapter dynamic shape info for Paddle Lite backend.
* @brief Set dynamic shape info for Paddle Lite backend.
*/
void SetLiteNNAdapterDynamicShapeInfo(
void SetLiteDynamicShapeInfo(
const std::map<std::string, std::vector<std::vector<int64_t>>>&
nnadapter_dynamic_shape_info);
/**
* @brief Set nnadapter mixed precision quantization config path for Paddle Lite backend.
* @brief Set mixed precision quantization config path for Paddle Lite backend.
*/
void SetLiteNNAdapterMixedPrecisionQuantizationConfigPath(
void SetLiteMixedPrecisionQuantizationConfigPath(
const std::string& nnadapter_mixed_precision_quantization_config_path);
/**
@@ -434,7 +434,7 @@ struct FASTDEPLOY_DECL RuntimeOption {
std::string lite_nnadapter_mixed_precision_quantization_config_path = "";
bool enable_timvx = false;
bool enable_cann = false;
bool enable_ascend = false;
bool enable_xpu = false;
// ======Only for Trt Backend=======
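
Taken together, the renamed setter surface can be exercised like this; a usage sketch with placeholder paths (method names exactly as declared above):

```cpp
#include "fastdeploy/runtime.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseAscend();
  option.UseLiteBackend();

  // Formerly the SetLiteNNAdapter* family; all values below are placeholders.
  option.SetLiteModelCacheDir("./model_cache");
  option.SetLiteSubgraphPartitionPath("./subgraph_partition_config.txt");
  option.SetLiteDynamicShapeInfo({{"x", {{1, 3, 224, 224}}}});
  option.SetLiteMixedPrecisionQuantizationConfigPath("./mixed_precision.txt");
  return 0;
}
```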

View File

@@ -29,7 +29,7 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_timvx_backends = {Backend::LITE};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
valid_xpu_backends = {Backend::LITE};
valid_ipu_backends = {Backend::PDINFER};
} else if (model_format == ModelFormat::ONNX) {

View File

@@ -29,7 +29,7 @@ YOLOv5::YOLOv5(const std::string& model_file, const std::string& params_file,
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_xpu_backends = {Backend::LITE};
valid_timvx_backends = {Backend::LITE};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;
runtime_option.model_format = model_format;

View File

@@ -72,7 +72,7 @@ YOLOv6::YOLOv6(const std::string& model_file, const std::string& params_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;
runtime_option.model_format = model_format;

View File

@@ -27,7 +27,7 @@ YOLOv7::YOLOv7(const std::string& model_file, const std::string& params_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;
runtime_option.model_format = model_format;

View File

@@ -39,7 +39,7 @@ class FASTDEPLOY_DECL PicoDet : public PPDetBase {
Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_rknpu_backends = {Backend::RKNPU2};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -66,7 +66,7 @@ class FASTDEPLOY_DECL PPYOLOE : public PPDetBase {
Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_timvx_backends = {Backend::LITE};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -91,7 +91,7 @@ class FASTDEPLOY_DECL PPYOLO : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -109,7 +109,7 @@ class FASTDEPLOY_DECL YOLOv3 : public PPDetBase {
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER,
Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -127,7 +127,7 @@ class FASTDEPLOY_DECL PaddleYOLOX : public PPDetBase {
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER,
Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -176,7 +176,7 @@ class FASTDEPLOY_DECL SSD : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}

View File

@@ -32,7 +32,7 @@ Classifier::Classifier(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;
runtime_option.model_format = model_format;

View File

@@ -32,7 +32,7 @@ DBDetector::DBDetector(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;

View File

@@ -91,7 +91,7 @@ bool RecognizerPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTenso
real_index = indices[i];
}
FDMat* mat = &(images->at(real_index));
#if defined(WITH_CANN) || defined(WITH_CANN_PY)
#if defined(WITH_ASCEND) || defined(WITH_ASCEND_PYTHON)
OcrRecognizerResizeImageOnAscend(mat, rec_image_shape_);
#else
OcrRecognizerResizeImage(mat, max_wh_ratio, rec_image_shape_);

View File

@@ -34,7 +34,7 @@ Recognizer::Recognizer(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;

View File

@@ -29,7 +29,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_rknpu_backends = {Backend::RKNPU2};
valid_timvx_backends = {Backend::LITE};
valid_cann_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;

View File

@@ -283,10 +283,10 @@ class RuntimeOption:
rknpu2_core=rknpu2.CoreMask.RKNN_NPU_CORE_0):
return self._option.use_rknpu2(rknpu2_name, rknpu2_core)
    def use_cann(self):
    def use_ascend(self):
        """Inference with Huawei Ascend NPU
        """
        return self._option.use_cann()
        return self._option.use_ascend()
    def set_cpu_thread_num(self, thread_num=-1):
        """Set number of threads if inference with CPU
@@ -342,47 +342,44 @@ class RuntimeOption:
"""
return self.use_lite_backend()
def set_lite_nnadapter_device_names(self, device_names):
def set_lite_device_names(self, device_names):
"""Set nnadapter device name for Paddle Lite backend.
"""
return self._option.set_lite_nnadapter_device_names(device_names)
return self._option.set_lite_device_names(device_names)
def set_lite_nnadapter_context_properties(self, context_properties):
def set_lite_context_properties(self, context_properties):
"""Set nnadapter context properties for Paddle Lite backend.
"""
return self._option.set_lite_nnadapter_context_properties(
context_properties)
return self._option.set_lite_context_properties(context_properties)
def set_lite_nnadapter_model_cache_dir(self, model_cache_dir):
def set_lite_model_cache_dir(self, model_cache_dir):
"""Set nnadapter model cache dir for Paddle Lite backend.
"""
return self._option.set_lite_nnadapter_model_cache_dir(model_cache_dir)
return self._option.set_lite_model_cache_dir(model_cache_dir)
def set_lite_nnadapter_dynamic_shape_info(self, dynamic_shape_info):
def set_lite_dynamic_shape_info(self, dynamic_shape_info):
""" Set nnadapter dynamic shape info for Paddle Lite backend.
"""
return self._option.set_lite_nnadapter_dynamic_shape_info(
dynamic_shape_info)
return self._option.set_lite_dynamic_shape_info(dynamic_shape_info)
def set_lite_nnadapter_subgraph_partition_path(self,
subgraph_partition_path):
def set_lite_subgraph_partition_path(self, subgraph_partition_path):
""" Set nnadapter subgraph partition path for Paddle Lite backend.
"""
return self._option.set_lite_nnadapter_subgraph_partition_path(
return self._option.set_lite_subgraph_partition_path(
subgraph_partition_path)
def set_lite_nnadapter_subgraph_partition_config_buffer(
self, subgraph_partition_buffer):
def set_lite_subgraph_partition_config_buffer(self,
subgraph_partition_buffer):
""" Set nnadapter subgraph partition buffer for Paddle Lite backend.
"""
return self._option.set_lite_nnadapter_subgraph_partition_config_buffer(
return self._option.set_lite_subgraph_partition_config_buffer(
subgraph_partition_buffer)
def set_lite_nnadapter_mixed_precision_quantization_config_path(
def set_lite_mixed_precision_quantization_config_path(
self, mixed_precision_quantization_config_path):
""" Set nnadapter mixed precision quantization config path for Paddle Lite backend..
"""
return self._option.set_lite_nnadapter_mixed_precision_quantization_config_path(
return self._option.set_lite_mixed_precision_quantization_config_path(
mixed_precision_quantization_config_path)
def set_paddle_mkldnn(self, use_mkldnn=True):

View File

@@ -56,8 +56,7 @@ if os.getenv("BUILD_ON_CPU", "OFF") == "ON":
setup_configs = dict()
setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND",
"OFF")
setup_configs["WITH_CANN"] = os.getenv("WITH_CANN", "OFF")
setup_configs["WITH_CANN_PY"] = os.getenv("WITH_CANN_PY", "OFF")
setup_configs["WITH_ASCEND_PYTHON"] = os.getenv("WITH_ASCEND_PYTHON", "OFF")
setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")
setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND",
"OFF")