Improve Ascend

Author: yunyaoXYY
Date: 2022-12-20 10:58:59 +00:00
Parent: 7212c14113
Commit: 8a860734ae
12 changed files with 69 additions and 84 deletions

View File

@@ -66,7 +66,6 @@ option(ENABLE_TEXT "Whether to enable text models usage." OFF)
 option(ENABLE_FLYCV "Whether to enable flycv to boost image preprocess." OFF)
 option(ENABLE_TIMVX "Whether to compile for TIMVX deploy." OFF)
 option(WITH_ASCEND "Whether to compile for Huawei Ascend deploy." OFF)
-option(WITH_ASCEND_PYTHON "Whether to compile for Huawei Ascend deploy using python." OFF)
 option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
 option(WITH_XPU "Whether to compile for KunlunXin XPU deploy." OFF)
 option(WITH_TESTING "Whether to compile with unittest." OFF)
@@ -147,37 +146,7 @@ if (WITH_TIMVX)
 endif()
 if (WITH_ASCEND)
-  if(NOT ${ENABLE_LITE_BACKEND})
-    set(ENABLE_LITE_BACKEND ON)
-  endif()
-  if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
-    message(FATAL_ERROR "Huawei Ascend NPU is supported on Linux aarch64 platform for now.")
-  endif()
-  if(NOT PADDLELITE_URL)
-    set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_1121.tgz")
-  endif()
-endif()
-if (WITH_ASCEND_PYTHON)
-  message(WARNING "This is only for Ascend python version")
-  if(NOT ${ENABLE_LITE_BACKEND})
-    set(ENABLE_LITE_BACKEND ON)
-  endif()
-  if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
-    message(FATAL_ERROR "Huawei Ascend NPU is supported on Linux aarch64 platform for now.")
-  endif()
-  if(NOT PADDLELITE_URL)
-    set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_python_1207.tgz")
-  endif()
-  execute_process(COMMAND sh -c "ls *.so*" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/paddlelite/lib
-                  COMMAND sh -c "xargs ${PATCHELF_EXE} --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/paddlelite/lib
-                  RESULT_VARIABLE result
-                  OUTPUT_VARIABLE curr_out
-                  ERROR_VARIABLE curr_out)
-  if(ret EQUAL "1")
-    message(FATAL_ERROR "Failed to patchelf Paddle Lite libraries when using Ascend.")
-  endif()
-  message(STATUS "result:${result} out:${curr_out}")
+  include(${PROJECT_SOURCE_DIR}/cmake/ascend.cmake)
 endif()
 if (WITH_XPU)

View File

@@ -37,8 +37,7 @@ function(fastdeploy_summary)
   message(STATUS " ENABLE_POROS_BACKEND : ${ENABLE_POROS_BACKEND}")
   message(STATUS " ENABLE_TRT_BACKEND : ${ENABLE_TRT_BACKEND}")
   message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
   message(STATUS " WITH_ASCEND : ${WITH_ASCEND}")
-  message(STATUS " WITH_ASCEND_PYTHON : ${WITH_ASCEND_PYTHON}")
   message(STATUS " WITH_TIMVX : ${WITH_TIMVX}")
   message(STATUS " WITH_XPU : ${WITH_XPU}")
   if(ENABLE_ORT_BACKEND)

View File

@@ -27,7 +27,6 @@
 | ENABLE_LITE_BACKEND | Default OFF; whether to build in the Paddle Lite backend (needs to be ON when building the Android library) |
 | ENABLE_RKNPU2_BACKEND | Default OFF; whether to build in the RKNPU2 backend (recommended ON on RK3588/RK3568/RK3566) |
 | WITH_ASCEND | Default OFF; needs to be ON when deploying on Huawei Ascend NPU |
-| WITH_ASCEND_PYTHON | Default OFF; needs to be ON when deploying on Huawei Ascend NPU with Python |
 | ENABLE_TIMVX | Default OFF; needs to be ON when deploying on RV1126/RV1109 |
 | WITH_XPU | Default OFF; needs to be ON when deploying on KunlunXin XPU |
 | WITH_TIMVX | Default OFF; needs to be ON when deploying on RV1126/RV1109/A311D |

View File

@@ -1,9 +1,9 @@
 # Preparing the Build Environment for Huawei Ascend NPU Deployment
 Based on the Paddle-Lite backend, FastDeploy supports deployment and inference on Huawei Ascend NPU.
-For more details, please refer to: [PaddleLite Deployment Example](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/demo_guides/huawei_ascend_npu.md).
-This document describes how to compile the PaddleLite-based C++ and Python FastDeploy source code under an ARM Linux OS to generate prediction libraries targeting Huawei Ascend NPU.
+For more details, please refer to: [Paddle Lite Deployment Example](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/demo_guides/huawei_ascend_npu.md).
+This document describes how to compile the Paddle Lite-based C++ and Python FastDeploy source code under an ARM Linux OS to generate prediction libraries targeting Huawei Ascend NPU.
 For more build options, please refer to the [FastDeploy build options description](./README.md)
@@ -46,9 +46,9 @@ $ npu-smi info
 # Download the Dockerfile
 $ wget https://bj.bcebos.com/fastdeploy/test/Ascend_ubuntu18.04_aarch64_5.1.rc2.Dockerfile
 # Build the image from the Dockerfile
 $ docker build --network=host -f Ascend_ubuntu18.04_aarch64_5.1.rc2.Dockerfile -t paddlelite/ascend_aarch64:cann_5.1.rc2 .
 # Create the container
 $ docker run -itd --privileged --name=ascend-aarch64 --net=host -v $PWD:/Work -w /Work --device=/dev/davinci0 --device=/dev/davinci_manager --device=/dev/hisi_hdc --device /dev/devmm_svm -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi -v /usr/local/Ascend/driver/:/usr/local/Ascend/driver/ paddlelite/ascend_aarch64:cann_5.1.rc2 /bin/bash
 # Enter the container
 $ docker exec -it ascend-aarch64 /bin/bash
 # Verify that the Ascend environment inside the container was created successfully
@@ -58,9 +58,9 @@ $ npu-smi info
 Note:
 - If you want to use a different CANN version inside Docker, update the CANN download path in the Dockerfile yourself and update the corresponding driver and firmware as well. The current default in the Dockerfile is [CANN 5.1.RC2](https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/CANN/CANN%205.1.RC2/Ascend-cann-toolkit_5.1.RC2_linux-aarch64.run).
-- If you do not want to use Docker, you can follow the [build environment preparation for ARM Linux](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/source_compile/arm_linux_compile_arm_linux.rst) provided by PaddleLite to set up the build environment yourself, then download and install the corresponding CANN packages to complete the configuration.
-## 3. Building the C++ FastDeploy Library Based on PaddleLite
+- If you do not want to use Docker, you can follow the [build environment preparation for ARM Linux](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/source_compile/arm_linux_compile_arm_linux.rst) provided by Paddle Lite to set up the build environment yourself, then download and install the corresponding CANN packages to complete the configuration.
+## 3. Building the C++ FastDeploy Library Based on Paddle Lite
 After setting up the build environment, the build commands are as follows:
 ```bash
 # Download the latest source code
@@ -80,13 +80,13 @@ make install
 ```
 After compilation, a fastdeploy-ascend directory is generated under the current build directory, indicating that the Paddle Lite-based FastDeploy library has been built.
-## 4. Building the Python FastDeploy Library Based on PaddleLite
+## 4. Building the Python FastDeploy Library Based on Paddle Lite
 After setting up the build environment, the build commands are as follows:
 ```bash
 # Download the latest source code
 git clone https://github.com/PaddlePaddle/FastDeploy.git
 cd FastDeploy/python
-export WITH_ASCEND_PYTHON=ON
+export WITH_ASCEND=ON
 export ENABLE_VISION=ON
 python setup.py build

View File

@@ -1,9 +1,9 @@
 # How to build Huawei Ascend Deployment Environment
 Based on the Paddle-Lite backend, FastDeploy supports model inference on Huawei's Ascend NPU.
-For more detailed information, please refer to: [PaddleLite Deployment Example](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/demo_guides/huawei_ascend_npu.md).
-This document describes how to compile PaddleLite-based C++ and Python FastDeploy source code under an ARM Linux OS environment to generate prediction libraries for the Huawei Ascend NPU as the target hardware.
+For more detailed information, please refer to: [Paddle Lite Deployment Example](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/demo_guides/huawei_ascend_npu.md).
+This document describes how to compile Paddle Lite-based C++ and Python FastDeploy source code under an ARM Linux OS environment to generate prediction libraries for the Huawei Ascend NPU as the target hardware.
 For more compilation options, please refer to the [FastDeploy compilation options description](./README.md)
@@ -43,9 +43,9 @@ In order to ensure consistency with the FastDeploy verified build environment, i
 # Download Dockerfile
 $ wget https://bj.bcebos.com/fastdeploy/test/Ascend_ubuntu18.04_aarch64_5.1.rc2.Dockerfile
 # Create docker images
 $ docker build --network=host -f Ascend_ubuntu18.04_aarch64_5.1.rc2.Dockerfile -t paddlelite/ascend_aarch64:cann_5.1.rc2 .
 # Create container
 $ docker run -itd --privileged --name=ascend-aarch64 --net=host -v $PWD:/Work -w /Work --device=/dev/davinci0 --device=/dev/davinci_manager --device=/dev/hisi_hdc --device /dev/devmm_svm -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi -v /usr/local/Ascend/driver/:/usr/local/Ascend/driver/ paddlelite/ascend_aarch64:cann_5.1.rc2 /bin/bash
 # Enter the container
 $ docker exec -it ascend-aarch64 /bin/bash
 # Verify that the Ascend environment for the container is created successfully
@@ -55,9 +55,9 @@ Once the above steps are successful, the user can start compiling FastDeploy dir
 Note:
 - If you want to use another CANN version in Docker, please update the CANN download path in the Dockerfile, and update the corresponding driver and firmware. The current default in the Dockerfile is [CANN 5.1.RC2](https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/CANN/CANN%205.1.RC2/Ascend-cann-toolkit_5.1.RC2_linux-aarch64.run).
-- If you do not want to use docker, you can refer to [Compile Environment Preparation for ARM Linux Environments](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/source_compile/arm_linux_compile_arm_linux.rst) provided by PaddleLite to configure your own compilation environment, and then download and install the proper CANN packages to complete the configuration.
-## C++ FastDeploy library compilation based on PaddleLite
+- If you do not want to use docker, you can refer to [Compile Environment Preparation for ARM Linux Environments](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/source_compile/arm_linux_compile_arm_linux.rst) provided by Paddle Lite to configure your own compilation environment, and then download and install the proper CANN packages to complete the configuration.
+## C++ FastDeploy library compilation based on Paddle Lite
 After setting up the compilation environment, the compilation command is as follows.
 ```bash
@@ -78,13 +78,13 @@ make install
 ```
 When the compilation is complete, the fastdeploy-ascend directory is created in the current build directory, indicating that the Paddle Lite-based FastDeploy library has been compiled.
-## Compiling Python FastDeploy Libraries Based on PaddleLite
+## Compiling Python FastDeploy Libraries Based on Paddle Lite
 ```bash
 # Download the latest source code
 git clone https://github.com/PaddlePaddle/FastDeploy.git
 cd FastDeploy/python
-export WITH_ASCEND_PYTHON=ON
+export WITH_ASCEND=ON
 export ENABLE_VISION=ON
 python setup.py build
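For reference, the exported `WITH_ASCEND`/`ENABLE_VISION` switches above are read by `setup.py` (see the setup.py hunk at the end of this commit) and handed to CMake. A minimal sketch of that flow, assuming the collected configs are forwarded as `-D` defines by the surrounding build script (the forwarding itself is not part of this diff):

```python
# Minimal sketch: how env switches such as WITH_ASCEND typically reach CMake.
# Assumption: setup_configs entries are forwarded as -D defines by the build script.
import os

setup_configs = {
    "WITH_ASCEND": os.getenv("WITH_ASCEND", "OFF"),
    "ENABLE_VISION": os.getenv("ENABLE_VISION", "OFF"),
}
cmake_args = ["-D{}={}".format(key, value) for key, value in setup_configs.items()]

# After `export WITH_ASCEND=ON` and `export ENABLE_VISION=ON`:
print(cmake_args)  # ['-DWITH_ASCEND=ON', '-DENABLE_VISION=ON']
```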

View File

@@ -70,14 +70,7 @@ bool DBDetector::BatchPredict(const std::vector<cv::Mat>& images,
     return false;
   }
-  reused_input_tensors_[0].name = InputInfoOfRuntime(0).name
-      ;
-  for (int i = 0 ; i < reused_input_tensors_.size() ; i ++){
-    std::cout<<"begin to print tensor info in DET"<<std::endl;
-    reused_input_tensors_[i].PrintInfo("TensorInfo[i] IN DET: ") ;
-  }
+  reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
   if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
     FDERROR << "Failed to inference by runtime." << std::endl;
     return false;

View File

@@ -172,6 +172,7 @@ void BindPPOCRModel(pybind11::module& m) {
       .def_readwrite("mean", &vision::ocr::RecognizerPreprocessor::mean_)
       .def_readwrite("scale", &vision::ocr::RecognizerPreprocessor::scale_)
       .def_readwrite("is_scale", &vision::ocr::RecognizerPreprocessor::is_scale_)
+      .def_readwrite("static_shape", &vision::ocr::RecognizerPreprocessor::static_shape_)
       .def("run", [](vision::ocr::RecognizerPreprocessor& self, std::vector<pybind11::array>& im_list) {
         std::vector<vision::FDMat> images;
         for (size_t i = 0; i < im_list.size(); ++i) {

View File

@@ -22,24 +22,35 @@ namespace vision {
 namespace ocr {
 void OcrRecognizerResizeImage(FDMat* mat, float max_wh_ratio,
-                              const std::vector<int>& rec_image_shape) {
-  int img_c, img_h, img_w;
-  img_c = rec_image_shape[0];
+                              const std::vector<int>& rec_image_shape, bool static_shape) {
+  int img_h, img_w;
   img_h = rec_image_shape[1];
   img_w = rec_image_shape[2];
-  img_w = int(img_h * max_wh_ratio);
-  float ratio = float(mat->Width()) / float(mat->Height());
-  int resize_w;
-  if (ceilf(img_h * ratio) > img_w) {
-    resize_w = img_w;
-  }else{
-    resize_w = int(ceilf(img_h * ratio));
-  }
-  Resize::Run(mat, resize_w, img_h);
-  std::vector<float> value = {0, 0, 0};
-  Pad::Run(mat, 0, 0, 0, int(img_w - mat->Width()), value);
+  if (!static_shape) {
+    img_w = int(img_h * max_wh_ratio);
+    float ratio = float(mat->Width()) / float(mat->Height());
+    int resize_w;
+    if (ceilf(img_h * ratio) > img_w) {
+      resize_w = img_w;
+    } else {
+      resize_w = int(ceilf(img_h * ratio));
+    }
+    Resize::Run(mat, resize_w, img_h);
+    std::vector<float> value = {0, 0, 0};
+    Pad::Run(mat, 0, 0, 0, int(img_w - mat->Width()), value);
+  } else {
+    if (mat->Width() >= img_w) {
+      Resize::Run(mat, img_w, img_h);  // Resize W to 320
+    } else {
+      Resize::Run(mat, mat->Width(), img_h);
+      Pad::Run(mat, 0, 0, 0, int(img_w - mat->Width()), {0,0,0});
+      // Pad to 320
+    }
+  }
 }
void OcrRecognizerResizeImageOnAscend(FDMat* mat, void OcrRecognizerResizeImageOnAscend(FDMat* mat,
@@ -91,11 +102,7 @@ bool RecognizerPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTenso
       real_index = indices[i];
     }
     FDMat* mat = &(images->at(real_index));
-#if defined(WITH_ASCEND) || defined(WITH_ASCEND_PYTHON)
-    OcrRecognizerResizeImageOnAscend(mat, rec_image_shape_);
-#else
-    OcrRecognizerResizeImage(mat, max_wh_ratio, rec_image_shape_);
-#endif
+    OcrRecognizerResizeImage(mat, max_wh_ratio, rec_image_shape_, static_shape_);
     NormalizeAndPermute::Run(mat, mean_, scale_, is_scale_);
   }
   // Only have 1 output Tensor.
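To make the new behavior easier to follow outside the diff, here is a small NumPy/OpenCV sketch (not FastDeploy code) of the two resize paths that the `static_shape` flag selects between; the helper name `rec_resize` and the default `rec_image_shape=(3, 48, 320)` are illustrative assumptions:

```python
# Illustrative sketch only (not FastDeploy source) of OcrRecognizerResizeImage:
# resize each text crop to the recognizer height, then right-pad with zeros.
import math

import cv2
import numpy as np


def rec_resize(img, rec_image_shape=(3, 48, 320), max_wh_ratio=320.0 / 48.0,
               static_shape=False):
    _, img_h, img_w = rec_image_shape
    h, w = img.shape[:2]
    if not static_shape:
        # Dynamic path: target width follows the batch-level max_wh_ratio and the
        # crop keeps its aspect ratio, capped at that width.
        img_w = int(img_h * max_wh_ratio)
        resize_w = min(int(math.ceil(img_h * w / h)), img_w)
    else:
        # Static path: always end at exactly (img_h, img_w); wide crops are squeezed
        # down to img_w, narrow crops keep their width and get padded below.
        resize_w = img_w if w >= img_w else w
    resized = cv2.resize(img, (resize_w, img_h))
    # Right-pad with zeros up to img_w, mirroring Pad::Run in the C++ code.
    return cv2.copyMakeBorder(resized, 0, 0, 0, img_w - resize_w,
                              cv2.BORDER_CONSTANT, value=(0, 0, 0))


crop = np.zeros((32, 100, 3), np.uint8)
print(rec_resize(crop, static_shape=True).shape)   # (48, 320, 3)
print(rec_resize(crop, max_wh_ratio=10.0).shape)   # (48, 480, 3)
```

With `static_shape=True` every crop comes out at exactly `img_h x img_w`, which suits fixed-input-shape backends such as the Paddle Lite Ascend path this commit targets; with `static_shape=False` the padded width still varies with the batch-level `max_wh_ratio`.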

View File

@@ -39,6 +39,7 @@ class FASTDEPLOY_DECL RecognizerPreprocessor {
   std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
   std::vector<float> scale_ = {0.5f, 0.5f, 0.5f};
   bool is_scale_ = true;
+  bool static_shape_ = false;
 };
 } // namespace ocr

View File

@@ -85,12 +85,6 @@ bool Recognizer::BatchPredict(const std::vector<cv::Mat>& images,
     return false;
   }
-  for (int i = 0 ; i < reused_input_tensors_.size() ; i ++){
-    std::cout<<"begin to print tensor info in REC"<<std::endl;
-    reused_input_tensors_[i].PrintInfo("TensorInfo[i] IN REC: ") ;
-  }
   reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
   if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
     FDERROR << "Failed to inference by runtime." << std::endl;

View File

@@ -508,6 +508,17 @@ class RecognizerPreprocessor:
         """
         return self._preprocessor.run(input_ims)

+    @property
+    def static_shape(self):
+        return self._preprocessor.static_shape
+
+    @static_shape.setter
+    def static_shape(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `static_shape` must be type of bool."
+        self._preprocessor.static_shape = value
+
     @property
     def is_scale(self):
         return self._preprocessor.is_scale
@@ -626,6 +637,17 @@ class Recognizer(FastDeployModel):
     def postprocessor(self, value):
         self._model.postprocessor = value

+    @property
+    def static_shape(self):
+        return self._model.preprocessor.static_shape
+
+    @static_shape.setter
+    def static_shape(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `static_shape` must be type of bool."
+        self._model.preprocessor.static_shape = value
+
     @property
     def is_scale(self):
         return self._model.preprocessor.is_scale
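From the user side, the new property can be toggled directly on the recognizer. A hypothetical usage sketch follows; the model/label file paths and the `RuntimeOption` setup are placeholders, not part of this commit:

```python
# Hypothetical usage of the new static_shape switch; file paths are placeholders.
import fastdeploy as fd

option = fd.RuntimeOption()  # configure the desired backend (e.g. Paddle Lite on Ascend) here

rec = fd.vision.ocr.Recognizer(
    "rec_model/inference.pdmodel",
    "rec_model/inference.pdiparams",
    "ppocr_keys_v1.txt",
    runtime_option=option)

# Resize/pad every text crop to the recognizer's fixed input shape instead of the
# dynamic max_wh_ratio-based width, as fixed-shape backends expect.
rec.static_shape = True
assert isinstance(rec.static_shape, bool)
```

The same flag is also reachable as `RecognizerPreprocessor.static_shape` through the pybind binding added above.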

View File

@@ -56,7 +56,7 @@ if os.getenv("BUILD_ON_CPU", "OFF") == "ON":
 setup_configs = dict()
 setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND",
                                                    "OFF")
-setup_configs["WITH_ASCEND_PYTHON"] = os.getenv("WITH_ASCEND_PYTHON", "OFF")
+setup_configs["WITH_ASCEND"] = os.getenv("WITH_ASCEND", "OFF")
 setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")
 setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND",
                                                      "OFF")