[Model] 更新SCRFD模型demo以及文档 (#1108)

* 更新scrfd

* 更新scrfd
This commit is contained in:
Zheng-Bicheng
2023-01-11 13:56:17 +08:00
committed by GitHub
parent 9d288962d8
commit dd01b3ca0b
7 changed files with 78 additions and 100 deletions

View File

@@ -10,7 +10,7 @@
wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/scrfd_500m_bnkps_shape640x640.zip
unzip scrfd_500m_bnkps_shape640x640.zip
python /Path/To/FastDeploy/tools/rknpu2/export.py \
--config_path tools/rknpu2/config/scrfd.yaml \
--config_path tools/rknpu2/config/scrfd_quantized.yaml \
--target_platform rk3588
```

View File

@@ -1,36 +1,15 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
project(rknpu_test)
set(CMAKE_CXX_STANDARD 14)
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# 指定下载解压后的fastdeploy库路径
set(FASTDEPLOY_INSTALL_DIR "thirdpartys/fastdeploy-0.7.0")
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeployConfig.cmake)
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(rknpu_test infer.cc)
target_link_libraries(rknpu_test
${FastDeploy_LIBS}
)
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
# 添加FastDeploy依赖头文件
include_directories(${FASTDEPLOY_INCS})
install(TARGETS rknpu_test DESTINATION ./)
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
install(DIRECTORY model DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
message("${FASTDEPLOY_LIBS}")
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
file(GLOB ONNXRUNTIME_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/onnxruntime/lib/*)
install(PROGRAMS ${ONNXRUNTIME_LIBS} DESTINATION lib)
install(DIRECTORY ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib DESTINATION ./)
file(GLOB PADDLETOONNX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddle2onnx/lib/*)
install(PROGRAMS ${PADDLETOONNX_LIBS} DESTINATION lib)
file(GLOB RKNPU2_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/rknpu2_runtime/RK3588/lib/*)
install(PROGRAMS ${RKNPU2_LIBS} DESTINATION lib)
# 添加FastDeploy库依赖
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})

View File

@@ -1,66 +1,37 @@
[English](README.md) | 简体中文
# SCRFD C++部署示例
本目录下提供`infer.cc`快速完成SCRFD在NPU加速部署的示例。
本目录下提供`infer.cc`在RK356X上快速完成SCRFD在NPU加速部署的示例。
在部署前,需确认以下两个步骤:
1. 软硬件环境满足要求
2. 根据开发环境下载预编译部署库或者从头编译FastDeploy仓库
以上步骤请参考[RK2代NPU部署库编译](../../../../../../docs/cn/build_and_install/rknpu2.md)实现
## 生成基本目录文件
该例程由以下几个部分组成
```text
.
├── CMakeLists.txt
├── build # 编译文件夹
├── image # 存放图片的文件夹
├── infer.cc
├── model # 存放模型文件的文件夹
└── thirdpartys # 存放sdk的文件夹
```
首先需要先生成目录结构
```bash
mkdir build
mkdir images
mkdir model
mkdir thirdpartys
```
以上步骤请参考[RK2代NPU文档导航](../../../../../../docs/cn/build_and_install/rknpu2.md)实现
## 编译
### 编译并拷贝SDK到thirdpartys文件夹
请参考[RK2代NPU部署库编译](../../../../../../docs/cn/build_and_install/rknpu2.md)仓库编译SDK。编译完成后，将在build目录下生成
fastdeploy-0.7.0目录，请移动它至thirdpartys目录下。
### 拷贝模型文件至model文件夹
请参考[SCRFD模型转换文档](../README.md)转换SCRFD ONNX模型到RKNN模型,再将RKNN模型移动到model文件夹。
### 准备测试图片至image文件夹
```bash
wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg
cp test_lite_face_detector_3.jpg ./images
```
### 编译example
```bash
mkdir build
cd build
cmake ..
# 下载FastDeploy预编译库，用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-rk356X-x.x.x.tgz
tar -xzvf fastdeploy-linux-aarch64-rk356X-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-aarch64-rk356X
make -j8
make install
```
## 运行例程
```bash
cd ./build/install
export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH}
./rknpu_test
#下载官方转换好的SCRFD模型文件和测试图片
wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/scrfd_500m_bnkps_shape640x640_rknpu2.zip
unzip scrfd_500m_bnkps_shape640x640_rknpu2.zip
wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg
./infer_demo scrfd_500m_bnkps_shape640x640_rknpu2/scrfd_500m_bnkps_shape640x640_rk3568_quantized.rknn \
test_lite_face_detector_3.jpg \
1
```
运行完成后，可视化结果如下图所示

View File

@@ -1,16 +1,16 @@
#include <iostream>
#include <string>
#include "fastdeploy/vision.h"
void ONNXInfer(const std::string& model_dir, const std::string& image_file) {
std::string model_file = model_dir + "/scrfd_500m_bnkps_shape640x640.onnx";
void ONNXInfer(const std::string& model_file, const std::string& image_file) {
std::string params_file;
auto option = fastdeploy::RuntimeOption();
option.UseCpu();
auto format = fastdeploy::ModelFormat::ONNX;
auto model = fastdeploy::vision::facedet::SCRFD(
model_file, params_file, option, format);
auto model = fastdeploy::vision::facedet::SCRFD(model_file, params_file,
option, format);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
@@ -31,19 +31,17 @@ void ONNXInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << res.Str() << std::endl;
cv::imwrite("infer_onnx.jpg", vis_im);
std::cout
<< "Visualized result saved in ./infer_onnx.jpg"
<< std::endl;
std::cout << "Visualized result saved in ./infer_onnx.jpg" << std::endl;
}
void RKNPU2Infer(const std::string& model_dir, const std::string& image_file) {
std::string model_file = model_dir + "/scrfd_500m_bnkps_shape640x640_rk3588.rknn";
void RKNPU2Infer(const std::string& model_file, const std::string& image_file) {
std::string params_file;
auto option = fastdeploy::RuntimeOption();
option.UseRKNPU2();
auto format = fastdeploy::ModelFormat::RKNN;
auto model = fastdeploy::vision::facedet::SCRFD(model_file, params_file, option, format);
auto model = fastdeploy::vision::facedet::SCRFD(model_file, params_file,
option, format);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
@@ -66,21 +64,25 @@ void RKNPU2Infer(const std::string& model_dir, const std::string& image_file) {
std::cout << res.Str() << std::endl;
cv::imwrite("infer_rknn.jpg", vis_im);
std::cout
<< "Visualized result saved in ./infer_rknn.jpg"
<< std::endl;
std::cout << "Visualized result saved in ./infer_rknn.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
if (argc < 4) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./picodet_model_dir ./test.jpeg"
<< "Usage: infer_demo path/to/model path/to/image run_option, "
"e.g ./infer_model scrfd_500m_bnkps_shape640x640.onnx ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, "
"0: run with cpu; 1: run with rknpu2."
<< std::endl;
return -1;
}
RKNPU2Infer(argv[1], argv[2]);
ONNXInfer(argv[1], argv[2]);
if (std::atoi(argv[3]) == 0) {
ONNXInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 1) {
RKNPU2Infer(argv[1], argv[2]);
}
return 0;
}

View File

@@ -11,5 +11,5 @@ std:
model_path: ./scrfd_500m_bnkps_shape640x640.onnx
outputs_nodes:
do_quantization: True
dataset: "./datasets.txt"
dataset: "./dataset.txt"
output_folder: "./"

View File

@@ -0,0 +1,15 @@
mean:
-
- 128.5
- 128.5
- 128.5
std:
-
- 128.5
- 128.5
- 128.5
model_path: ./scrfd_500m_bnkps_shape640x640.onnx
outputs_nodes:
do_quantization: False
dataset: "./dataset.txt"
output_folder: "./"

View File

@@ -37,17 +37,24 @@ if __name__ == "__main__":
# Config
mean_values = yaml_config["mean"]
std_values = yaml_config["std"]
model.config(mean_values=mean_values, std_values=std_values, target_platform=config.target_platform)
model.config(
mean_values=mean_values,
std_values=std_values,
target_platform=config.target_platform)
# Load ONNX model
if yaml_config["outputs_nodes"] is None:
ret = model.load_onnx(model=yaml_config["model_path"])
else:
ret = model.load_onnx(model=yaml_config["model_path"], outputs=yaml_config["outputs_nodes"])
ret = model.load_onnx(
model=yaml_config["model_path"],
outputs=yaml_config["outputs_nodes"])
assert ret == 0, "Load model failed!"
# Build model
ret = model.build(do_quantization=yaml_config["do_quantization"], dataset=yaml_config["dataset"])
ret = model.build(
do_quantization=yaml_config["do_quantization"],
dataset=yaml_config["dataset"])
assert ret == 0, "Build model failed!"
# Init Runtime
@@ -60,7 +67,11 @@ if __name__ == "__main__":
model_base_name = os.path.basename(yaml_config["model_path"]).split(".")[0]
model_device_name = config.target_platform.lower()
model_save_name = model_base_name + "_" + model_device_name + ".rknn"
ret = model.export_rknn(os.path.join(yaml_config["output_folder"], model_save_name))
if yaml_config["do_quantization"]:
model_save_name = model_base_name + "_" + model_device_name + "_quantized" + ".rknn"
else:
model_save_name = model_base_name + "_" + model_device_name + "_unquantized" + ".rknn"
ret = model.export_rknn(
os.path.join(yaml_config["output_folder"], model_save_name))
assert ret == 0, "Export rknn model failed!"
print("Export OK!")