diff --git a/examples/vision/detection/paddledetection/rknpu2/README.md b/examples/vision/detection/paddledetection/rknpu2/README.md
index d5f339db5..98f1ada10 100644
--- a/examples/vision/detection/paddledetection/rknpu2/README.md
+++ b/examples/vision/detection/paddledetection/rknpu2/README.md
@@ -113,5 +113,7 @@ Preprocess:
     type: Resize
 ```
 
+## 其他链接
+- [Cpp部署](./cpp)
 - [Python部署](./python)
 - [视觉模型预测结果](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/detection/rkyolo/README.md b/examples/vision/detection/rkyolo/README.md
new file mode 100644
index 000000000..015e22506
--- /dev/null
+++ b/examples/vision/detection/rkyolo/README.md
@@ -0,0 +1,18 @@
+# Preparing RKYOLO Deployment Models
+
+RKYOLO wraps the RKYOLO series of models based on the code in
+[rknn_model_zoo](https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo).
+Deployment of the RKYOLOV5 series is currently supported.
+
+## List of Supported Models
+
+* RKYOLOV5
+
+## Model Conversion Example
+
+Please refer to [RKNN_model_convert](https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo/RKNN_model_convert).
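+
+For orientation, the following is a minimal sketch of what such a conversion script looks like with the `rknn-toolkit2` Python API. The file names (`yolov5s.onnx`, `dataset.txt`) and the target platform are placeholders; the exact preprocessing parameters must match the linked conversion config.
+
+```python
+from rknn.api import RKNN
+
+rknn = RKNN()
+# Normalization is embedded into the RKNN model at this step, so
+# FastDeploy must not normalize again at inference time.
+rknn.config(mean_values=[[0, 0, 0]],
+            std_values=[[255, 255, 255]],
+            target_platform='rk3588')
+rknn.load_onnx(model='yolov5s.onnx')
+# Quantize to INT8 with a calibration dataset (one image path per line).
+rknn.build(do_quantization=True, dataset='./dataset.txt')
+rknn.export_rknn('yolov5s.rknn')
+rknn.release()
+```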
+
+## Related Links
+- [Cpp deployment](./cpp)
+- [Python deployment](./python)
+- [Vision model prediction results](../../../../docs/api/vision_results/)
diff --git a/examples/vision/detection/rkyolo/cpp/CMakeLists.txt b/examples/vision/detection/rkyolo/cpp/CMakeLists.txt
new file mode 100644
index 000000000..524b94fea
--- /dev/null
+++ b/examples/vision/detection/rkyolo/cpp/CMakeLists.txt
@@ -0,0 +1,37 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
+project(rknpu2_test)
+
+set(CMAKE_CXX_STANDARD 14)
+
+# Path of the downloaded and extracted FastDeploy library
+set(FASTDEPLOY_INSTALL_DIR "thirdpartys/fastdeploy-0.0.3")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeployConfig.cmake)
+include_directories(${FastDeploy_INCLUDE_DIRS})
+
+add_executable(infer_rkyolo infer_rkyolo.cc)
+target_link_libraries(infer_rkyolo ${FastDeploy_LIBS})
+
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
+
+install(TARGETS infer_rkyolo DESTINATION ./)
+
+install(DIRECTORY model DESTINATION ./)
+install(DIRECTORY images DESTINATION ./)
+
+file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
+message("${FASTDEPLOY_LIBS}")
+install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
+
+file(GLOB ONNXRUNTIME_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/onnxruntime/lib/*)
+install(PROGRAMS ${ONNXRUNTIME_LIBS} DESTINATION lib)
+
+install(DIRECTORY ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib DESTINATION ./)
+
+file(GLOB PADDLETOONNX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddle2onnx/lib/*)
+install(PROGRAMS ${PADDLETOONNX_LIBS} DESTINATION lib)
+
+file(GLOB RKNPU2_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/rknpu2_runtime/${RKNN2_TARGET_SOC}/lib/*)
+install(PROGRAMS ${RKNPU2_LIBS} DESTINATION lib)
diff --git a/examples/vision/detection/rkyolo/cpp/README.md b/examples/vision/detection/rkyolo/cpp/README.md
new file mode 100644
index 000000000..16f8df72d
--- /dev/null
+++ b/examples/vision/detection/rkyolo/cpp/README.md
@@ -0,0 +1,69 @@
+# RKYOLO C++ Deployment Example
+
+This directory provides `infer_rkyolo.cc`, an example of quickly deploying RKYOLO models on Rockchip boards with acceleration by the second-generation NPU.
+
+Before deployment, confirm the following two steps:
+
+1. The hardware and software environment meets the requirements
+2. Download the prebuilt deployment library, or build FastDeploy from source, according to your development environment
+
+For both steps, please refer to [Building the RKNPU2 deployment library](../../../../../docs/cn/build_and_install/rknpu2.md).
+
+## Generating the basic directory layout
+
+This example consists of the following parts:
+```text
+.
+├── CMakeLists.txt
+├── build        # build directory
+├── images       # directory for test images
+├── infer_rkyolo.cc
+├── model        # directory for model files
+└── thirdpartys  # directory for the SDK
+```
+
+First create the directory structure:
+```bash
+mkdir build
+mkdir images
+mkdir model
+mkdir thirdpartys
+```
+
+## Building
+
+### Build the SDK and copy it into the thirdpartys directory
+
+Please build the SDK as described in [Building the RKNPU2 deployment library](../../../../../docs/cn/build_and_install/rknpu2.md). After the build finishes, a fastdeploy-0.0.3 directory is generated under the build directory; move it into the thirdpartys directory.
+
+### Copy the model and configuration files into the model directory
+
+The Paddle dynamic graph model -> Paddle static graph model -> ONNX model pipeline produces an ONNX file and a corresponding yaml configuration file; place the configuration file in the model directory. The model file converted to RKNN also needs to be copied into model.
+
+### Prepare test images in the images directory
+```bash
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+cp 000000014439.jpg ./images
+```
+
+### Build the example
+
+```bash
+cd build
+cmake ..
+make -j8
+make install
+```
+
+## Running the example
+
+```bash
+cd ./build/install
+./infer_rkyolo model/<your_model>.rknn images/000000014439.jpg
+```
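+
+By default the example calls `option.UseRKNPU2()` with its default arguments. As a hedged sketch (assuming the `UseRKNPU2` overload with CPU name and core mask exposed by the FastDeploy RKNPU2 backend; verify the enum values against your FastDeploy version), the model can be pinned to a specific NPU core on multi-core SoCs such as the RK3588:
+
+```c++
+fastdeploy::RuntimeOption option;
+// Select the target SoC and the NPU core(s) to run on.
+option.UseRKNPU2(fastdeploy::rknpu2::CpuName::RK3588,
+                 fastdeploy::rknpu2::CoreMask::RKNN_NPU_CORE_0);
+```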
+
+- [Model description](../)
+- [Python deployment](../python)
+- [Vision model prediction results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/detection/rkyolo/cpp/infer_rkyolo.cc b/examples/vision/detection/rkyolo/cpp/infer_rkyolo.cc
new file mode 100644
index 000000000..c7c1be43b
--- /dev/null
+++ b/examples/vision/detection/rkyolo/cpp/infer_rkyolo.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "fastdeploy/vision.h"
+
+void RKNPU2Infer(const std::string& model_file, const std::string& image_file) {
+  auto option = fastdeploy::RuntimeOption();
+  option.UseRKNPU2();
+
+  auto format = fastdeploy::ModelFormat::RKNN;
+
+  auto model =
+      fastdeploy::vision::detection::RKYOLOV5(model_file, option, format);
+
+  auto im = cv::imread(image_file);
+
+  fastdeploy::vision::DetectionResult res;
+  if (!model.Predict(im, &res)) {
+    std::cerr << "Failed to predict." << std::endl;
+    return;
+  }
+  std::cout << res.Str() << std::endl;
+  auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
+  cv::imwrite("vis_result.jpg", vis_im);
+  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+  if (argc < 3) {
+    std::cout << "Usage: infer_rkyolo path/to/model.rknn path/to/image, "
+                 "e.g ./infer_rkyolo ./model/<your_model>.rknn ./test.jpeg"
+              << std::endl;
+    return -1;
+  }
+
+  RKNPU2Infer(argv[1], argv[2]);
+
+  return 0;
+}
diff --git a/examples/vision/detection/rkyolo/python/README.md b/examples/vision/detection/rkyolo/python/README.md
new file mode 100644
index 000000000..4549ec0f9
--- /dev/null
+++ b/examples/vision/detection/rkyolo/python/README.md
@@ -0,0 +1,34 @@
+# RKYOLO Python Deployment Example
+
+Before deployment, confirm the following two steps:
+
+- 1. The hardware and software environment meets the requirements; refer to [FastDeploy environment requirements](../../../../../docs/cn/build_and_install/rknpu2.md)
+- 2. Download the prebuilt deployment library, or build FastDeploy from source, according to your development environment
+
+This directory provides `infer.py`, an example of quickly deploying RKYOLO on the RKNPU. Run the following script to complete the deployment:
+
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/rkyolo/python
+
+# Download the test image
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+# Copy the converted RKNN model into this directory
+cp -r ./model /path/to/FastDeploy/examples/vision/detection/rkyolo/python
+
+# Inference
+python3 infer.py --model_file ./model/<your_model>.rknn \
+                 --image 000000014439.jpg
+```
+
+## Notes
+The RKNPU expects model inputs in NHWC format, and image normalization is embedded into the model when it is converted to RKNN. Therefore, when deploying with FastDeploy, the preprocessing stage does not normalize again; it only performs letterbox resizing and the BGR-to-RGB conversion.
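+
+The remaining preprocess and postprocess parameters can still be tuned from Python through the loaded model. A minimal hedged sketch using the properties exposed by the bindings (the model path is a placeholder):
+
+```python
+import fastdeploy as fd
+
+option = fd.RuntimeOption()
+option.use_rknpu2()
+model = fd.vision.detection.RKYOLOV5(
+    "./model/<your_model>.rknn",
+    runtime_option=option,
+    model_format=fd.ModelFormat.RKNN)
+
+# Letterbox parameters exposed by RKYOLOPreprocessor
+print(model.preprocessor.size)           # [640, 640] by default
+model.preprocessor.padding_value = [114.0, 114.0, 114.0]
+
+# Score/NMS thresholds exposed by RKYOLOPostprocessor
+model.postprocessor.conf_threshold = 0.3
+model.postprocessor.nms_threshold = 0.45
+```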
+
+## Other documents
+
+- [RKYOLO model description](..)
+- [RKYOLO C++ deployment](../cpp)
+- [Description of vision model prediction results](../../../../../docs/api/vision_results/)
+- [Converting RKYOLO RKNN models](../README.md)
diff --git a/examples/vision/detection/rkyolo/python/infer.py b/examples/vision/detection/rkyolo/python/infer.py
new file mode 100644
index 000000000..38eea0e8c
--- /dev/null
+++ b/examples/vision/detection/rkyolo/python/infer.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import fastdeploy as fd
+import cv2
+
+
+def parse_arguments():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model_file", required=True, help="Path of rknn model.")
+    parser.add_argument(
+        "--image", type=str, required=True, help="Path of test image file.")
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_arguments()
+
+    model_file = args.model_file
+
+    # Configure the runtime and load the model
+    runtime_option = fd.RuntimeOption()
+    runtime_option.use_rknpu2()
+
+    model = fd.vision.detection.RKYOLOV5(
+        model_file,
+        runtime_option=runtime_option,
+        model_format=fd.ModelFormat.RKNN)
+
+    # Predict the detection result for an image
+    im = cv2.imread(args.image)
+    result = model.predict(im)
+    print(result)
+
+    # Visualize the result
+    vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+    cv2.imwrite("visualized_result.jpg", vis_im)
+    print("Visualized result saved in ./visualized_result.jpg")
diff --git a/fastdeploy/backends/rknpu/rknpu2/rknpu2_backend.cc b/fastdeploy/backends/rknpu/rknpu2/rknpu2_backend.cc
index 16edf7561..b577c2791 100644
--- a/fastdeploy/backends/rknpu/rknpu2/rknpu2_backend.cc
+++ b/fastdeploy/backends/rknpu/rknpu2/rknpu2_backend.cc
@@ -190,6 +190,8 @@ bool RKNPU2Backend::GetModelInputOutputInfos() {
       FDERROR << "rknpu2_backend only support input format is NHWC or UNDEFINED"
               << std::endl;
     }
+    DumpTensorAttr(input_attrs_[i]);
+
     // copy input_attrs_ to input tensor info
     std::string temp_name = input_attrs_[i].name;
     std::vector<int> temp_shape{};
@@ -234,6 +236,8 @@ bool RKNPU2Backend::GetModelInputOutputInfos() {
               << std::endl;
     }
 
+    DumpTensorAttr(output_attrs_[i]);
+
     // copy output_attrs_ to output tensor
     std::string temp_name = output_attrs_[i].name;
     std::vector<int> temp_shape{};
@@ -342,7 +346,6 @@ bool RKNPU2Backend::Infer(std::vector<FDTensor>& inputs,
       return false;
     }
     // default output type is depend on model, this requires float32 to compute top5
-    output_attrs_[i].type = RKNN_TENSOR_FLOAT32;
     ret = rknn_set_io_mem(ctx, output_mems_[i], &output_attrs_[i]);
     // set output memory and attribute
     if (ret != RKNN_SUCC) {
@@ -389,6 +392,8 @@ bool RKNPU2Backend::Infer(std::vector<FDTensor>& inputs,
     }
     (*outputs)[i].Resize(temp_shape, outputs_desc_[i].dtype,
                         outputs_desc_[i].name);
+    std::vector<float> output_scale = {output_attrs_[i].scale};
+    (*outputs)[i].SetQuantizationInfo(output_attrs_[i].zp, output_scale);
     memcpy((*outputs)[i].MutableData(), (float*)output_mems_[i]->virt_addr,
            (*outputs)[i].Nbytes());
   }
diff --git a/fastdeploy/core/fd_tensor.cc b/fastdeploy/core/fd_tensor.cc
index e84535ac9..484e03913 100644
--- a/fastdeploy/core/fd_tensor.cc
+++ b/fastdeploy/core/fd_tensor.cc
@@ -138,6 +138,11 @@ void FDTensor::Resize(const std::vector<int64_t>& new_shape) {
   external_data_ptr = nullptr;
 }
 
+void FDTensor::SetQuantizationInfo(int32_t zero_point,
+                                   const std::vector<float>& scale) {
+  quantized_parameter_.first = zero_point;
+  quantized_parameter_.second = scale;
+}
+
 void FDTensor::Resize(const std::vector<int64_t>& new_shape,
                       const FDDataType& data_type,
                       const std::string& tensor_name,
@@ -450,4 +455,9 @@ FDTensor& FDTensor::operator=(FDTensor&& other) {
   return *this;
 }
 
+const std::pair<int32_t, std::vector<float>>&
+FDTensor::GetQuantizationInfo() const {
+  return quantized_parameter_;
+}
+
 }  // namespace fastdeploy
diff --git a/fastdeploy/core/fd_tensor.h b/fastdeploy/core/fd_tensor.h
index 3c79b0c88..e3e373c43 100644
--- a/fastdeploy/core/fd_tensor.h
+++ b/fastdeploy/core/fd_tensor.h
@@ -25,6 +25,11 @@ namespace fastdeploy {
 
 struct FASTDEPLOY_DECL FDTensor {
+  // Quantization parameters (zero point and scales) shared by quantized
+  // models; filled by backends such as RKNPU2.
+  std::pair<int32_t, std::vector<float>> quantized_parameter_ = {0, {0}};
+  void SetQuantizationInfo(int32_t zero_point, const std::vector<float>& scale);
+  const std::pair<int32_t, std::vector<float>>& GetQuantizationInfo() const;
+
   // std::vector<int8_t> data;
   void* buffer_ = nullptr;
   std::vector<int64_t> shape = {0};
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index 9bea1550e..5ccaeb2dd 100644
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -28,6 +28,7 @@
 #include "fastdeploy/vision/detection/contrib/yolov7end2end_ort.h"
 #include "fastdeploy/vision/detection/contrib/yolov7end2end_trt.h"
 #include "fastdeploy/vision/detection/contrib/yolox.h"
+#include "fastdeploy/vision/detection/contrib/rknpu2/model.h"
 #include "fastdeploy/vision/detection/ppdet/model.h"
 #include "fastdeploy/vision/facealign/contrib/face_landmark_1000.h"
 #include "fastdeploy/vision/facealign/contrib/pfld.h"
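The hunks above make INT8 outputs self-describing: the RKNPU2 backend now stores each output's zero point and scale on the `FDTensor` it returns. A minimal sketch of a consumer (assuming a per-tensor scale, which is what the backend stores — exactly one scale per output):

```c++
#include "fastdeploy/core/fd_tensor.h"

// Dequantize one element of an INT8 tensor: f32 = (q - zero_point) * scale.
float DequantizeAt(const fastdeploy::FDTensor& tensor, size_t idx) {
  const auto& quant = tensor.GetQuantizationInfo();  // {zero_point, scales}
  const int32_t zp = quant.first;
  const float scale = quant.second[0];               // per-tensor scale
  const int8_t* data = reinterpret_cast<const int8_t*>(tensor.Data());
  return (static_cast<float>(data[idx]) - static_cast<float>(zp)) * scale;
}
```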
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/model.h b/fastdeploy/vision/detection/contrib/rknpu2/model.h
new file mode 100644
index 000000000..9a0fd423d
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/model.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/detection/contrib/rknpu2/rkyolo.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+class FASTDEPLOY_DECL RKYOLOV5 : public RKYOLO {
+ public:
+  /** \brief Set path of model file and the configuration of runtime
+   *
+   * \param[in] model_file Path of model file, e.g ./yolov5.rknn
+   * \param[in] custom_option RuntimeOption for inference, the default will use the RKNPU2 backend
+   * \param[in] model_format Model format of the loaded model, default is RKNN format
+   */
+  RKYOLOV5(const std::string& model_file,
+           const RuntimeOption& custom_option = RuntimeOption(),
+           const ModelFormat& model_format = ModelFormat::RKNN)
+      : RKYOLO(model_file, custom_option, model_format) {
+    valid_cpu_backends = {};
+    valid_gpu_backends = {};
+    valid_rknpu_backends = {Backend::RKNPU2};
+    GetPostprocessor().SetModelType(ModelType::RKYOLOV5);
+  }
+
+  virtual std::string ModelName() const { return "RKYOLOV5"; }
+};
+
+class FASTDEPLOY_DECL RKYOLOV7 : public RKYOLO {
+ public:
+  /** \brief Set path of model file and the configuration of runtime
+   *
+   * \param[in] model_file Path of model file, e.g ./yolov7.rknn
+   * \param[in] custom_option RuntimeOption for inference, the default will use the RKNPU2 backend
+   * \param[in] model_format Model format of the loaded model, default is RKNN format
+   */
+  RKYOLOV7(const std::string& model_file,
+           const RuntimeOption& custom_option = RuntimeOption(),
+           const ModelFormat& model_format = ModelFormat::RKNN)
+      : RKYOLO(model_file, custom_option, model_format) {
+    valid_cpu_backends = {};
+    valid_gpu_backends = {};
+    valid_rknpu_backends = {Backend::RKNPU2};
+    GetPostprocessor().SetModelType(ModelType::RKYOLOV7);
+  }
+
+  virtual std::string ModelName() const { return "RKYOLOV7"; }
+};
+
+class FASTDEPLOY_DECL RKYOLOX : public RKYOLO {
+ public:
+  /** \brief Set path of model file and the configuration of runtime
+   *
+   * \param[in] model_file Path of model file, e.g ./yolox.rknn
+   * \param[in] custom_option RuntimeOption for inference, the default will use the RKNPU2 backend
+   * \param[in] model_format Model format of the loaded model, default is RKNN format
+   */
+  RKYOLOX(const std::string& model_file,
+          const RuntimeOption& custom_option = RuntimeOption(),
+          const ModelFormat& model_format = ModelFormat::RKNN)
+      : RKYOLO(model_file, custom_option, model_format) {
+    valid_cpu_backends = {};
+    valid_gpu_backends = {};
+    valid_rknpu_backends = {Backend::RKNPU2};
+    GetPostprocessor().SetModelType(ModelType::RKYOLOX);
+  }
+
+  virtual std::string ModelName() const { return "RKYOLOX"; }
+};
+
+}  // namespace detection
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/postprocessor.cc b/fastdeploy/vision/detection/contrib/rknpu2/postprocessor.cc
new file mode 100755
index 000000000..bb46eff5c
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/postprocessor.cc
@@ -0,0 +1,239 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/detection/contrib/rknpu2/postprocessor.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+RKYOLOPostprocessor::RKYOLOPostprocessor() {}
+
+void RKYOLOPostprocessor::SetModelType(ModelType model_type) {
+  model_type_ = model_type;
+  if (model_type == RKYOLOV5) {
+    anchors_ = {10, 13, 16,  30,  33, 23,  30,  61,  62,
+                45, 59, 119, 116, 90, 156, 198, 373, 326};
+    anchor_per_branch_ = 3;
+  } else if (model_type == RKYOLOX) {
+    // YOLOX is anchor free: one prediction per grid cell, anchors unused.
+    anchors_ = {10, 13, 16,  30,  33, 23,  30,  61,  62,
+                45, 59, 119, 116, 90, 156, 198, 373, 326};
+    anchor_per_branch_ = 1;
+  } else if (model_type == RKYOLOV7) {
+    anchors_ = {12, 16, 19,  36,  40,  28,  36,  75,  76,
+                55, 72, 146, 142, 110, 192, 243, 459, 401};
+    anchor_per_branch_ = 3;
+  } else {
+    return;
+  }
+}
+
+bool RKYOLOPostprocessor::Run(const std::vector<FDTensor>& tensors,
+                              std::vector<DetectionResult>* results) {
+  if (model_type_ == ModelType::UNKNOWN) {
+    FDERROR << "RKYOLO only supports YOLOV5, YOLOV7 and YOLOX." << std::endl;
+    return false;
+  }
+
+  results->resize(tensors[0].shape[0]);
+  for (int num = 0; num < tensors[0].shape[0]; ++num) {
+    int validCount = 0;
+    std::vector<float> filterBoxes;
+    std::vector<float> boxesScore;
+    std::vector<int> classId;
+    for (size_t i = 0; i < tensors.size(); i++) {
+      auto tensor_shape = tensors[i].shape;
+      auto skip_num = std::accumulate(tensor_shape.begin(), tensor_shape.end(),
+                                      1, std::multiplies<int>());
+      int skip_address = num * skip_num;
+      int stride = strides_[i];
+      int grid_h = height_ / stride;
+      int grid_w = width_ / stride;
+      int* anchor = &(anchors_.data()[i * 2 * anchor_per_branch_]);
+      if (tensors[i].dtype == FDDataType::INT8 ||
+          tensors[i].dtype == FDDataType::UINT8) {
+        auto quantization_info = tensors[i].GetQuantizationInfo();
+        validCount = validCount +
+                     ProcessInt8((int8_t*)tensors[i].Data() + skip_address,
+                                 anchor, grid_h, grid_w, stride, filterBoxes,
+                                 boxesScore, classId, conf_threshold_,
+                                 quantization_info.first,
+                                 quantization_info.second[0]);
+      } else {
+        FDERROR << "RKYOLO only supports INT8 models." << std::endl;
+      }
+    }
+
+    // If nothing was detected on this image, leave its result empty and
+    // continue with the next image in the batch.
+    if (validCount <= 0) {
+      FDINFO << "The number of detected objects is 0." << std::endl;
+      continue;
+    }
+
+    std::vector<int> indexArray;
+    for (int i = 0; i < validCount; ++i) {
+      indexArray.push_back(i);
+    }
+
+    QuickSortIndiceInverse(boxesScore, 0, validCount - 1, indexArray);
+
+    // NMS runs per class for YOLOv5/YOLOv7 and class-agnostically for YOLOX.
+    if (model_type_ == RKYOLOV5 || model_type_ == RKYOLOV7) {
+      NMS(validCount, filterBoxes, classId, indexArray, nms_threshold_, false);
+    } else if (model_type_ == RKYOLOX) {
+      NMS(validCount, filterBoxes, classId, indexArray, nms_threshold_, true);
+    }
+
+    int last_count = 0;
+    (*results)[num].Clear();
+    (*results)[num].Reserve(validCount);
+
+    /* box valid detect target */
+    for (int i = 0; i < validCount; ++i) {
+      if (indexArray[i] == -1 || boxesScore[i] < conf_threshold_ ||
+          last_count >= obj_num_bbox_max_size) {
+        continue;
+      }
+      int n = indexArray[i];
+      float x1 = filterBoxes[n * 4 + 0];
+      float y1 = filterBoxes[n * 4 + 1];
+      float x2 = x1 + filterBoxes[n * 4 + 2];
+      float y2 = y1 + filterBoxes[n * 4 + 3];
+      int id = classId[n];
+      // Undo the letterbox padding and scaling to map boxes back to the
+      // original image coordinates.
+      (*results)[num].boxes.emplace_back(std::array<float, 4>{
+          (float)((clamp(x1, 0, width_) - pad_hw_values_[num][1] / 2) /
+                  scale_[num]),
+          (float)((clamp(y1, 0, height_) - pad_hw_values_[num][0] / 2) /
+                  scale_[num]),
+          (float)((clamp(x2, 0, width_) - pad_hw_values_[num][1] / 2) /
+                  scale_[num]),
+          (float)((clamp(y2, 0, height_) - pad_hw_values_[num][0] / 2) /
+                  scale_[num])});
+      (*results)[num].label_ids.push_back(id);
+      (*results)[num].scores.push_back(boxesScore[i]);
+      last_count++;
+    }
+  }
+  return true;
+}
+
+int RKYOLOPostprocessor::ProcessInt8(int8_t* input, int* anchor, int grid_h,
+                                     int grid_w, int stride,
+                                     std::vector<float>& boxes,
+                                     std::vector<float>& boxScores,
+                                     std::vector<int>& classId, float threshold,
+                                     int32_t zp, float scale) {
+  int validCount = 0;
+  int grid_len = grid_h * grid_w;
+  // Quantize the threshold once, so the confidence test runs on raw INT8.
+  auto thres_i8 = QntF32ToAffine(threshold, zp, scale);
+  for (int a = 0; a < anchor_per_branch_; a++) {
+    for (int i = 0; i < grid_h; i++) {
+      for (int j = 0; j < grid_w; j++) {
+        int8_t box_confidence =
+            input[(prob_box_size * a + 4) * grid_len + i * grid_w + j];
+        if (box_confidence >= thres_i8) {
+          int offset = (prob_box_size * a) * grid_len + i * grid_w + j;
+          int8_t* in_ptr = input + offset;
+
+          int8_t maxClassProbs = in_ptr[5 * grid_len];
+          int maxClassId = 0;
+          for (int k = 1; k < obj_class_num; ++k) {
+            int8_t prob = in_ptr[(5 + k) * grid_len];
+            if (prob > maxClassProbs) {
+              maxClassId = k;
+              maxClassProbs = prob;
+            }
+          }
+
+          float box_conf_f32 = DeqntAffineToF32(box_confidence, zp, scale);
+          float class_prob_f32 = DeqntAffineToF32(maxClassProbs, zp, scale);
+          float limit_score = 0;
+          if (model_type_ == RKYOLOX) {
+            limit_score = box_conf_f32 * class_prob_f32;
+          } else {
+            limit_score = class_prob_f32;
+          }
+          if (limit_score > conf_threshold_) {
+            float box_x, box_y, box_w, box_h;
+            if (model_type_ == RKYOLOX) {
+              box_x = DeqntAffineToF32(*in_ptr, zp, scale);
+              box_y = DeqntAffineToF32(in_ptr[grid_len], zp, scale);
+              box_w = DeqntAffineToF32(in_ptr[2 * grid_len], zp, scale);
+              box_h = DeqntAffineToF32(in_ptr[3 * grid_len], zp, scale);
+              box_w = exp(box_w) * stride;
+              box_h = exp(box_h) * stride;
+            } else {
+              box_x = DeqntAffineToF32(*in_ptr, zp, scale) * 2.0 - 0.5;
+              box_y = DeqntAffineToF32(in_ptr[grid_len], zp, scale) * 2.0 - 0.5;
+              box_w = DeqntAffineToF32(in_ptr[2 * grid_len], zp, scale) * 2.0;
+              box_h = DeqntAffineToF32(in_ptr[3 * grid_len], zp, scale) * 2.0;
+              box_w = box_w * box_w;
+              box_h = box_h * box_h;
+            }
+            box_x = (box_x + j) * (float)stride;
+            box_y = (box_y + i) * (float)stride;
+            box_w *= (float)anchor[a * 2];
+            box_h *= (float)anchor[a * 2 + 1];
+            // Convert from center point to top-left corner.
+            box_x -= (box_w / 2.0);
+            box_y -= (box_h / 2.0);
+
+            boxes.push_back(box_x);
+            boxes.push_back(box_y);
+            boxes.push_back(box_w);
+            boxes.push_back(box_h);
+            boxScores.push_back(box_conf_f32 * class_prob_f32);
+            classId.push_back(maxClassId);
+            validCount++;
+          }
+        }
+      }
+    }
+  }
+  return validCount;
+}
+
+// Quicksort in descending score order, mirroring every swap into `indices`
+// so callers can recover the original positions of the sorted scores.
+int RKYOLOPostprocessor::QuickSortIndiceInverse(std::vector<float>& input,
+                                                int left, int right,
+                                                std::vector<int>& indices) {
+  float key;
+  int key_index;
+  int low = left;
+  int high = right;
+  if (left < right) {
+    key_index = indices[left];
+    key = input[left];
+    while (low < high) {
+      while (low < high && input[high] <= key) {
+        high--;
+      }
+      input[low] = input[high];
+      indices[low] = indices[high];
+      while (low < high && input[low] >= key) {
+        low++;
+      }
+      input[high] = input[low];
+      indices[high] = indices[low];
+    }
+    input[low] = key;
+    indices[low] = key_index;
+    QuickSortIndiceInverse(input, left, low - 1, indices);
+    QuickSortIndiceInverse(input, low + 1, right, indices);
+  }
+  return low;
+}
+
+}  // namespace detection
+}  // namespace vision
+}  // namespace fastdeploy
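To make the YOLOv5-style decode in `ProcessInt8` concrete, here is the same arithmetic replayed on one made-up grid cell (a standalone sketch; the raw outputs are already dequantized):

```c++
#include <cstdio>

int main() {
  const int stride = 8, grid_x = 10, grid_y = 5;  // feature-map cell
  const float anchor_w = 10.f, anchor_h = 13.f;   // anchor of this branch
  // Dequantized raw outputs tx, ty, tw, th for this cell:
  const float tx = 0.6f, ty = 0.4f, tw = 0.7f, th = 0.5f;

  float bx = (tx * 2.f - 0.5f + grid_x) * stride;  // (0.7 + 10) * 8 = 85.6
  float by = (ty * 2.f - 0.5f + grid_y) * stride;  // (0.3 + 5) * 8  = 42.4
  float bw = (tw * 2.f) * (tw * 2.f) * anchor_w;   // 1.96 * 10     = 19.6
  float bh = (th * 2.f) * (th * 2.f) * anchor_h;   // 1.0  * 13     = 13.0
  // Shift from box center to top-left corner, as the postprocessor does.
  std::printf("x=%.1f y=%.1f w=%.1f h=%.1f\n",
              bx - bw / 2, by - bh / 2, bw, bh);
  return 0;
}
```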
+ */ +class FASTDEPLOY_DECL RKYOLOPostprocessor { + public: + /** \brief Create a postprocessor instance for YOLOv5 serials model + */ + RKYOLOPostprocessor(); + + /** \brief Process the result of runtime and fill to DetectionResult structure + * + * \param[in] tensors The inference result from runtime + * \param[in] result The output result of detection + * \param[in] ims_info The shape info list, record input_shape and output_shape + * \return true if the postprocess successed, otherwise false + */ + bool Run(const std::vector& tensors, + std::vector* results); + + /// Set nms_threshold, default 0.45 + void SetNMSThreshold(const float& nms_threshold) { + nms_threshold_ = nms_threshold; + } + + /// Set conf_threshold, default 0.25 + void SetConfThreshold(const float& conf_threshold) { + conf_threshold_ = conf_threshold; + } + + /// Get conf_threshold, default 0.25 + float GetConfThreshold() const { return conf_threshold_; } + + /// Get nms_threshold, default 0.45 + float GetNMSThreshold() const { return nms_threshold_; } + + // Set model_type + void SetModelType(ModelType model_type); + + // Set height and weight + void SetHeightAndWeight(int& height, int& width) { + height_ = height; + width_ = width; + } + + // Set pad_hw_values + void SetPadHWValues(std::vector> pad_hw_values) { + pad_hw_values_ = pad_hw_values; + } + + // Set scale + void SetScale(std::vector scale) { scale_ = scale; } + + private: + ModelType model_type_ = ModelType::UNKNOWN; + std::vector anchors_ = {10, 13, 16, 30, 33, 23, 30, 61, 62, + 45, 59, 119, 116, 90, 156, 198, 373, 326}; + int strides_[3] = {8, 16, 32}; + int height_ = 0; + int width_ = 0; + int anchor_per_branch_ = 0; + + // Process Int8 Model + int ProcessInt8(int8_t* input, int* anchor, int grid_h, int grid_w, + int stride, std::vector& boxes, + std::vector& boxScores, std::vector& classId, + float threshold, int32_t zp, float scale); + + // Model + int QuickSortIndiceInverse(std::vector& input, int left, int right, + std::vector& indices); + + // post_process values + std::vector> pad_hw_values_; + std::vector scale_; + float nms_threshold_ = 0.45; + float conf_threshold_ = 0.25; + int prob_box_size = 85; + int obj_class_num = 80; + int obj_num_bbox_max_size = 200; +}; + +} // namespace detection +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.cc b/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.cc new file mode 100755 index 000000000..29480459b --- /dev/null +++ b/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.cc @@ -0,0 +1,127 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.cc b/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.cc
new file mode 100755
index 000000000..29480459b
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.cc
@@ -0,0 +1,127 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/detection/contrib/rknpu2/preprocessor.h"
+#include "fastdeploy/function/concat.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+RKYOLOPreprocessor::RKYOLOPreprocessor() {
+  size_ = {640, 640};
+  padding_value_ = {114.0, 114.0, 114.0};
+  is_mini_pad_ = false;
+  is_no_pad_ = false;
+  is_scale_up_ = true;
+  stride_ = 32;
+  max_wh_ = 7680.0;
+}
+
+void RKYOLOPreprocessor::LetterBox(FDMat* mat) {
+  // Scale so the image fits the target size while keeping its aspect ratio.
+  float scale =
+      std::min(size_[1] * 1.0 / mat->Height(), size_[0] * 1.0 / mat->Width());
+  if (!is_scale_up_) {
+    scale = std::min(scale, 1.0f);
+  }
+  scale_.push_back(scale);
+
+  int resize_h = int(round(mat->Height() * scale));
+  int resize_w = int(round(mat->Width() * scale));
+
+  int pad_w = size_[0] - resize_w;
+  int pad_h = size_[1] - resize_h;
+  if (is_mini_pad_) {
+    pad_h = pad_h % stride_;
+    pad_w = pad_w % stride_;
+  } else if (is_no_pad_) {
+    pad_h = 0;
+    pad_w = 0;
+    resize_h = size_[1];
+    resize_w = size_[0];
+  }
+
+  // Record the padding so the postprocessor can map boxes back.
+  pad_hw_values_.push_back({pad_h, pad_w});
+
+  if (std::fabs(scale - 1.0f) > 1e-06) {
+    Resize::Run(mat, resize_w, resize_h);
+  }
+  if (pad_h > 0 || pad_w > 0) {
+    float half_h = pad_h * 1.0 / 2;
+    int top = int(round(half_h - 0.1));
+    int bottom = int(round(half_h + 0.1));
+    float half_w = pad_w * 1.0 / 2;
+    int left = int(round(half_w - 0.1));
+    int right = int(round(half_w + 0.1));
+    Pad::Run(mat, top, bottom, left, right, padding_value_);
+  }
+}
+
+bool RKYOLOPreprocessor::Preprocess(FDMat* mat, FDTensor* output) {
+  // RKYOLO's preprocess steps:
+  // 1. letterbox
+  // 2. BGR -> RGB
+  // Normalization is embedded into the RKNN model, so it is not applied here.
+  LetterBox(mat);
+  BGR2RGB::Run(mat);
+  mat->ShareWithTensor(output);
+  output->ExpandDim(0);  // reshape to n, h, w, c
+  return true;
+}
+
+bool RKYOLOPreprocessor::Run(std::vector<FDMat>* images,
+                             std::vector<FDTensor>* outputs) {
+  if (images->size() == 0) {
+    FDERROR << "The size of input images should be greater than 0."
+            << std::endl;
+    return false;
+  }
+  outputs->resize(1);
+  // Concat all the preprocessed data to a batch tensor
+  std::vector<FDTensor> tensors(images->size());
+  for (size_t i = 0; i < images->size(); ++i) {
+    if (!Preprocess(&(*images)[i], &tensors[i])) {
+      FDERROR << "Failed to preprocess input image." << std::endl;
+      return false;
+    }
+  }
+
+  if (tensors.size() == 1) {
+    (*outputs)[0] = std::move(tensors[0]);
+  } else {
+    function::Concat(tensors, &((*outputs)[0]), 0);
+  }
+  return true;
+}
+
+}  // namespace detection
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.h b/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.h
new file mode 100755
index 000000000..e6ecfe452
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/preprocessor.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace detection {
+/*! @brief Preprocessor object for the RKYOLO series of models.
+ */
+class FASTDEPLOY_DECL RKYOLOPreprocessor {
+ public:
+  /** \brief Create a preprocessor instance for the RKYOLO series of models
+   */
+  RKYOLOPreprocessor();
+
+  /** \brief Process the input images and prepare input tensors for runtime
+   *
+   * \param[in] images The input image data list, all the elements are returned by cv::imread()
+   * \param[in] outputs The output tensors which will feed in runtime
+   * \return true if the preprocess succeeded, otherwise false
+   */
+  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs);
+
+  /// Set target size, tuple of (width, height), default size = {640, 640}
+  void SetSize(const std::vector<int>& size) { size_ = size; }
+
+  /// Get target size, tuple of (width, height), default size = {640, 640}
+  std::vector<int> GetSize() const { return size_; }
+
+  /// Set padding value, size should be the same as channels
+  void SetPaddingValue(const std::vector<float>& padding_value) {
+    padding_value_ = padding_value;
+  }
+
+  /// Get padding value, size should be the same as channels
+  std::vector<float> GetPaddingValue() const { return padding_value_; }
+
+  /// Set is_scale_up; if is_scale_up is false, the input image can only
+  /// be zoomed out and the maximum resize scale cannot exceed 1.0, default true
+  void SetScaleUp(bool is_scale_up) { is_scale_up_ = is_scale_up; }
+
+  /// Get is_scale_up, default true
+  bool GetScaleUp() const { return is_scale_up_; }
+
+  /// Get the letterbox padding (height, width) recorded for each image
+  std::vector<std::vector<int>> GetPadHWValues() const {
+    return pad_hw_values_;
+  }
+
+  /// Get the letterbox scale recorded for each image
+  std::vector<float> GetScale() const { return scale_; }
+
+ protected:
+  bool Preprocess(FDMat* mat, FDTensor* output);
+
+  void LetterBox(FDMat* mat);
+
+  // target size, tuple of (width, height), default size = {640, 640}
+  std::vector<int> size_;
+
+  // padding value, size should be the same as channels
+  std::vector<float> padding_value_;
+
+  // only pad to the minimum rectangle whose height and width are multiples of stride
+  bool is_mini_pad_;
+
+  // when is_mini_pad = false and is_no_pad = true,
+  // resize the image to the set size without padding
+  bool is_no_pad_;
+
+  // if is_scale_up is false, the input image can only be zoomed out;
+  // the maximum resize scale cannot exceed 1.0
+  bool is_scale_up_;
+
+  // padding stride, for is_mini_pad
+  int stride_;
+
+  // for offsetting the boxes by classes when using NMS
+  float max_wh_;
+
+  std::vector<std::vector<int>> pad_hw_values_;
+  std::vector<float> scale_;
+};
+
+}  // namespace detection
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkyolo.cc b/fastdeploy/vision/detection/contrib/rknpu2/rkyolo.cc
new file mode 100644
index 000000000..017cb1be3
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/rkyolo.cc
@@ -0,0 +1,73 @@
+#include "fastdeploy/vision/detection/contrib/rknpu2/rkyolo.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+RKYOLO::RKYOLO(const std::string& model_file,
+               const fastdeploy::RuntimeOption& custom_option,
+               const fastdeploy::ModelFormat& model_format) {
+  if (model_format == ModelFormat::RKNN) {
+    valid_cpu_backends = {};
+    valid_gpu_backends = {};
+    valid_rknpu_backends = {Backend::RKNPU2};
+  } else {
+    FDERROR << "RKYOLO only supports running on RKNPU2." << std::endl;
+  }
+  runtime_option = custom_option;
+  runtime_option.model_format = model_format;
+  runtime_option.model_file = model_file;
+  initialized = Initialize();
+}
+
+bool RKYOLO::Initialize() {
+  if (!InitRuntime()) {
+    FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+    return false;
+  }
+  // Tell the postprocessor the input resolution the model was built with.
+  auto size = GetPreprocessor().GetSize();
+  GetPostprocessor().SetHeightAndWeight(size[0], size[1]);
+  return true;
+}
+
+bool RKYOLO::Predict(const cv::Mat& im, DetectionResult* result) {
+  std::vector<DetectionResult> results;
+  if (!BatchPredict({im}, &results)) {
+    return false;
+  }
+  *result = std::move(results[0]);
+  return true;
+}
+
+bool RKYOLO::BatchPredict(const std::vector<cv::Mat>& images,
+                          std::vector<DetectionResult>* results) {
+  std::vector<FDMat> fd_images = WrapMat(images);
+
+  if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
+    FDERROR << "Failed to preprocess the input image." << std::endl;
+    return false;
+  }
+  // Hand the letterbox padding and scale over to the postprocessor so it
+  // can map the boxes back to the original image coordinates.
+  postprocessor_.SetPadHWValues(preprocessor_.GetPadHWValues());
+  postprocessor_.SetScale(preprocessor_.GetScale());
+
+  reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
+  if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
+    FDERROR << "Failed to inference by runtime." << std::endl;
+    return false;
+  }
+
+  if (!postprocessor_.Run(reused_output_tensors_, results)) {
+    FDERROR << "Failed to postprocess the inference results by runtime."
+            << std::endl;
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace detection
+}  // namespace vision
+}  // namespace fastdeploy
\ No newline at end of file
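`BatchPredict` above is essentially the wiring between the preprocessor, the runtime and the postprocessor. A hedged sketch of driving the pieces manually, for cases where the runtime is owned elsewhere (assumes `runtime` is an initialized `fastdeploy::Runtime` loaded with the RKNN model; error handling omitted):

```c++
namespace vis = fastdeploy::vision;

vis::detection::RKYOLOPreprocessor preprocessor;
vis::detection::RKYOLOPostprocessor postprocessor;
postprocessor.SetModelType(ModelType::RKYOLOV5);
auto size = preprocessor.GetSize();  // {640, 640} by default
postprocessor.SetHeightAndWeight(size[0], size[1]);

auto im = cv::imread("000000014439.jpg");
std::vector<vis::FDMat> mats = vis::WrapMat({im});
std::vector<fastdeploy::FDTensor> inputs, outputs;
preprocessor.Run(&mats, &inputs);
// Forward the letterbox records so boxes can be mapped back.
postprocessor.SetPadHWValues(preprocessor.GetPadHWValues());
postprocessor.SetScale(preprocessor.GetScale());

inputs[0].name = runtime.GetInputInfo(0).name;
runtime.Infer(inputs, &outputs);

std::vector<vis::DetectionResult> results;
postprocessor.Run(outputs, &results);
```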
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkyolo.h b/fastdeploy/vision/detection/contrib/rknpu2/rkyolo.h
new file mode 100644
index 000000000..d7190eb73
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/rkyolo.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/detection/contrib/rknpu2/postprocessor.h"
+#include "fastdeploy/vision/detection/contrib/rknpu2/preprocessor.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+class FASTDEPLOY_DECL RKYOLO : public FastDeployModel {
+ public:
+  RKYOLO(const std::string& model_file,
+         const RuntimeOption& custom_option = RuntimeOption(),
+         const ModelFormat& model_format = ModelFormat::RKNN);
+
+  std::string ModelName() const { return "RKYOLO"; }
+
+  /** \brief Predict the detection result for an input image
+   *
+   * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   * \param[in] result The output detection result will be written to this structure
+   * \return true if the prediction succeeded, otherwise false
+   */
+  virtual bool Predict(const cv::Mat& img, DetectionResult* result);
+
+  /** \brief Predict the detection results for a batch of input images
+   *
+   * \param[in] imgs The input image list, each element comes from cv::imread()
+   * \param[in] results The output detection result list
+   * \return true if the prediction succeeded, otherwise false
+   */
+  virtual bool BatchPredict(const std::vector<cv::Mat>& imgs,
+                            std::vector<DetectionResult>* results);
+
+  /// Get the preprocessor reference of RKYOLO
+  RKYOLOPreprocessor& GetPreprocessor() { return preprocessor_; }
+
+  /// Get the postprocessor reference of RKYOLO
+  RKYOLOPostprocessor& GetPostprocessor() { return postprocessor_; }
+
+ protected:
+  bool Initialize();
+  RKYOLOPreprocessor preprocessor_;
+  RKYOLOPostprocessor postprocessor_;
+};
+
+}  // namespace detection
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/rkyolo_pybind.cc b/fastdeploy/vision/detection/contrib/rknpu2/rkyolo_pybind.cc
new file mode 100755
index 000000000..716464458
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/rkyolo_pybind.cc
@@ -0,0 +1,95 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindRKYOLO(pybind11::module& m) {
+  pybind11::class_<vision::detection::RKYOLOPreprocessor>(
+      m, "RKYOLOPreprocessor")
+      .def(pybind11::init<>())
+      .def("run",
+           [](vision::detection::RKYOLOPreprocessor& self,
+              std::vector<pybind11::array>& im_list) {
+             std::vector<vision::FDMat> images;
+             for (size_t i = 0; i < im_list.size(); ++i) {
+               images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
+             }
+             std::vector<FDTensor> outputs;
+             if (!self.Run(&images, &outputs)) {
+               throw std::runtime_error(
+                   "Failed to preprocess the input data in RKYOLOPreprocessor.");
+             }
+             for (size_t i = 0; i < outputs.size(); ++i) {
+               outputs[i].StopSharing();
+             }
+             return outputs;
+           })
+      .def_property("size", &vision::detection::RKYOLOPreprocessor::GetSize,
+                    &vision::detection::RKYOLOPreprocessor::SetSize)
+      .def_property("padding_value",
+                    &vision::detection::RKYOLOPreprocessor::GetPaddingValue,
+                    &vision::detection::RKYOLOPreprocessor::SetPaddingValue)
+      .def_property("is_scale_up",
+                    &vision::detection::RKYOLOPreprocessor::GetScaleUp,
+                    &vision::detection::RKYOLOPreprocessor::SetScaleUp);
+
+  pybind11::class_<vision::detection::RKYOLOPostprocessor>(
+      m, "RKYOLOPostprocessor")
+      .def(pybind11::init<>())
+      .def("run",
+           [](vision::detection::RKYOLOPostprocessor& self,
+              std::vector<FDTensor>& inputs) {
+             std::vector<vision::DetectionResult> results;
+             if (!self.Run(inputs, &results)) {
+               throw std::runtime_error(
+                   "Failed to postprocess the runtime result in RKYOLOPostprocessor.");
+             }
+             return results;
+           })
+      .def("run",
+           [](vision::detection::RKYOLOPostprocessor& self,
+              std::vector<pybind11::array>& input_array) {
+             std::vector<vision::DetectionResult> results;
+             std::vector<FDTensor> inputs;
+             PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
+             if (!self.Run(inputs, &results)) {
+               throw std::runtime_error(
+                   "Failed to postprocess the runtime result in RKYOLOPostprocessor.");
+             }
+             return results;
+           })
+      .def_property("conf_threshold",
+                    &vision::detection::RKYOLOPostprocessor::GetConfThreshold,
+                    &vision::detection::RKYOLOPostprocessor::SetConfThreshold)
+      .def_property("nms_threshold",
+                    &vision::detection::RKYOLOPostprocessor::GetNMSThreshold,
+                    &vision::detection::RKYOLOPostprocessor::SetNMSThreshold);
+
+  pybind11::class_<vision::detection::RKYOLOV5, FastDeployModel>(m, "RKYOLOV5")
+      .def(pybind11::init<std::string, RuntimeOption, ModelFormat>())
+      .def("predict",
+           [](vision::detection::RKYOLOV5& self, pybind11::array& data) {
+             auto mat = PyArrayToCvMat(data);
+             vision::DetectionResult res;
+             self.Predict(mat, &res);
+             return res;
+           })
+      .def("batch_predict",
+           [](vision::detection::RKYOLOV5& self,
+              std::vector<pybind11::array>& data) {
+             std::vector<cv::Mat> images;
+             for (size_t i = 0; i < data.size(); ++i) {
+               images.push_back(PyArrayToCvMat(data[i]));
+             }
+             std::vector<vision::DetectionResult> results;
+             self.BatchPredict(images, &results);
+             return results;
+           })
+      .def_property_readonly("preprocessor",
+                             &vision::detection::RKYOLOV5::GetPreprocessor)
+      .def_property_readonly("postprocessor",
+                             &vision::detection::RKYOLOV5::GetPostprocessor);
+}
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/utils.cc b/fastdeploy/vision/detection/contrib/rknpu2/utils.cc
new file mode 100644
index 000000000..faac26983
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/utils.cc
@@ -0,0 +1,93 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "fastdeploy/vision/detection/contrib/rknpu2/utils.h"
+
+#include <cmath>
+
+float clamp(float val, int min, int max) {
+  return val > min ? (val < max ? val : max) : min;
+}
+
+float Sigmoid(float x) { return 1.0 / (1.0 + expf(-x)); }
+
+float UnSigmoid(float y) { return -1.0 * logf((1.0 / y) - 1.0); }
+
+inline static int32_t __clip(float val, float min, float max) {
+  float f = val <= min ? min : (val >= max ? max : val);
+  return f;
+}
+
+// Quantize a float to the INT8 affine representation: q = f / scale + zp.
+int8_t QntF32ToAffine(float f32, int32_t zp, float scale) {
+  float dst_val = (f32 / scale) + zp;
+  int8_t res = (int8_t)__clip(dst_val, -128, 127);
+  return res;
+}
+
+// Dequantize an INT8 affine value back to float: f = (q - zp) * scale.
+float DeqntAffineToF32(int8_t qnt, int32_t zp, float scale) {
+  return ((float)qnt - (float)zp) * scale;
+}
+
+// Intersection over union of two boxes given as corner coordinates.
+static float CalculateOverlap(float xmin0, float ymin0, float xmax0,
+                              float ymax0, float xmin1, float ymin1,
+                              float xmax1, float ymax1) {
+  float w = fmax(0.f, fmin(xmax0, xmax1) - fmax(xmin0, xmin1) + 1.0);
+  float h = fmax(0.f, fmin(ymax0, ymax1) - fmax(ymin0, ymin1) + 1.0);
+  float i = w * h;
+  float u = (xmax0 - xmin0 + 1.0) * (ymax0 - ymin0 + 1.0) +
+            (xmax1 - xmin1 + 1.0) * (ymax1 - ymin1 + 1.0) - i;
+  return u <= 0.f ? 0.f : (i / u);
+}
+
+// Greedy NMS over boxes stored as [x, y, w, h]; suppressed entries are
+// marked by setting their index in `order` to -1.
+int NMS(int validCount, std::vector<float>& outputLocations,
+        std::vector<int>& class_id, std::vector<int>& order, float threshold,
+        bool class_agnostic) {
+  for (int i = 0; i < validCount; ++i) {
+    if (order[i] == -1) {
+      continue;
+    }
+    int n = order[i];
+    for (int j = i + 1; j < validCount; ++j) {
+      int m = order[j];
+      if (m == -1) {
+        continue;
+      }
+
+      // In per-class mode only boxes of the same class suppress each other.
+      if (!class_agnostic && class_id[n] != class_id[m]) {
+        continue;
+      }
+
+      float xmin0 = outputLocations[n * 4 + 0];
+      float ymin0 = outputLocations[n * 4 + 1];
+      float xmax0 = outputLocations[n * 4 + 0] + outputLocations[n * 4 + 2];
+      float ymax0 = outputLocations[n * 4 + 1] + outputLocations[n * 4 + 3];
+
+      float xmin1 = outputLocations[m * 4 + 0];
+      float ymin1 = outputLocations[m * 4 + 1];
+      float xmax1 = outputLocations[m * 4 + 0] + outputLocations[m * 4 + 2];
+      float ymax1 = outputLocations[m * 4 + 1] + outputLocations[m * 4 + 3];
+
+      float iou = CalculateOverlap(xmin0, ymin0, xmax0, ymax0, xmin1, ymin1,
+                                   xmax1, ymax1);
+
+      if (iou > threshold) {
+        order[j] = -1;
+      }
+    }
+  }
+  return 0;
+}
\ No newline at end of file
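A quick sanity check of the affine quantization helpers above, as a standalone sketch (the zero point and scale values are made up; the helpers are copied so the file compiles on its own):

```c++
#include <cstdint>
#include <cstdio>

static int8_t QntF32ToAffine(float f32, int32_t zp, float scale) {
  float v = f32 / scale + zp;
  v = v <= -128.f ? -128.f : (v >= 127.f ? 127.f : v);
  return (int8_t)v;  // note: the cast truncates toward zero
}
static float DeqntAffineToF32(int8_t q, int32_t zp, float scale) {
  return ((float)q - (float)zp) * scale;
}

int main() {
  const int32_t zp = -14;       // example zero point
  const float scale = 0.0217f;  // example scale
  int8_t q = QntF32ToAffine(0.25f, zp, scale);  // 0.25/0.0217 - 14 -> -2
  float back = DeqntAffineToF32(q, zp, scale);  // (-2 + 14) * 0.0217 = 0.2604
  std::printf("q=%d back=%f\n", q, back);       // round trip has quant error
  return 0;
}
```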
diff --git a/fastdeploy/vision/detection/contrib/rknpu2/utils.h b/fastdeploy/vision/detection/contrib/rknpu2/utils.h
new file mode 100644
index 000000000..4414cb8a5
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/rknpu2/utils.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+#include <cstdint>
+#include <vector>
+
+typedef enum { RKYOLOX = 0, RKYOLOV5, RKYOLOV7, UNKNOWN } ModelType;
+
+float clamp(float val, int min, int max);
+float Sigmoid(float x);
+float UnSigmoid(float y);
+int8_t QntF32ToAffine(float f32, int32_t zp, float scale);
+float DeqntAffineToF32(int8_t qnt, int32_t zp, float scale);
+int NMS(int validCount, std::vector<float>& outputLocations,
+        std::vector<int>& class_id, std::vector<int>& order, float threshold,
+        bool class_agnostic);
diff --git a/fastdeploy/vision/detection/detection_pybind.cc b/fastdeploy/vision/detection/detection_pybind.cc
index b3a7a6ad9..9d585e18c 100644
--- a/fastdeploy/vision/detection/detection_pybind.cc
+++ b/fastdeploy/vision/detection/detection_pybind.cc
@@ -27,6 +27,7 @@ void BindNanoDetPlus(pybind11::module& m);
 void BindPPDet(pybind11::module& m);
 void BindYOLOv7End2EndTRT(pybind11::module& m);
 void BindYOLOv7End2EndORT(pybind11::module& m);
+void BindRKYOLO(pybind11::module& m);
 
 void BindDetection(pybind11::module& m) {
   auto detection_module =
@@ -42,5 +43,6 @@ void BindDetection(pybind11::module& m) {
   BindNanoDetPlus(detection_module);
   BindYOLOv7End2EndTRT(detection_module);
   BindYOLOv7End2EndORT(detection_module);
+  BindRKYOLO(detection_module);
 }
 }  // namespace fastdeploy
diff --git a/python/fastdeploy/vision/detection/__init__.py b/python/fastdeploy/vision/detection/__init__.py
index b5f01f3a7..afd1cd8ce 100755
--- a/python/fastdeploy/vision/detection/__init__.py
+++ b/python/fastdeploy/vision/detection/__init__.py
@@ -24,3 +24,4 @@ from .contrib.yolov6 import YOLOv6
 from .contrib.yolov7end2end_trt import YOLOv7End2EndTRT
 from .contrib.yolov7end2end_ort import YOLOv7End2EndORT
 from .ppdet import *
+from .contrib.rkyolo import *
diff --git a/python/fastdeploy/vision/detection/contrib/rkyolo/__init__.py b/python/fastdeploy/vision/detection/contrib/rkyolo/__init__.py
new file mode 100644
index 000000000..ce89483ce
--- /dev/null
+++ b/python/fastdeploy/vision/detection/contrib/rkyolo/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from .rkyolov5 import *
diff --git a/python/fastdeploy/vision/detection/contrib/rkyolo/rkyolov5.py b/python/fastdeploy/vision/detection/contrib/rkyolo/rkyolov5.py
new file mode 100644
index 000000000..bfefb2127
--- /dev/null
+++ b/python/fastdeploy/vision/detection/contrib/rkyolo/rkyolov5.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from ..... import FastDeployModel, ModelFormat
+from ..... import c_lib_wrap as C
+
+
+class RKYOLOPreprocessor:
+    def __init__(self):
+        """Create a preprocessor for RKYOLOV5
+        """
+        self._preprocessor = C.vision.detection.RKYOLOPreprocessor()
+
+    def run(self, input_ims):
+        """Preprocess input images for RKYOLOV5
+
+        :param: input_ims: (list of numpy.ndarray)The input images
+        :return: list of FDTensor
+        """
+        return self._preprocessor.run(input_ims)
+
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._preprocessor.size
+
+    @property
+    def padding_value(self):
+        """
+        Padding value for preprocessing, default [114.0, 114.0, 114.0]
+        """
+        # padding value, size should be the same as channels
+        return self._preprocessor.padding_value
+
+    @property
+    def is_scale_up(self):
+        """
+        is_scale_up for preprocessing; when false the input image can only be zoomed out and the maximum resize scale cannot exceed 1.0, default true
+        """
+        return self._preprocessor.is_scale_up
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(wh, (list, tuple)), \
+            "The value to set `size` must be type of tuple or list."
+        assert len(wh) == 2, \
+            "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
+                len(wh))
+        self._preprocessor.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value,
+            list), "The value to set `padding_value` must be type of list."
+        self._preprocessor.padding_value = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_scale_up` must be type of bool."
+        self._preprocessor.is_scale_up = value
+
+
+class RKYOLOPostprocessor:
+    def __init__(self):
+        """Create a postprocessor for RKYOLOV5
+        """
+        self._postprocessor = C.vision.detection.RKYOLOPostprocessor()
+
+    def run(self, runtime_results):
+        """Postprocess the runtime results for RKYOLOV5
+
+        :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+        :return: list of DetectionResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
+        """
+        return self._postprocessor.run(runtime_results)
+
+    @property
+    def conf_threshold(self):
+        """
+        Confidence threshold for postprocessing, default is 0.25
+        """
+        return self._postprocessor.conf_threshold
+
+    @property
+    def nms_threshold(self):
+        """
+        NMS threshold for postprocessing, default is 0.45
+        """
+        return self._postprocessor.nms_threshold
+
+    @conf_threshold.setter
+    def conf_threshold(self, conf_threshold):
+        assert isinstance(conf_threshold, float), \
+            "The value to set `conf_threshold` must be type of float."
+        self._postprocessor.conf_threshold = conf_threshold
+
+    @nms_threshold.setter
+    def nms_threshold(self, nms_threshold):
+        assert isinstance(nms_threshold, float), \
+            "The value to set `nms_threshold` must be type of float."
+        self._postprocessor.nms_threshold = nms_threshold
+
+
+class RKYOLOV5(FastDeployModel):
+    def __init__(self,
+                 model_file,
+                 params_file="",
+                 runtime_option=None,
+                 model_format=ModelFormat.RKNN):
+        """Load a RKYOLOV5 model in RKNN format for RKNPU2 deployment.
+
+        :param model_file: (str)Path of model file, e.g ./yolov5.rknn
+        :param params_file: (str)Path of parameters file; RKNN models do not use it, so it can be left as an empty string
+        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model, default is RKNN
+        """
+        # Call the base class to initialize the backend option; the
+        # initialized option is kept in self._runtime_option.
+        super(RKYOLOV5, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.RKYOLOV5(
+            model_file, self._runtime_option, model_format)
+        # self.initialized reports whether the model loaded successfully.
+        assert self.initialized, "RKYOLOV5 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+
+        self.postprocessor.conf_threshold = conf_threshold
+        self.postprocessor.nms_threshold = nms_iou_threshold
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Detect a batch of input images
+
+        :param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get the RKYOLOPreprocessor object of the loaded model
+
+        :return RKYOLOPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get the RKYOLOPostprocessor object of the loaded model
+
+        :return RKYOLOPostprocessor
+        """
+        return self._model.postprocessor
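As a usage note for the Python API above, a minimal hedged sketch of loading the model and running batched inference (the model file name is a placeholder; `batch_predict` takes a list of BGR images and returns one `DetectionResult` per image):

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_rknpu2()
model = fd.vision.detection.RKYOLOV5(
    "./yolov5.rknn",
    runtime_option=option,
    model_format=fd.ModelFormat.RKNN)

# RKNN models are typically converted with batch size 1,
# so a single-element list is used here.
ims = [cv2.imread("000000014439.jpg")]
results = model.batch_predict(ims)
print(results[0])
```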