Add RKYOLOv5 RKYOLOX RKYOLOV7 (#709)

* 更正代码格式

* 更正代码格式

* 修复语法错误

* fix rk error

* update

* update

* update

* update

* update

* update

* update

Co-authored-by: Jason <jiangjiajun@baidu.com>
This commit is contained in:
Zheng_Bicheng
2022-12-10 15:44:00 +08:00
committed by GitHub
parent 6f5521e63e
commit c7dc7d5eee
25 changed files with 1516 additions and 1 deletions

View File

@@ -0,0 +1,92 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/vision/detection/contrib/rknpu2/rkyolo.h"
namespace fastdeploy {
namespace vision {
namespace detection {
class FASTDEPLOY_DECL RKYOLOV5 : public RKYOLO {
 public:
  /** \brief Set path of model file and the configuration of runtime
   *
   * \param[in] model_file Path of the RKNN model file, e.g ./yolov5.rknn
   * \param[in] custom_option RuntimeOption for inference; RKYOLO models only run on the RKNPU2 backend
   * \param[in] model_format Model format of the loaded model, default (and only supported) format is RKNN
   */
  RKYOLOV5(const std::string& model_file,
           const RuntimeOption& custom_option = RuntimeOption(),
           const ModelFormat& model_format = ModelFormat::RKNN)
      : RKYOLO(model_file, custom_option, model_format) {
    // RKNN models can only be executed by the RKNPU2 backend.
    valid_cpu_backends = {};
    valid_gpu_backends = {};
    valid_rknpu_backends = {Backend::RKNPU2};
    // Selects the YOLOv5 anchor table / decode scheme in the postprocessor.
    GetPostprocessor().SetModelType(ModelType::RKYOLOV5);
  }
  virtual std::string ModelName() const { return "RKYOLOV5"; }
};
class FASTDEPLOY_DECL RKYOLOV7 : public RKYOLO {
 public:
  /** \brief Set path of model file and the configuration of runtime
   *
   * \param[in] model_file Path of the RKNN model file, e.g ./yolov7.rknn
   * \param[in] custom_option RuntimeOption for inference; RKYOLO models only run on the RKNPU2 backend
   * \param[in] model_format Model format of the loaded model, default (and only supported) format is RKNN
   */
  RKYOLOV7(const std::string& model_file,
           const RuntimeOption& custom_option = RuntimeOption(),
           const ModelFormat& model_format = ModelFormat::RKNN)
      : RKYOLO(model_file, custom_option, model_format) {
    // RKNN models can only be executed by the RKNPU2 backend.
    valid_cpu_backends = {};
    valid_gpu_backends = {};
    valid_rknpu_backends = {Backend::RKNPU2};
    // Selects the YOLOv7 anchor table / decode scheme in the postprocessor.
    GetPostprocessor().SetModelType(ModelType::RKYOLOV7);
  }
  virtual std::string ModelName() const { return "RKYOLOV7"; }
};
class FASTDEPLOY_DECL RKYOLOX : public RKYOLO {
 public:
  /** \brief Set path of model file and the configuration of runtime
   *
   * \param[in] model_file Path of the RKNN model file, e.g ./yolox.rknn
   * \param[in] custom_option RuntimeOption for inference; RKYOLO models only run on the RKNPU2 backend
   * \param[in] model_format Model format of the loaded model, default (and only supported) format is RKNN
   */
  RKYOLOX(const std::string& model_file,
          const RuntimeOption& custom_option = RuntimeOption(),
          const ModelFormat& model_format = ModelFormat::RKNN)
      : RKYOLO(model_file, custom_option, model_format) {
    // RKNN models can only be executed by the RKNPU2 backend.
    valid_cpu_backends = {};
    valid_gpu_backends = {};
    valid_rknpu_backends = {Backend::RKNPU2};
    // Selects the YOLOX decode scheme in the postprocessor.
    GetPostprocessor().SetModelType(ModelType::RKYOLOX);
  }
  // Bug fix: previously returned "RKYOLOV7" (copy-paste error).
  virtual std::string ModelName() const { return "RKYOLOX"; }
};
} // namespace detection
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,239 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/detection/contrib/rknpu2/postprocessor.h"
#include "fastdeploy/vision/utils/utils.h"
namespace fastdeploy {
namespace vision {
namespace detection {
RKYOLOPostprocessor::RKYOLOPostprocessor() {}
// Configure the anchor table and the number of anchors decoded per output
// branch for the selected detector family. Unrecognized types leave the
// previous anchor configuration untouched.
void RKYOLOPostprocessor::SetModelType(ModelType model_type) {
  model_type_ = model_type;
  switch (model_type) {
    case RKYOLOV5:
      anchors_ = {10, 13, 16, 30, 33, 23, 30, 61, 62,
                  45, 59, 119, 116, 90, 156, 198, 373, 326};
      anchor_per_branch_ = 3;
      break;
    case RKYOLOX:
      // NOTE(review): ProcessInt8 still multiplies box w/h by these anchor
      // values for YOLOX even though YOLOX is anchor-free — verify against
      // the upstream RKNN model zoo decoder.
      anchors_ = {10, 13, 16, 30, 33, 23, 30, 61, 62,
                  45, 59, 119, 116, 90, 156, 198, 373, 326};
      anchor_per_branch_ = 1;
      break;
    case RKYOLOV7:
      anchors_ = {12, 16, 19, 36, 40, 28, 36, 75, 76,
                  55, 72, 146, 142, 110, 192, 243, 459, 401};
      anchor_per_branch_ = 3;
      break;
    default:
      break;
  }
}
bool RKYOLOPostprocessor::Run(const std::vector<FDTensor>& tensors,
std::vector<DetectionResult>* results) {
if (model_type_ == ModelType::UNKNOWN) {
FDERROR << "RKYOLO Only Support YOLOV5,YOLOV7,YOLOX" << std::endl;
return false;
}
results->resize(tensors[0].shape[0]);
for (int num = 0; num < tensors[0].shape[0]; ++num) {
int validCount = 0;
std::vector<float> filterBoxes;
std::vector<float> boxesScore;
std::vector<int> classId;
for (int i = 0; i < tensors.size(); i++) {
auto tensor_shape = tensors[i].shape;
auto skip_num = std::accumulate(tensor_shape.begin(), tensor_shape.end(),
1, std::multiplies<int>());
int skip_address = num * skip_num;
int stride = strides_[i];
int grid_h = height_ / stride;
int grid_w = width_ / stride;
int* anchor = &(anchors_.data()[i * 2 * anchor_per_branch_]);
if (tensors[i].dtype == FDDataType::INT8 || tensors[i].dtype == FDDataType::UINT8) {
auto quantization_info = tensors[i].GetQuantizationInfo();
validCount = validCount +
ProcessInt8((int8_t*)tensors[i].Data() + skip_address,
anchor, grid_h, grid_w, stride, filterBoxes,
boxesScore, classId, conf_threshold_,
quantization_info.first, quantization_info.second[0]);
} else {
FDERROR << "RKYOLO Only Support INT8 Model" << std::endl;
}
}
// no object detect
if (validCount <= 0) {
FDINFO << "The number of object detect is 0." << std::endl;
return true;
}
std::vector<int> indexArray;
for (int i = 0; i < validCount; ++i) {
indexArray.push_back(i);
}
QuickSortIndiceInverse(boxesScore, 0, validCount - 1, indexArray);
if (model_type_ == RKYOLOV5 || model_type_ == RKYOLOV7) {
NMS(validCount, filterBoxes, classId, indexArray, nms_threshold_, false);
} else if (model_type_ == RKYOLOX) {
NMS(validCount, filterBoxes, classId, indexArray, nms_threshold_, true);
}
int last_count = 0;
(*results)[num].Clear();
(*results)[num].Reserve(validCount);
/* box valid detect target */
for (int i = 0; i < validCount; ++i) {
if (indexArray[i] == -1 || boxesScore[i] < conf_threshold_ ||
last_count >= obj_num_bbox_max_size) {
continue;
}
int n = indexArray[i];
float x1 = filterBoxes[n * 4 + 0];
float y1 = filterBoxes[n * 4 + 1];
float x2 = x1 + filterBoxes[n * 4 + 2];
float y2 = y1 + filterBoxes[n * 4 + 3];
int id = classId[n];
(*results)[num].boxes.emplace_back(std::array<float, 4>{
(float)((clamp(x1, 0, width_) - pad_hw_values_[num][1] / 2) /
scale_[num]),
(float)((clamp(y1, 0, height_) - pad_hw_values_[num][0] / 2) /
scale_[num]),
(float)((clamp(x2, 0, width_) - pad_hw_values_[num][1] / 2) /
scale_[num]),
(float)((clamp(y2, 0, height_) - pad_hw_values_[num][0] / 2) /
scale_[0])});
(*results)[num].label_ids.push_back(id);
(*results)[num].scores.push_back(boxesScore[i]);
last_count++;
}
std::cout << "last_count" << last_count << std::endl;
}
return true;
}
// Decode one INT8-quantized output branch into candidate boxes.
//
// The tensor layout is channel-planar per anchor: for anchor `a`, channel
// `c`, cell (i, j) the value lives at input[(prob_box_size*a + c)*grid_len
// + i*grid_w + j]. Channels 0-3 are box x/y/w/h, channel 4 is objectness,
// channels 5.. are the per-class scores.
//
// \param[in] input Quantized branch data, already offset to this image.
// \param[in] anchor Pointer to this branch's (w, h) anchor pairs.
// \param[in] grid_h,grid_w Cells in this branch's output grid.
// \param[in] stride Pixels per grid cell at the network input resolution.
// \param[in] boxes Appended with (x, y, w, h) per accepted candidate.
// \param[in] boxScores Appended with objectness * class score.
// \param[in] classId Appended with the argmax class index.
// \param[in] threshold Objectness threshold (compared in quantized space).
// \param[in] zp,scale Affine quantization parameters of this tensor.
// \return Number of candidates appended.
int RKYOLOPostprocessor::ProcessInt8(int8_t* input, int* anchor, int grid_h,
                                     int grid_w, int stride,
                                     std::vector<float>& boxes,
                                     std::vector<float>& boxScores,
                                     std::vector<int>& classId, float threshold,
                                     int32_t zp, float scale) {
  int validCount = 0;
  int grid_len = grid_h * grid_w;
  float thres = threshold;
  // Quantize the threshold once so the objectness test stays in int8.
  auto thres_i8 = QntF32ToAffine(thres, zp, scale);
  for (int a = 0; a < anchor_per_branch_; a++) {
    for (int i = 0; i < grid_h; i++) {
      for (int j = 0; j < grid_w; j++) {
        int8_t box_confidence =
            input[(prob_box_size * a + 4) * grid_len + i * grid_w + j];
        if (box_confidence >= thres_i8) {
          int offset = (prob_box_size * a) * grid_len + i * grid_w + j;
          int8_t* in_ptr = input + offset;
          // Argmax over the class-score channels (still quantized).
          int8_t maxClassProbs = in_ptr[5 * grid_len];
          int maxClassId = 0;
          for (int k = 1; k < obj_class_num; ++k) {
            int8_t prob = in_ptr[(5 + k) * grid_len];
            if (prob > maxClassProbs) {
              maxClassId = k;
              maxClassProbs = prob;
            }
          }
          float box_conf_f32 = DeqntAffineToF32(box_confidence, zp, scale);
          float class_prob_f32 = DeqntAffineToF32(maxClassProbs, zp, scale);
          // YOLOX gates on objectness*class score; YOLOv5/v7 on class score.
          float limit_score = 0;
          if (model_type_ == RKYOLOX) {
            limit_score = box_conf_f32 * class_prob_f32;
          } else {
            limit_score = class_prob_f32;
          }
          //printf("limit score: %f\n", limit_score);
          if (limit_score > conf_threshold_) {
            float box_x, box_y, box_w, box_h;
            if (model_type_ == RKYOLOX) {
              // YOLOX decode: raw xy offsets, exp() for box size.
              box_x = DeqntAffineToF32(*in_ptr, zp, scale);
              box_y = DeqntAffineToF32(in_ptr[grid_len], zp, scale);
              box_w = DeqntAffineToF32(in_ptr[2 * grid_len], zp, scale);
              box_h = DeqntAffineToF32(in_ptr[3 * grid_len], zp, scale);
              box_w = exp(box_w) * stride;
              box_h = exp(box_h) * stride;
            } else {
              // YOLOv5/v7 decode: sigmoid outputs mapped via 2x-0.5 / (2x)^2.
              box_x = DeqntAffineToF32(*in_ptr, zp, scale) * 2.0 - 0.5;
              box_y = DeqntAffineToF32(in_ptr[grid_len], zp, scale) * 2.0 - 0.5;
              box_w = DeqntAffineToF32(in_ptr[2 * grid_len], zp, scale) * 2.0;
              box_h = DeqntAffineToF32(in_ptr[3 * grid_len], zp, scale) * 2.0;
              box_w = box_w * box_w;
              box_h = box_h * box_h;
            }
            // Cell offset -> input-resolution coordinates, then scale by the
            // anchor and shift to the top-left corner.
            // NOTE(review): the anchor multiply also runs for RKYOLOX, which
            // is anchor-free — verify against the upstream decoder.
            box_x = (box_x + j) * (float)stride;
            box_y = (box_y + i) * (float)stride;
            box_w *= (float)anchor[a * 2];
            box_h *= (float)anchor[a * 2 + 1];
            box_x -= (box_w / 2.0);
            box_y -= (box_h / 2.0);
            boxes.push_back(box_x);
            boxes.push_back(box_y);
            boxes.push_back(box_w);
            boxes.push_back(box_h);
            boxScores.push_back(box_conf_f32 * class_prob_f32);
            classId.push_back(maxClassId);
            validCount++;
          }
        }
      }
    }
  }
  return validCount;
}
// Sort `input` into descending order over [left, right] using quicksort,
// applying the identical permutation to `indices` so the caller can recover
// which candidate each sorted score belongs to. Sorts in place; returns the
// pivot's final position for the top-level partition.
int RKYOLOPostprocessor::QuickSortIndiceInverse(std::vector<float>& input,
                                                int left, int right,
                                                std::vector<int>& indices) {
  float key;
  int key_index;
  int low = left;
  int high = right;
  if (left < right) {
    key_index = indices[left];
    key = input[left];
    while (low < high) {
      // Shift elements <= pivot toward the right end...
      while (low < high && input[high] <= key) {
        high--;
      }
      input[low] = input[high];
      indices[low] = indices[high];
      // ...and elements >= pivot toward the left end (descending order).
      while (low < high && input[low] >= key) {
        low++;
      }
      input[high] = input[low];
      indices[high] = indices[low];
    }
    // Drop the pivot into its final slot, then recurse on both halves.
    input[low] = key;
    indices[low] = key_index;
    QuickSortIndiceInverse(input, left, low - 1, indices);
    QuickSortIndiceInverse(input, low + 1, right, indices);
  }
  return low;
}
} // namespace detection
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,105 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
#include "fastdeploy/vision/detection/contrib/rknpu2/utils.h"
#include <array>
namespace fastdeploy {
namespace vision {
namespace detection {
/*! @brief Postprocessor object for the RKYOLO series models
 * (RKYOLOv5 / RKYOLOv7 / RKYOLOX running on RKNPU2).
 */
class FASTDEPLOY_DECL RKYOLOPostprocessor {
 public:
  /** \brief Create a postprocessor instance; call SetModelType() before Run()
   */
  RKYOLOPostprocessor();
  /** \brief Process the result of runtime and fill to DetectionResult structure
   *
   * \param[in] tensors The inference result from runtime
   * \param[in] results The output detection results, one per batch image
   * \return true if the postprocess successed, otherwise false
   */
  bool Run(const std::vector<FDTensor>& tensors,
           std::vector<DetectionResult>* results);
  /// Set nms_threshold, default 0.45
  void SetNMSThreshold(const float& nms_threshold) {
    nms_threshold_ = nms_threshold;
  }
  /// Set conf_threshold, default 0.25
  void SetConfThreshold(const float& conf_threshold) {
    conf_threshold_ = conf_threshold;
  }
  /// Get conf_threshold, default 0.25
  float GetConfThreshold() const { return conf_threshold_; }
  /// Get nms_threshold, default 0.45
  float GetNMSThreshold() const { return nms_threshold_; }
  // Set model_type; selects the anchor table and decode scheme
  void SetModelType(ModelType model_type);
  // Set the network input height and width used to size the output grids
  void SetHeightAndWeight(int& height, int& width) {
    height_ = height;
    width_ = width;
  }
  // Set per-image letterbox padding (h, w) recorded by the preprocessor
  void SetPadHWValues(std::vector<std::vector<int>> pad_hw_values) {
    pad_hw_values_ = pad_hw_values;
  }
  // Set per-image resize scale recorded by the preprocessor
  void SetScale(std::vector<float> scale) { scale_ = scale; }
 private:
  ModelType model_type_ = ModelType::UNKNOWN;
  // Anchor (w, h) pairs, 3 per branch; overwritten by SetModelType().
  std::vector<int> anchors_ = {10, 13, 16, 30, 33, 23, 30, 61, 62,
                               45, 59, 119, 116, 90, 156, 198, 373, 326};
  // Downsampling stride of each output branch.
  int strides_[3] = {8, 16, 32};
  int height_ = 0;
  int width_ = 0;
  int anchor_per_branch_ = 0;
  // Process Int8 Model
  int ProcessInt8(int8_t* input, int* anchor, int grid_h, int grid_w,
                  int stride, std::vector<float>& boxes,
                  std::vector<float>& boxScores, std::vector<int>& classId,
                  float threshold, int32_t zp, float scale);
  // Descending sort of scores with a mirrored index permutation.
  int QuickSortIndiceInverse(std::vector<float>& input, int left, int right,
                             std::vector<int>& indices);
  // post_process values
  std::vector<std::vector<int>> pad_hw_values_;
  std::vector<float> scale_;
  float nms_threshold_ = 0.45;
  float conf_threshold_ = 0.25;
  // Channels per anchor (4 box + 1 objectness + obj_class_num classes).
  int prob_box_size = 85;
  int obj_class_num = 80;
  // Cap on boxes emitted per image.
  int obj_num_bbox_max_size = 200;
};
} // namespace detection
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,127 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/detection/contrib/rknpu2/preprocessor.h"
#include "fastdeploy/function/concat.h"
namespace fastdeploy {
namespace vision {
namespace detection {
// Default preprocessing configuration: 640x640 target size, gray (114)
// letterbox padding, full padding to the target size, up-scaling allowed.
RKYOLOPreprocessor::RKYOLOPreprocessor() {
  size_ = {640, 640};
  padding_value_ = {114.0, 114.0, 114.0};
  is_mini_pad_ = false;
  is_no_pad_ = false;
  is_scale_up_ = true;
  stride_ = 32;
  max_wh_ = 7680.0;
}
// Resize the image with unchanged aspect ratio and pad it to size_
// (YOLO-style letterbox). Appends the applied scale and (pad_h, pad_w) to
// scale_ / pad_hw_values_ so the postprocessor can map boxes back to the
// original image. (Fix: removed leftover debug std::cout statements.)
void RKYOLOPreprocessor::LetterBox(FDMat* mat) {
  float scale =
      std::min(size_[1] * 1.0 / mat->Height(), size_[0] * 1.0 / mat->Width());
  if (!is_scale_up_) {
    // Only allow zooming out when scale-up is disabled.
    scale = std::min(scale, 1.0f);
  }
  scale_.push_back(scale);
  int resize_h = int(round(mat->Height() * scale));
  int resize_w = int(round(mat->Width() * scale));
  int pad_w = size_[0] - resize_w;
  int pad_h = size_[1] - resize_h;
  if (is_mini_pad_) {
    // Pad only up to the next multiple of stride_.
    pad_h = pad_h % stride_;
    pad_w = pad_w % stride_;
  } else if (is_no_pad_) {
    // No padding: stretch to the exact target size instead.
    pad_h = 0;
    pad_w = 0;
    resize_h = size_[1];
    resize_w = size_[0];
  }
  pad_hw_values_.push_back({pad_h, pad_w});
  if (std::fabs(scale - 1.0f) > 1e-06) {
    Resize::Run(mat, resize_w, resize_h);
  }
  if (pad_h > 0 || pad_w > 0) {
    // Split the padding evenly between opposite sides; the +/-0.1 rounding
    // keeps top+bottom == pad_h and left+right == pad_w for odd padding.
    float half_h = pad_h * 1.0 / 2;
    int top = int(round(half_h - 0.1));
    int bottom = int(round(half_h + 0.1));
    float half_w = pad_w * 1.0 / 2;
    int left = int(round(half_w - 0.1));
    int right = int(round(half_w + 0.1));
    Pad::Run(mat, top, bottom, left, right, padding_value_);
  }
}
// Preprocess a single image in place and expose it as a tensor.
// Steps: 1. letterbox to the target size, 2. BGR -> RGB. No normalization
// is applied here — presumably mean/std handling lives inside the RKNN
// model; verify against the model conversion config. The output tensor
// shares the mat's buffer (no copy) and gains a leading batch dimension.
bool RKYOLOPreprocessor::Preprocess(FDMat* mat, FDTensor* output) {
  // process after image load
  // float ratio = std::min(size_[1] * 1.0f / static_cast<float>(mat->Height()),
  //                        size_[0] * 1.0f / static_cast<float>(mat->Width()));
  // if (std::fabs(ratio - 1.0f) > 1e-06) {
  //   int interp = cv::INTER_AREA;
  //   if (ratio > 1.0) {
  //     interp = cv::INTER_LINEAR;
  //   }
  //   int resize_h = int(mat->Height() * ratio);
  //   int resize_w = int(mat->Width() * ratio);
  //   Resize::Run(mat, resize_w, resize_h, -1, -1, interp);
  // }
  // RKYOLO's preprocess steps
  // 1. letterbox
  // 2. convert_and_permute(swap_rb=true)
  LetterBox(mat);
  BGR2RGB::Run(mat);
  mat->ShareWithTensor(output);
  output->ExpandDim(0);  // reshape to n, h, w, c
  return true;
}
bool RKYOLOPreprocessor::Run(std::vector<FDMat>* images,
std::vector<FDTensor>* outputs) {
if (images->size() == 0) {
FDERROR << "The size of input images should be greater than 0."
<< std::endl;
return false;
}
outputs->resize(1);
// Concat all the preprocessed data to a batch tensor
std::vector<FDTensor> tensors(images->size());
for (size_t i = 0; i < images->size(); ++i) {
if (!Preprocess(&(*images)[i], &tensors[i])) {
FDERROR << "Failed to preprocess input image." << std::endl;
return false;
}
}
if (tensors.size() == 1) {
(*outputs)[0] = std::move(tensors[0]);
} else {
function::Concat(tensors, &((*outputs)[0]), 0);
}
return true;
}
} // namespace detection
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,100 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
namespace fastdeploy {
namespace vision {
namespace detection {
/*! @brief Preprocessor object for the RKYOLO series models
 * (letterbox resize + BGR->RGB; no normalization is done on the host).
 */
class FASTDEPLOY_DECL RKYOLOPreprocessor {
 public:
  /** \brief Create a preprocessor instance for the RKYOLO series models
   */
  RKYOLOPreprocessor();
  /** \brief Process the input image and prepare input tensors for runtime
   *
   * \param[in] images The input image data list, all the elements are returned by cv::imread()
   * \param[in] outputs The output tensors which will feed in runtime
   * \return true if the preprocess successed, otherwise false
   */
  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs);
  /// Set target size, tuple of (width, height), default size = {640, 640}
  void SetSize(const std::vector<int>& size) { size_ = size; }
  /// Get target size, tuple of (width, height), default size = {640, 640}
  std::vector<int> GetSize() const { return size_; }
  /// Set padding value, size should be the same as channels
  void SetPaddingValue(const std::vector<float>& padding_value) {
    padding_value_ = padding_value;
  }
  /// Get padding value, size should be the same as channels
  std::vector<float> GetPaddingValue() const { return padding_value_; }
  /// Set is_scale_up, if is_scale_up is false, the input image only
  /// can be zoom out, the maximum resize scale cannot exceed 1.0, default true
  void SetScaleUp(bool is_scale_up) { is_scale_up_ = is_scale_up; }
  /// Get is_scale_up, default true
  bool GetScaleUp() const { return is_scale_up_; }
  /// Get the (pad_h, pad_w) recorded per preprocessed image (for the
  /// postprocessor to undo the letterbox padding)
  std::vector<std::vector<int>> GetPadHWValues() const {
    return pad_hw_values_;
  }
  /// Get the resize scale recorded per preprocessed image
  std::vector<float> GetScale() const { return scale_; }
 protected:
  bool Preprocess(FDMat* mat, FDTensor* output);
  void LetterBox(FDMat* mat);
  // target size, tuple of (width, height), default size = {640, 640}
  std::vector<int> size_;
  // padding value, size should be the same as channels
  std::vector<float> padding_value_;
  // only pad to the minimum rectange which height and width is times of stride
  bool is_mini_pad_;
  // while is_mini_pad = false and is_no_pad = true,
  // will resize the image to the set size
  bool is_no_pad_;
  // if is_scale_up is false, the input image only can be zoom out,
  // the maximum resize scale cannot exceed 1.0
  bool is_scale_up_;
  // padding stride, for is_mini_pad
  int stride_;
  // for offseting the boxes by classes when using NMS
  float max_wh_;
  // Per-image (pad_h, pad_w) and scale, appended by LetterBox().
  std::vector<std::vector<int>> pad_hw_values_;
  std::vector<float> scale_;
};
} // namespace detection
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,73 @@
#include "fastdeploy/vision/detection/contrib/rknpu2/rkyolo.h"
namespace fastdeploy {
namespace vision {
namespace detection {
// Construct an RKYOLO model from an RKNN file and initialize the runtime.
// Only the RKNPU2 backend is supported: any other model format leaves every
// backend list empty, so Initialize() will fail.
RKYOLO::RKYOLO(const std::string& model_file,
               const fastdeploy::RuntimeOption& custom_option,
               const fastdeploy::ModelFormat& model_format) {
  if (model_format == ModelFormat::RKNN) {
    valid_cpu_backends = {};
    valid_gpu_backends = {};
    valid_rknpu_backends = {Backend::RKNPU2};
  } else {
    FDERROR << "RKYOLO Only Support run in RKNPU2" << std::endl;
  }
  runtime_option = custom_option;
  runtime_option.model_format = model_format;
  runtime_option.model_file = model_file;
  // Initialize() sets up the backend and pushes the input size to the
  // postprocessor; its result is exposed through `initialized`.
  initialized = Initialize();
}
// Initialize the runtime backend and hand the network input size to the
// postprocessor (which needs it to size the output grids).
bool RKYOLO::Initialize() {
  if (!InitRuntime()) {
    FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
    return false;
  }
  auto size = GetPreprocessor().GetSize();
  // GetSize() is documented as (width, height) while SetHeightAndWeight
  // expects (height, width). The original code passed size[0] (width) as
  // the height — harmless for the square 640x640 default, wrong otherwise.
  GetPostprocessor().SetHeightAndWeight(size[1], size[0]);
  return true;
}
// Run detection on a single image by delegating to BatchPredict with a
// one-element batch and moving out the single result.
bool RKYOLO::Predict(const cv::Mat& im,
                     DetectionResult* result) {
  std::vector<DetectionResult> batch_results;
  bool ok = BatchPredict({im}, &batch_results);
  if (!ok) {
    return false;
  }
  *result = std::move(batch_results[0]);
  return true;
}
// Run detection on a batch of images: preprocess, forward the letterbox
// pad/scale bookkeeping to the postprocessor, run inference, and decode the
// outputs into DetectionResults.
// (Fix: removed an unused local copy of the pad values and a leftover
// debug std::cout.)
bool RKYOLO::BatchPredict(const std::vector<cv::Mat>& images,
                          std::vector<DetectionResult>* results) {
  std::vector<FDMat> fd_images = WrapMat(images);
  if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
    FDERROR << "Failed to preprocess the input image." << std::endl;
    return false;
  }
  // The postprocessor needs the per-image padding and scale recorded during
  // letterboxing to map boxes back onto the original images.
  postprocessor_.SetPadHWValues(preprocessor_.GetPadHWValues());
  postprocessor_.SetScale(preprocessor_.GetScale());
  reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
  if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
    FDERROR << "Failed to inference by runtime." << std::endl;
    return false;
  }
  if (!postprocessor_.Run(reused_output_tensors_, results)) {
    FDERROR << "Failed to postprocess the inference results by runtime."
            << std::endl;
    return false;
  }
  return true;
}
} // namespace detection
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,64 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/detection/contrib/rknpu2/postprocessor.h"
#include "fastdeploy/vision/detection/contrib/rknpu2/preprocessor.h"
namespace fastdeploy {
namespace vision {
namespace detection {
// Base class for the RKYOLO series detectors (RKYOLOv5/v7/X on RKNPU2).
// Owns the shared pre/postprocessor; subclasses only select the model type.
class FASTDEPLOY_DECL RKYOLO : public FastDeployModel {
 public:
  RKYOLO(const std::string& model_file,
         const RuntimeOption& custom_option = RuntimeOption(),
         const ModelFormat& model_format = ModelFormat::RKNN);
  std::string ModelName() const { return "RKYOLO"; }
  /** \brief Predict the detection result for an input image
   *
   * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
   * \param[in] result The output detection result will be writen to this structure
   * \return true if the prediction successed, otherwise false
   */
  virtual bool Predict(const cv::Mat& img, DetectionResult* result);
  /** \brief Predict the detection results for a batch of input images
   *
   * \param[in] imgs, The input image list, each element comes from cv::imread()
   * \param[in] results The output detection result list
   * \return true if the prediction successed, otherwise false
   */
  virtual bool BatchPredict(const std::vector<cv::Mat>& imgs,
                            std::vector<DetectionResult>* results);
  /// Get preprocessor reference of RKYOLO
  RKYOLOPreprocessor& GetPreprocessor() { return preprocessor_; }
  /// Get postprocessor reference of RKYOLO
  RKYOLOPostprocessor& GetPostprocessor() { return postprocessor_; }
 protected:
  // Sets up the runtime and passes the input size to the postprocessor.
  bool Initialize();
  RKYOLOPreprocessor preprocessor_;
  RKYOLOPostprocessor postprocessor_;
};
} // namespace detection
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,95 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/pybind/main.h"
namespace fastdeploy {
// Python bindings for the RKYOLO preprocessor, postprocessor and the
// RKYOLOV5 model. (Fix: the runtime_error messages previously named the
// wrong classes — "PaddleClasPreprocessor" / "RKYOLOV5Postprocessor".)
void BindRKYOLO(pybind11::module& m) {
  pybind11::class_<vision::detection::RKYOLOPreprocessor>(
      m, "RKYOLOPreprocessor")
      .def(pybind11::init<>())
      .def("run", [](vision::detection::RKYOLOPreprocessor& self,
                     std::vector<pybind11::array>& im_list) {
        std::vector<vision::FDMat> images;
        for (size_t i = 0; i < im_list.size(); ++i) {
          images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
        }
        std::vector<FDTensor> outputs;
        if (!self.Run(&images, &outputs)) {
          throw std::runtime_error("Failed to preprocess the input data in RKYOLOPreprocessor.");
        }
        // Detach the returned tensors from the mats' buffers before the
        // mats go out of scope.
        for (size_t i = 0; i < outputs.size(); ++i) {
          outputs[i].StopSharing();
        }
        return outputs;
      })
      .def_property("size", &vision::detection::RKYOLOPreprocessor::GetSize,
                    &vision::detection::RKYOLOPreprocessor::SetSize)
      .def_property("padding_value", &vision::detection::RKYOLOPreprocessor::GetPaddingValue,
                    &vision::detection::RKYOLOPreprocessor::SetPaddingValue)
      .def_property("is_scale_up", &vision::detection::RKYOLOPreprocessor::GetScaleUp,
                    &vision::detection::RKYOLOPreprocessor::SetScaleUp);
  pybind11::class_<vision::detection::RKYOLOPostprocessor>(
      m, "RKYOLOPostprocessor")
      .def(pybind11::init<>())
      .def("run", [](vision::detection::RKYOLOPostprocessor& self,
                     std::vector<FDTensor>& inputs) {
        std::vector<vision::DetectionResult> results;
        if (!self.Run(inputs, &results)) {
          throw std::runtime_error("Failed to postprocess the runtime result in RKYOLOPostprocessor.");
        }
        return results;
      })
      .def("run", [](vision::detection::RKYOLOPostprocessor& self,
                     std::vector<pybind11::array>& input_array) {
        std::vector<vision::DetectionResult> results;
        std::vector<FDTensor> inputs;
        PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
        if (!self.Run(inputs, &results)) {
          throw std::runtime_error("Failed to postprocess the runtime result in RKYOLOPostprocessor.");
        }
        return results;
      })
      .def_property("conf_threshold", &vision::detection::RKYOLOPostprocessor::GetConfThreshold,
                    &vision::detection::RKYOLOPostprocessor::SetConfThreshold)
      .def_property("nms_threshold", &vision::detection::RKYOLOPostprocessor::GetNMSThreshold,
                    &vision::detection::RKYOLOPostprocessor::SetNMSThreshold);
  pybind11::class_<vision::detection::RKYOLOV5, FastDeployModel>(m, "RKYOLOV5")
      .def(pybind11::init<std::string,
                          RuntimeOption,
                          ModelFormat>())
      .def("predict",
           [](vision::detection::RKYOLOV5& self,
              pybind11::array& data) {
             auto mat = PyArrayToCvMat(data);
             vision::DetectionResult res;
             self.Predict(mat, &res);
             return res;
           })
      .def("batch_predict", [](vision::detection::RKYOLOV5& self,
                               std::vector<pybind11::array>& data) {
        std::vector<cv::Mat> images;
        for (size_t i = 0; i < data.size(); ++i) {
          images.push_back(PyArrayToCvMat(data[i]));
        }
        std::vector<vision::DetectionResult> results;
        self.BatchPredict(images, &results);
        return results;
      })
      .def_property_readonly("preprocessor", &vision::detection::RKYOLOV5::GetPreprocessor)
      .def_property_readonly("postprocessor", &vision::detection::RKYOLOV5::GetPostprocessor);
}
} // namespace fastdeploy

View File

@@ -0,0 +1,93 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/detection/contrib/rknpu2/utils.h"
// Clamp val into [min, max]; values at or below min map to min, values at
// or above max map to max.
float clamp(float val, int min, int max) {
  if (val <= min) {
    return min;
  }
  if (val >= max) {
    return max;
  }
  return val;
}
float Sigmoid(float x) { return 1.0 / (1.0 + expf(-x)); }
float UnSigmoid(float y) { return -1.0 * logf((1.0 / y) - 1.0); }
// Clamp a float into [min, max], then truncate to int32 on return.
// NOTE(review): the double-underscore name is reserved for the
// implementation, and utils.h declares this as `inline static` while the
// definition here is plain `inline` — verify the intended linkage; a
// rename would need a coordinated header change.
inline int32_t __clip(float val, float min, float max) {
  float f = val <= min ? min : (val >= max ? max : val);
  return f;
}
// Quantize a float32 value to int8 using affine quantization parameters
// (zero point zp, scale): truncate f32/scale + zp, clipped to [-128, 127].
int8_t QntF32ToAffine(float f32, int32_t zp, float scale) {
  float dst_val = (f32 / scale) + zp;
  int8_t res = (int8_t)__clip(dst_val, -128, 127);
  return res;
}
// Dequantize an int8 value with affine quantization parameters (zero point
// zp, scale) back to float32.
float DeqntAffineToF32(int8_t qnt, int32_t zp, float scale) {
  const float centered = static_cast<float>(qnt) - static_cast<float>(zp);
  return centered * scale;
}
// IoU (intersection over union) of two axis-aligned boxes given as corner
// coordinates. The +1.0 terms treat coordinates as inclusive pixel indices.
static float CalculateOverlap(float xmin0, float ymin0, float xmax0,
                              float ymax0, float xmin1, float ymin1,
                              float xmax1, float ymax1) {
  const float inter_w = fmax(0.f, fmin(xmax0, xmax1) - fmax(xmin0, xmin1) + 1.0);
  const float inter_h = fmax(0.f, fmin(ymax0, ymax1) - fmax(ymin0, ymin1) + 1.0);
  const float inter = inter_w * inter_h;
  const float area0 = (xmax0 - xmin0 + 1.0) * (ymax0 - ymin0 + 1.0);
  const float area1 = (xmax1 - xmin1 + 1.0) * (ymax1 - ymin1 + 1.0);
  const float uni = area0 + area1 - inter;
  return uni <= 0.f ? 0.f : (inter / uni);
}
// Non-maximum suppression over boxes stored as (x, y, w, h) quadruples in
// outputLocations. `order` holds indices sorted by descending score; for
// each surviving box, any later box whose IoU exceeds `threshold` is
// suppressed by setting its entry in `order` to -1 (in place). When
// class_agnostic is false, only boxes with the same class id suppress each
// other. Always returns 0.
int NMS(int validCount,
        std::vector<float> &outputLocations,
        std::vector<int> &class_id,
        std::vector<int> &order,
        float threshold,
        bool class_agnostic)
{
    // printf("class_agnostic: %d\n", class_agnostic);
    for (int i = 0; i < validCount; ++i)
    {
        // Skip boxes already suppressed by an earlier (higher-score) box.
        if (order[i] == -1)
        {
            continue;
        }
        int n = order[i];
        for (int j = i + 1; j < validCount; ++j)
        {
            int m = order[j];
            if (m == -1)
            {
                continue;
            }
            if (!class_agnostic && class_id[n] != class_id[m]){
                continue;
            }
            // Convert (x, y, w, h) to corner coordinates for the IoU test.
            float xmin0 = outputLocations[n * 4 + 0];
            float ymin0 = outputLocations[n * 4 + 1];
            float xmax0 = outputLocations[n * 4 + 0] + outputLocations[n * 4 + 2];
            float ymax0 = outputLocations[n * 4 + 1] + outputLocations[n * 4 + 3];
            float xmin1 = outputLocations[m * 4 + 0];
            float ymin1 = outputLocations[m * 4 + 1];
            float xmax1 = outputLocations[m * 4 + 0] + outputLocations[m * 4 + 2];
            float ymax1 = outputLocations[m * 4 + 1] + outputLocations[m * 4 + 3];
            float iou = CalculateOverlap(xmin0, ymin0, xmax0, ymax0, xmin1, ymin1, xmax1, ymax1);
            if (iou > threshold)
            {
                order[j] = -1;
            }
        }
    }
    return 0;
}

View File

@@ -0,0 +1,26 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <vector>
// Supported RKYOLO detector families; UNKNOWN means the postprocessor's
// SetModelType() has not been called yet.
typedef enum { RKYOLOX = 0, RKYOLOV5, RKYOLOV7, UNKNOWN } ModelType;
// Clamp val into [min, max].
float clamp(float val, int min, int max);
// Logistic function 1 / (1 + e^-x) and its inverse (logit).
float Sigmoid(float x);
float UnSigmoid(float y);
// NOTE(review): `__clip` is a reserved identifier and is declared
// `inline static` here but defined (non-static) in utils.cc — verify the
// intended linkage before using it from other translation units.
inline static int32_t __clip(float val, float min, float max);
// Affine int8 quantize / dequantize helpers (zero point zp, scale).
int8_t QntF32ToAffine(float f32, int32_t zp, float scale);
float DeqntAffineToF32(int8_t qnt, int32_t zp, float scale);
// In-place NMS over (x, y, w, h) boxes; suppressed entries in `order`
// are set to -1.
int NMS(int validCount, std::vector<float>& outputLocations,
        std::vector<int>& class_id, std::vector<int>& order, float threshold,
        bool class_agnostic);

View File

@@ -27,6 +27,7 @@ void BindNanoDetPlus(pybind11::module& m);
void BindPPDet(pybind11::module& m);
void BindYOLOv7End2EndTRT(pybind11::module& m);
void BindYOLOv7End2EndORT(pybind11::module& m);
void BindRKYOLO(pybind11::module& m);
void BindDetection(pybind11::module& m) {
auto detection_module =
@@ -42,5 +43,6 @@ void BindDetection(pybind11::module& m) {
BindNanoDetPlus(detection_module);
BindYOLOv7End2EndTRT(detection_module);
BindYOLOv7End2EndORT(detection_module);
BindRKYOLO(detection_module);
}
} // namespace fastdeploy