FastDeploy/fastdeploy/vision/detection/contrib/yolov5/preprocessor.h
[Model] Refactor YOLOv5 module (#562)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
namespace fastdeploy {
namespace vision {
namespace detection {
/*! @brief Preprocessor object for YOLOv5 series models.
 */
class FASTDEPLOY_DECL YOLOv5Preprocessor {
 public:
  /** \brief Create a preprocessor instance for YOLOv5 series models
   */
  YOLOv5Preprocessor();
  /** \brief Process the input images and prepare the input tensors for the runtime
   *
   * \param[in] images The input image data list, all the elements are returned by cv::imread()
   * \param[in] outputs The output tensors which will be fed into the runtime
   * \param[in] ims_info The shape info list, recording each image's input_shape and output_shape
   * \return true if the preprocess succeeded, otherwise false
   */
  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
           std::vector<std::map<std::string, std::array<float, 2>>>* ims_info);
  /// Set target size, tuple of (width, height), default size = {640, 640}
  void SetSize(const std::vector<int>& size) { size_ = size; }
  /// Get target size, tuple of (width, height), default size = {640, 640}
  std::vector<int> GetSize() const { return size_; }
  /// Set padding value, its size should equal the number of image channels
  void SetPaddingValue(const std::vector<float>& padding_value) {
    padding_value_ = padding_value;
  }
  /// Get padding value, its size should equal the number of image channels
  std::vector<float> GetPaddingValue() const { return padding_value_; }

 protected:
  bool Preprocess(FDMat* mat, FDTensor* output,
                  std::map<std::string, std::array<float, 2>>* im_info);

  void LetterBox(FDMat* mat);

  // target size, tuple of (width, height), default size = {640, 640}
  std::vector<int> size_;

  // padding value, its size should equal the number of image channels
  std::vector<float> padding_value_;

  // only pad to the minimum rectangle whose height and width are multiples of stride
  bool is_mini_pad_;

  // when is_mini_pad_ = false and is_no_pad_ = true,
  // resize the image directly to the target size without padding
  bool is_no_pad_;

  // if is_scale_up_ is false, the input image can only be scaled down,
  // i.e. the resize scale cannot exceed 1.0
  bool is_scale_up_;

  // padding stride, used when is_mini_pad_ is true
  int stride_;

  // used to offset boxes by class id when running class-aware NMS
  float max_wh_;
};
} // namespace detection
} // namespace vision
} // namespace fastdeploy
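
Below is a minimal usage sketch (not part of the header) showing how this preprocessor can be driven on its own. It assumes OpenCV for image loading and FastDeploy's WrapMat helper for converting cv::Mat into FDMat; only the methods declared above are otherwise relied on, so treat the helper name and include paths as assumptions rather than a verified end-to-end example.

// Usage sketch (assumption-heavy): WrapMat and the file names below are assumed;
// the preprocessor calls themselves match the declarations in this header.
#include <array>
#include <iostream>
#include <map>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>

#include "fastdeploy/vision/detection/contrib/yolov5/preprocessor.h"

int main() {
  namespace fdvis = fastdeploy::vision;

  // Load a small batch of images with OpenCV (assumed to exist on disk).
  std::vector<cv::Mat> images = {cv::imread("000001.jpg"), cv::imread("000002.jpg")};

  // Wrap cv::Mat into FDMat (WrapMat is assumed to be FastDeploy's helper for this).
  std::vector<fdvis::FDMat> mats = fdvis::WrapMat(images);

  fdvis::detection::YOLOv5Preprocessor preprocessor;
  preprocessor.SetSize({640, 640});                        // target (width, height)
  preprocessor.SetPaddingValue({114.0f, 114.0f, 114.0f});  // YOLOv5-style gray padding

  std::vector<fastdeploy::FDTensor> inputs;
  std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
  if (!preprocessor.Run(&mats, &inputs, &ims_info)) {
    std::cerr << "YOLOv5 preprocess failed." << std::endl;
    return -1;
  }

  // inputs[0] now holds the batched NCHW tensor to feed the runtime, and ims_info
  // records each image's original and resized shape for the postprocessor.
  std::cout << "Preprocessed " << ims_info.size() << " images." << std::endl;
  return 0;
}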
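
The protected flags above describe the usual YOLOv5 letterbox geometry. The standalone sketch below illustrates that arithmetic (scale capped at 1.0 when scaling up is disabled, padding optionally reduced to a multiple of the stride); it is an illustration of the documented semantics, not FastDeploy's internal LetterBox implementation, and PlanLetterBox is a hypothetical helper name.

// Illustrative letterbox geometry, mirroring the member comments above.
#include <algorithm>
#include <cmath>

struct LetterBoxPlan {
  float scale;  // resize ratio applied to both dimensions
  float pad_w;  // total horizontal padding in pixels (split left/right)
  float pad_h;  // total vertical padding in pixels (split top/bottom)
};

LetterBoxPlan PlanLetterBox(int img_w, int img_h, int target_w, int target_h,
                            bool is_mini_pad, bool is_scale_up, int stride) {
  // Keep the aspect ratio: pick the scale that fits the image inside the target.
  float scale = std::min(target_w / static_cast<float>(img_w),
                         target_h / static_cast<float>(img_h));
  // If scaling up is disabled, never enlarge the image (scale capped at 1.0).
  if (!is_scale_up) scale = std::min(scale, 1.0f);
  float pad_w = target_w - std::round(img_w * scale);
  float pad_h = target_h - std::round(img_h * scale);
  if (is_mini_pad) {
    // Pad only up to the nearest multiple of stride instead of the full target size.
    pad_w = std::fmod(pad_w, static_cast<float>(stride));
    pad_h = std::fmod(pad_h, static_cast<float>(stride));
  }
  return {scale, pad_w, pad_h};
}

With is_no_pad_ enabled, the image would instead be resized directly to the target size, so both padding terms would be zero.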