From 9cd00ad4c563abe7de0e9aa7c7288f7ba6188bb8 Mon Sep 17 00:00:00 2001 From: guxukai <44280887+GodIsBoom@users.noreply.github.com> Date: Wed, 8 Feb 2023 11:19:00 +0800 Subject: [PATCH] [Model] Refactoring code of YOLOv5Cls with new model type (#1237) * Refactoring code of YOLOv5Cls with new model type * fix reviewed problem * Normalize&HWC2CHW -> NormalizeAndPermute * remove cast() --- .../classification/yolov5cls/cpp/infer.cc | 9 +- .../classification/yolov5cls/python/infer.py | 3 +- fastdeploy/vision.h | 2 +- .../classification/contrib/yolov5cls.cc | 116 ------------------ .../vision/classification/contrib/yolov5cls.h | 70 ----------- .../contrib/yolov5cls/postprocessor.cc | 58 +++++++++ .../contrib/yolov5cls/postprocessor.h | 56 +++++++++ .../contrib/yolov5cls/preprocessor.cc | 88 +++++++++++++ .../contrib/yolov5cls/preprocessor.h | 57 +++++++++ .../contrib/yolov5cls/yolov5cls.cc | 80 ++++++++++++ .../contrib/yolov5cls/yolov5cls.h | 76 ++++++++++++ .../contrib/yolov5cls/yolov5cls_pybind.cc | 84 +++++++++++++ .../contrib/yolov5cls_pybind.cc | 32 ----- .../classification/contrib/yolov5cls.py | 110 +++++++++++++---- 14 files changed, 593 insertions(+), 248 deletions(-) delete mode 100755 fastdeploy/vision/classification/contrib/yolov5cls.cc delete mode 100755 fastdeploy/vision/classification/contrib/yolov5cls.h create mode 100644 fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.cc create mode 100644 fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.h create mode 100644 fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.cc create mode 100644 fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.h create mode 100755 fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.cc create mode 100755 fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.h create mode 100755 fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls_pybind.cc delete mode 100755 fastdeploy/vision/classification/contrib/yolov5cls_pybind.cc diff --git a/examples/vision/classification/yolov5cls/cpp/infer.cc b/examples/vision/classification/yolov5cls/cpp/infer.cc index 2920c95b0..048964f1e 100644 --- a/examples/vision/classification/yolov5cls/cpp/infer.cc +++ b/examples/vision/classification/yolov5cls/cpp/infer.cc @@ -27,10 +27,9 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::ClassifyResult res; - if (!model.Predict(&im, &res)) { + if (!model.Predict(im, &res)) { std::cerr << "Failed to predict." << std::endl; return; } @@ -48,10 +47,9 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::ClassifyResult res; - if (!model.Predict(&im, &res)) { + if (!model.Predict(im, &res)) { std::cerr << "Failed to predict." << std::endl; return; } @@ -71,10 +69,9 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::ClassifyResult res; - if (!model.Predict(&im, &res)) { + if (!model.Predict(im, &res)) { std::cerr << "Failed to predict." 
              << std::endl;
     return;
   }
diff --git a/examples/vision/classification/yolov5cls/python/infer.py b/examples/vision/classification/yolov5cls/python/infer.py
index 55974a764..8be218b2e 100644
--- a/examples/vision/classification/yolov5cls/python/infer.py
+++ b/examples/vision/classification/yolov5cls/python/infer.py
@@ -44,8 +44,9 @@ args = parse_arguments()
 runtime_option = build_option(args)
 model = fd.vision.classification.YOLOv5Cls(
     args.model, runtime_option=runtime_option)
+model.postprocessor.topk = args.topk
 
 # Predict the classification result of the image
 im = cv2.imread(args.image)
-result = model.predict(im, args.topk)
+result = model.predict(im)
 print(result)
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index 024302a56..9051c66fc 100644
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -16,7 +16,7 @@
 #include "fastdeploy/core/config.h"
 #ifdef ENABLE_VISION
 #include "fastdeploy/vision/classification/contrib/resnet.h"
-#include "fastdeploy/vision/classification/contrib/yolov5cls.h"
+#include "fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.h"
 #include "fastdeploy/vision/classification/ppcls/model.h"
 #include "fastdeploy/vision/detection/contrib/nanodet_plus.h"
 #include "fastdeploy/vision/detection/contrib/scaledyolov4.h"
diff --git a/fastdeploy/vision/classification/contrib/yolov5cls.cc b/fastdeploy/vision/classification/contrib/yolov5cls.cc
deleted file mode 100755
index 8dfc0a9a0..000000000
--- a/fastdeploy/vision/classification/contrib/yolov5cls.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/classification/contrib/yolov5cls.h"
-
-#include "fastdeploy/utils/perf.h"
-#include "fastdeploy/vision/utils/utils.h"
-
-namespace fastdeploy {
-namespace vision {
-namespace classification {
-
-YOLOv5Cls::YOLOv5Cls(const std::string& model_file,
-                     const std::string& params_file,
-                     const RuntimeOption& custom_option,
-                     const ModelFormat& model_format) {
-  if (model_format == ModelFormat::ONNX) {
-    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
-    valid_gpu_backends = {Backend::ORT, Backend::TRT};
-  } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
-    valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
-  }
-  runtime_option = custom_option;
-  runtime_option.model_format = model_format;
-  runtime_option.model_file = model_file;
-  runtime_option.params_file = params_file;
-  initialized = Initialize();
-}
-
-bool YOLOv5Cls::Initialize() {
-  // preprocess parameters
-  size = {224, 224};
-  if (!InitRuntime()) {
-    FDERROR << "Failed to initialize fastdeploy backend."
<< std::endl; - return false; - } - return true; -} - -bool YOLOv5Cls::Preprocess(Mat* mat, FDTensor* output, - const std::vector& size) { - // CenterCrop - int crop_size = std::min(mat->Height(), mat->Width()); - CenterCrop::Run(mat, crop_size, crop_size); - Resize::Run(mat, size[0], size[1], -1, -1, cv::INTER_LINEAR); - // Normalize - BGR2RGB::Run(mat); - std::vector alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; - std::vector beta = {0.0f, 0.0f, 0.0f}; - Convert::Run(mat, alpha, beta); - std::vector mean = {0.485f, 0.456f, 0.406f}; - std::vector std = {0.229f, 0.224f, 0.225f}; - Normalize::Run(mat, mean, std, false); - HWC2CHW::Run(mat); - Cast::Run(mat, "float"); - - mat->ShareWithTensor(output); - output->shape.insert(output->shape.begin(), 1); - return true; -} - -bool YOLOv5Cls::Postprocess(const FDTensor& infer_result, - ClassifyResult* result, int topk) { - // Softmax - FDTensor infer_result_softmax; - function::Softmax(infer_result, &infer_result_softmax, 1); - int num_classes = infer_result_softmax.shape[1]; - const float* infer_result_buffer = - reinterpret_cast(infer_result_softmax.Data()); - topk = std::min(num_classes, topk); - result->label_ids = - utils::TopKIndices(infer_result_buffer, num_classes, topk); - result->scores.resize(topk); - for (int i = 0; i < topk; ++i) { - result->scores[i] = *(infer_result_buffer + result->label_ids[i]); - } - return true; -} - -bool YOLOv5Cls::Predict(cv::Mat* im, ClassifyResult* result, int topk) { - Mat mat(*im); - std::vector input_tensors(1); - if (!Preprocess(&mat, &input_tensors[0], size)) { - FDERROR << "Failed to preprocess input image." << std::endl; - return false; - } - - input_tensors[0].name = InputInfoOfRuntime(0).name; - std::vector output_tensors(1); - if (!Infer(input_tensors, &output_tensors)) { - FDERROR << "Failed to inference." << std::endl; - return false; - } - - if (!Postprocess(output_tensors[0], result, topk)) { - FDERROR << "Failed to post process." << std::endl; - return false; - } - return true; -} - -} // namespace classification -} // namespace vision -} // namespace fastdeploy diff --git a/fastdeploy/vision/classification/contrib/yolov5cls.h b/fastdeploy/vision/classification/contrib/yolov5cls.h deleted file mode 100755 index bbf93e9e4..000000000 --- a/fastdeploy/vision/classification/contrib/yolov5cls.h +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once -#include "fastdeploy/fastdeploy_model.h" -#include "fastdeploy/vision/common/processors/transform.h" -#include "fastdeploy/vision/common/result.h" - -namespace fastdeploy { -namespace vision { -/** \brief All image classification model APIs are defined inside this namespace - * - */ -namespace classification { - -/*! 
@brief YOLOv5Cls model object used when to load a YOLOv5Cls model exported by YOLOv5 - */ -class FASTDEPLOY_DECL YOLOv5Cls : public FastDeployModel { - public: - /** \brief Set path of model file and configuration file, and the configuration of runtime - * - * \param[in] model_file Path of model file, e.g yolov5cls/yolov5n-cls.onnx - * \param[in] params_file Path of parameter file, if the model format is ONNX, this parameter will be ignored - * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends` - * \param[in] model_format Model format of the loaded model, default is ONNX format - */ - YOLOv5Cls(const std::string& model_file, const std::string& params_file = "", - const RuntimeOption& custom_option = RuntimeOption(), - const ModelFormat& model_format = ModelFormat::ONNX); - - /// Get model's name - virtual std::string ModelName() const { return "yolov5cls"; } - - /** \brief Predict the classification result for an input image - * - * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format - * \param[in] result The output classification result will be writen to this structure - * \param[in] topk Returns the topk classification result with the highest predicted probability, the default is 1 - * \return true if the prediction successed, otherwise false - */ - virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1); - - /// Preprocess image size, the default is (224, 224) - std::vector size; - - private: - bool Initialize(); - /// Preprocess an input image, and set the preprocessed results to `outputs` - bool Preprocess(Mat* mat, FDTensor* output, - const std::vector& size = {224, 224}); - - /// Postprocess the inferenced results, and set the final result to `result` - bool Postprocess(const FDTensor& infer_result, ClassifyResult* result, - int topk = 1); -}; - -} // namespace classification -} // namespace vision -} // namespace fastdeploy diff --git a/fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.cc b/fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.cc new file mode 100644 index 000000000..f4c40cfc8 --- /dev/null +++ b/fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.cc @@ -0,0 +1,58 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.h" +#include "fastdeploy/vision/utils/utils.h" + +namespace fastdeploy { +namespace vision { +namespace classification { + +YOLOv5ClsPostprocessor::YOLOv5ClsPostprocessor() { + topk_ = 1; +} + +bool YOLOv5ClsPostprocessor::Run( + const std::vector &tensors, std::vector *results, + const std::vector>> &ims_info) { + int batch = tensors[0].shape[0]; + FDTensor infer_result = tensors[0]; + FDTensor infer_result_softmax; + function::Softmax(infer_result, &infer_result_softmax, 1); + results->resize(batch); + + for (size_t bs = 0; bs < batch; ++bs) { + (*results)[bs].Clear(); + // output (1,1000) score classnum 1000 + int num_classes = infer_result_softmax.shape[1]; + const float* infer_result_buffer = + reinterpret_cast(infer_result_softmax.Data()) + bs * infer_result_softmax.shape[1]; + topk_ = std::min(num_classes, topk_); + (*results)[bs].label_ids = + utils::TopKIndices(infer_result_buffer, num_classes, topk_); + (*results)[bs].scores.resize(topk_); + for (int i = 0; i < topk_; ++i) { + (*results)[bs].scores[i] = *(infer_result_buffer + (*results)[bs].label_ids[i]); + } + + if ((*results)[bs].label_ids.size() == 0) { + return true; + } + } + return true; +} + +} // namespace classification +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.h b/fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.h new file mode 100644 index 000000000..8fed59617 --- /dev/null +++ b/fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.h @@ -0,0 +1,56 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "fastdeploy/vision/common/processors/transform.h" +#include "fastdeploy/vision/common/result.h" + +namespace fastdeploy { +namespace vision { + +namespace classification { +/*! @brief Postprocessor object for YOLOv5Cls serials model. 
+ */
+class FASTDEPLOY_DECL YOLOv5ClsPostprocessor {
+ public:
+  /** \brief Create a postprocessor instance for the YOLOv5Cls series of models
+   */
+  YOLOv5ClsPostprocessor();
+
+  /** \brief Process the result of runtime and fill to ClassifyResult structure
+   *
+   * \param[in] tensors The inference result from runtime
+   * \param[in] results The output classification results
+   * \param[in] ims_info The shape info list, record input_shape and output_shape
+   * \return true if the postprocess succeeded, otherwise false
+   */
+  bool Run(const std::vector<FDTensor>& tensors,
+           std::vector<ClassifyResult>* results,
+           const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info);
+
+  /// Set topk, default 1
+  void SetTopK(const int& topk) {
+    topk_ = topk;
+  }
+
+  /// Get topk, default 1
+  int GetTopK() const { return topk_; }
+
+ protected:
+  int topk_;
+};
+
+}  // namespace classification
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.cc b/fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.cc
new file mode 100644
index 000000000..e252ba0ee
--- /dev/null
+++ b/fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.cc
@@ -0,0 +1,88 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.h"
+#include "fastdeploy/function/concat.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace classification {
+
+YOLOv5ClsPreprocessor::YOLOv5ClsPreprocessor() {
+  size_ = {224, 224};  // {h, w}
+}
+
+bool YOLOv5ClsPreprocessor::Preprocess(FDMat* mat, FDTensor* output,
+    std::map<std::string, std::array<float, 2>>* im_info) {
+  // Record the shape of image and the shape of preprocessed image
+  (*im_info)["input_shape"] = {static_cast<float>(mat->Height()),
+                               static_cast<float>(mat->Width())};
+
+  // yolov5cls's preprocess steps
+  // 1. CenterCrop
+  // 2. Normalize
+  // CenterCrop
+  int crop_size = std::min(mat->Height(), mat->Width());
+  CenterCrop::Run(mat, crop_size, crop_size);
+  Resize::Run(mat, size_[0], size_[1], -1, -1, cv::INTER_LINEAR);
+  // Normalize
+  BGR2RGB::Run(mat);
+  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
+  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
+  Convert::Run(mat, alpha, beta);
+  std::vector<float> mean = {0.485f, 0.456f, 0.406f};
+  std::vector<float> std = {0.229f, 0.224f, 0.225f};
+  NormalizeAndPermute::Run(mat, mean, std, false);
+
+  // Record output shape of preprocessed image
+  (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
+                                static_cast<float>(mat->Width())};
+
+  mat->ShareWithTensor(output);
+  output->ExpandDim(0);  // reshape to n, h, w, c
+  return true;
+}
+
+bool YOLOv5ClsPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
+    std::vector<std::map<std::string, std::array<float, 2>>>* ims_info) {
+  if (images->size() == 0) {
+    FDERROR << "The size of input images should be greater than 0."
+            << std::endl;
+    return false;
+  }
+  ims_info->resize(images->size());
+  outputs->resize(1);
+  // Concat all the preprocessed data to a batch tensor
+  std::vector<FDTensor> tensors(images->size());
+  for (size_t i = 0; i < images->size(); ++i) {
+    if (!Preprocess(&(*images)[i], &tensors[i], &(*ims_info)[i])) {
+      FDERROR << "Failed to preprocess input image." << std::endl;
+      return false;
+    }
+  }
+
+  if (tensors.size() == 1) {
+    (*outputs)[0] = std::move(tensors[0]);
+  } else {
+    function::Concat(tensors, &((*outputs)[0]), 0);
+  }
+  return true;
+}
+
+}  // namespace classification
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.h b/fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.h
new file mode 100644
index 000000000..a075df613
--- /dev/null
+++ b/fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace classification {
+/*! @brief Preprocessor object for the YOLOv5Cls series of models.
+ */
+class FASTDEPLOY_DECL YOLOv5ClsPreprocessor {
+ public:
+  /** \brief Create a preprocessor instance for the YOLOv5Cls series of models
+   */
+  YOLOv5ClsPreprocessor();
+
+  /** \brief Process the input image and prepare input tensors for runtime
+   *
+   * \param[in] images The input image data list, all the elements are returned by cv::imread()
+   * \param[in] outputs The output tensors which will feed in runtime
+   * \param[in] ims_info The shape info list, record input_shape and output_shape
+   * \return true if the preprocess succeeded, otherwise false
+   */
+  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
+           std::vector<std::map<std::string, std::array<float, 2>>>* ims_info);
+
+  /// Set target size, tuple of (width, height), default size = {224, 224}
+  void SetSize(const std::vector<int>& size) { size_ = size; }
+
+  /// Get target size, tuple of (width, height), default size = {224, 224}
+  std::vector<int> GetSize() const { return size_; }
+
+ protected:
+  bool Preprocess(FDMat* mat, FDTensor* output,
+                  std::map<std::string, std::array<float, 2>>* im_info);
+
+  // target size, tuple of (width, height), default size = {224, 224}
+  std::vector<int> size_;
+};
+
+}  // namespace classification
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.cc b/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.cc
new file mode 100755
index 000000000..84cb8d7b5
--- /dev/null
+++ b/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace classification {
+
+YOLOv5Cls::YOLOv5Cls(const std::string& model_file, const std::string& params_file,
+                     const RuntimeOption& custom_option,
+                     const ModelFormat& model_format) {
+  if (model_format == ModelFormat::ONNX) {
+    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};
+  } else {
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+    valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+  }
+  runtime_option = custom_option;
+  runtime_option.model_format = model_format;
+  runtime_option.model_file = model_file;
+  runtime_option.params_file = params_file;
+  initialized = Initialize();
+}
+
+bool YOLOv5Cls::Initialize() {
+  if (!InitRuntime()) {
+    FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+    return false;
+  }
+  return true;
+}
+
+bool YOLOv5Cls::Predict(const cv::Mat& im, ClassifyResult* result) {
+  std::vector<ClassifyResult> results;
+  if (!BatchPredict({im}, &results)) {
+    return false;
+  }
+  *result = std::move(results[0]);
+  return true;
+}
+
+bool YOLOv5Cls::BatchPredict(const std::vector<cv::Mat>& images,
+                             std::vector<ClassifyResult>* results) {
+  std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
+  std::vector<FDMat> fd_images = WrapMat(images);
+
+  if (!preprocessor_.Run(&fd_images, &reused_input_tensors_, &ims_info)) {
+    FDERROR << "Failed to preprocess the input image." << std::endl;
+    return false;
+  }
+
+  reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
+  if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
+    FDERROR << "Failed to inference by runtime." << std::endl;
+    return false;
+  }
+
+  if (!postprocessor_.Run(reused_output_tensors_, results, ims_info)) {
+    FDERROR << "Failed to postprocess the inference results by runtime." << std::endl;
+    return false;
+  }
+  return true;
+}
+
+}  // namespace classification
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.h b/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.h
new file mode 100755
index 000000000..5eab4eeeb
--- /dev/null
+++ b/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/classification/contrib/yolov5cls/preprocessor.h"
+#include "fastdeploy/vision/classification/contrib/yolov5cls/postprocessor.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace classification {
+/*! @brief YOLOv5Cls model object used to load a YOLOv5Cls model exported by YOLOv5.
+ */
+class FASTDEPLOY_DECL YOLOv5Cls : public FastDeployModel {
+ public:
+  /** \brief Set path of model file and the configuration of runtime.
+   *
+   * \param[in] model_file Path of model file, e.g ./yolov5cls.onnx
+   * \param[in] params_file Path of parameter file, e.g yolov5cls/model.pdiparams, if the model format is ONNX, this parameter will be ignored
+   * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
+   * \param[in] model_format Model format of the loaded model, default is ONNX format
+   */
+  YOLOv5Cls(const std::string& model_file, const std::string& params_file = "",
+            const RuntimeOption& custom_option = RuntimeOption(),
+            const ModelFormat& model_format = ModelFormat::ONNX);
+
+  std::string ModelName() const { return "yolov5cls"; }
+
+  /** \brief Predict the classification result for an input image
+   *
+   * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   * \param[in] result The output classification result will be written to this structure
+   * \return true if the prediction succeeded, otherwise false
+   */
+  virtual bool Predict(const cv::Mat& img, ClassifyResult* result);
+
+  /** \brief Predict the classification results for a batch of input images
+   *
+   * \param[in] imgs The input image list, each element comes from cv::imread()
+   * \param[in] results The output classification result list
+   * \return true if the prediction succeeded, otherwise false
+   */
+  virtual bool BatchPredict(const std::vector<cv::Mat>& imgs,
+                            std::vector<ClassifyResult>* results);
+
+  /// Get preprocessor reference of YOLOv5Cls
+  virtual YOLOv5ClsPreprocessor& GetPreprocessor() {
+    return preprocessor_;
+  }
+
+  /// Get postprocessor reference of YOLOv5Cls
+  virtual YOLOv5ClsPostprocessor& GetPostprocessor() {
+    return postprocessor_;
+  }
+
+ protected:
+  bool Initialize();
+  YOLOv5ClsPreprocessor preprocessor_;
+  YOLOv5ClsPostprocessor postprocessor_;
+};
+
+}  // namespace classification
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls_pybind.cc b/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls_pybind.cc
new file mode 100755
index 000000000..99e277d1f
--- /dev/null
+++ b/fastdeploy/vision/classification/contrib/yolov5cls/yolov5cls_pybind.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "fastdeploy/pybind/main.h" + +namespace fastdeploy { +void BindYOLOv5Cls(pybind11::module& m) { + pybind11::class_( + m, "YOLOv5ClsPreprocessor") + .def(pybind11::init<>()) + .def("run", [](vision::classification::YOLOv5ClsPreprocessor& self, std::vector& im_list) { + std::vector images; + for (size_t i = 0; i < im_list.size(); ++i) { + images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i]))); + } + std::vector outputs; + std::vector>> ims_info; + if (!self.Run(&images, &outputs, &ims_info)) { + throw std::runtime_error("raise Exception('Failed to preprocess the input data in YOLOv5ClsPreprocessor.')"); + } + for (size_t i = 0; i < outputs.size(); ++i) { + outputs[i].StopSharing(); + } + return make_pair(outputs, ims_info); + }) + .def_property("size", &vision::classification::YOLOv5ClsPreprocessor::GetSize, &vision::classification::YOLOv5ClsPreprocessor::SetSize); + + pybind11::class_( + m, "YOLOv5ClsPostprocessor") + .def(pybind11::init<>()) + .def("run", [](vision::classification::YOLOv5ClsPostprocessor& self, std::vector& inputs, + const std::vector>>& ims_info) { + std::vector results; + if (!self.Run(inputs, &results, ims_info)) { + throw std::runtime_error("raise Exception('Failed to postprocess the runtime result in YOLOv5ClsPostprocessor.')"); + } + return results; + }) + .def("run", [](vision::classification::YOLOv5ClsPostprocessor& self, std::vector& input_array, + const std::vector>>& ims_info) { + std::vector results; + std::vector inputs; + PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true); + if (!self.Run(inputs, &results, ims_info)) { + throw std::runtime_error("raise Exception('Failed to postprocess the runtime result in YOLOv5ClsPostprocessor.')"); + } + return results; + }) + .def_property("topk", &vision::classification::YOLOv5ClsPostprocessor::GetTopK, &vision::classification::YOLOv5ClsPostprocessor::SetTopK); + + pybind11::class_(m, "YOLOv5Cls") + .def(pybind11::init()) + .def("predict", + [](vision::classification::YOLOv5Cls& self, pybind11::array& data) { + auto mat = PyArrayToCvMat(data); + vision::ClassifyResult res; + self.Predict(mat, &res); + return res; + }) + .def("batch_predict", [](vision::classification::YOLOv5Cls& self, std::vector& data) { + std::vector images; + for (size_t i = 0; i < data.size(); ++i) { + images.push_back(PyArrayToCvMat(data[i])); + } + std::vector results; + self.BatchPredict(images, &results); + return results; + }) + .def_property_readonly("preprocessor", &vision::classification::YOLOv5Cls::GetPreprocessor) + .def_property_readonly("postprocessor", &vision::classification::YOLOv5Cls::GetPostprocessor); +} +} // namespace fastdeploy diff --git a/fastdeploy/vision/classification/contrib/yolov5cls_pybind.cc b/fastdeploy/vision/classification/contrib/yolov5cls_pybind.cc deleted file mode 100755 index 5a42dec38..000000000 --- a/fastdeploy/vision/classification/contrib/yolov5cls_pybind.cc +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -#include "fastdeploy/pybind/main.h" - -namespace fastdeploy { -void BindYOLOv5Cls(pybind11::module& m) { - pybind11::class_( - m, "YOLOv5Cls") - .def(pybind11::init()) - .def("predict", - [](vision::classification::YOLOv5Cls& self, pybind11::array& data, - int topk = 1) { - auto mat = PyArrayToCvMat(data); - vision::ClassifyResult res; - self.Predict(&mat, &res, topk); - return res; - }) - .def_readwrite("size", &vision::classification::YOLOv5Cls::size); -} -} // namespace fastdeploy diff --git a/python/fastdeploy/vision/classification/contrib/yolov5cls.py b/python/fastdeploy/vision/classification/contrib/yolov5cls.py index 5f401fa1d..a7d372a08 100644 --- a/python/fastdeploy/vision/classification/contrib/yolov5cls.py +++ b/python/fastdeploy/vision/classification/contrib/yolov5cls.py @@ -18,18 +18,78 @@ from .... import FastDeployModel, ModelFormat from .... import c_lib_wrap as C +class YOLOv5ClsPreprocessor: + def __init__(self): + """Create a preprocessor for YOLOv5Cls + """ + self._preprocessor = C.vision.classification.YOLOv5ClsPreprocessor() + + def run(self, input_ims): + """Preprocess input images for YOLOv5Cls + + :param: input_ims: (list of numpy.ndarray)The input image + :return: list of FDTensor + """ + return self._preprocessor.run(input_ims) + + @property + def size(self): + """ + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [224, 224] + """ + return self._preprocessor.size + + @size.setter + def size(self, wh): + assert isinstance(wh, (list, tuple)),\ + "The value to set `size` must be type of tuple or list." + assert len(wh) == 2,\ + "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format( + len(wh)) + self._preprocessor.size = wh + + +class YOLOv5ClsPostprocessor: + def __init__(self): + """Create a postprocessor for YOLOv5Cls + """ + self._postprocessor = C.vision.classification.YOLOv5ClsPostprocessor() + + def run(self, runtime_results, ims_info): + """Postprocess the runtime results for YOLOv5Cls + + :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime + :param: ims_info: (list of dict)Record input_shape and output_shape + :return: list of ClassifyResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size) + """ + return self._postprocessor.run(runtime_results, ims_info) + + @property + def topk(self): + """ + topk for postprocessing, default is 1 + """ + return self._postprocessor.topk + + @topk.setter + def topk(self, topk): + assert isinstance(topk, int),\ + "The value to set `top k` must be type of int." + self._postprocessor.topk = topk + + class YOLOv5Cls(FastDeployModel): def __init__(self, model_file, params_file="", runtime_option=None, model_format=ModelFormat.ONNX): - """Load a image classification model exported by YOLOv5. + """Load a YOLOv5Cls model exported by YOLOv5Cls. 
-        :param model_file: (str)Path of model file, e.g yolov5cls/yolov5n-cls.onnx
-        :param params_file: (str)Path of parameters file, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+        :param model_file: (str)Path of model file, e.g ./YOLOv5Cls.onnx
+        :param params_file: (str)Path of parameters file, e.g yolov5cls/model.pdiparams, if the model_format is ModelFormat.ONNX, this param will be ignored, can be set as empty string
         :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
-        :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model, default is ONNX
+        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
         """
         super(YOLOv5Cls, self).__init__(runtime_option)
@@ -37,33 +97,39 @@ class YOLOv5Cls(FastDeployModel):
         assert model_format == ModelFormat.ONNX, "YOLOv5Cls only support model format of ModelFormat.ONNX now."
         self._model = C.vision.classification.YOLOv5Cls(
             model_file, params_file, self._runtime_option, model_format)
+
         assert self.initialized, "YOLOv5Cls initialize failed."
 
-    def predict(self, input_image, topk=1):
+    def predict(self, input_image):
         """Classify an input image
 
-        :param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param topk: (int)The topk result by the classify confidence score, default 1
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
         :return: ClassifyResult
         """
+        assert input_image is not None, "Input image is None."
+        return self._model.predict(input_image)
 
-        return self._model.predict(input_image, topk)
+    def batch_predict(self, images):
+        """Classify a batch of input images
+
+        :param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return: list of ClassifyResult
+        """
+
+        return self._model.batch_predict(images)
 
     @property
-    def size(self):
-        """
-        Returns the preprocess image size, default is (224, 224)
-        """
-        return self._model.size
+    def preprocessor(self):
+        """Get YOLOv5ClsPreprocessor object of the loaded model
 
-    @size.setter
-    def size(self, wh):
+        :return: YOLOv5ClsPreprocessor
         """
-        Set the preprocess image size
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get YOLOv5ClsPostprocessor object of the loaded model
+
+        :return: YOLOv5ClsPostprocessor
         """
-        assert isinstance(wh, (list, tuple)),\
-            "The value to set `size` must be type of tuple or list."
-        assert len(wh) == 2,\
-            "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
-                len(wh))
-        self._model.size = wh
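
Usage notes for this refactor, sketched in Python: `topk` is no longer an argument of `predict()` but a property of the postprocessor, and the preprocess target size is configured on the preprocessor. This is a minimal sketch of the API added above; the model and image paths are placeholders.

```python
import cv2
import fastdeploy as fd

# Load the exported ONNX classification model (placeholder path)
model = fd.vision.classification.YOLOv5Cls("yolov5n-cls.onnx")

# topk now lives on the postprocessor instead of being a predict() argument
model.postprocessor.topk = 5
# the preprocess size is configured on the preprocessor
model.preprocessor.size = [224, 224]

im = cv2.imread("test.jpg")  # placeholder image path
result = model.predict(im)
print(result)
```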
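The new `BatchPredict`/`batch_predict` path concatenates the preprocessed images into one batch tensor and returns one `ClassifyResult` per input, in input order. A sketch with placeholder paths:

```python
import cv2
import fastdeploy as fd

model = fd.vision.classification.YOLOv5Cls("yolov5n-cls.onnx")  # placeholder path
model.postprocessor.topk = 1

# Each element is a BGR image from cv2.imread (placeholder image paths)
ims = [cv2.imread("cat.jpg"), cv2.imread("dog.jpg")]
results = model.batch_predict(ims)
for i, res in enumerate(results):
    print("image", i)
    print(res)
```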
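The decoupled preprocessor and postprocessor can also be driven on their own through their bound `run()` methods, which helps when debugging the pipeline. The sketch below stops at the inference step, which is only indicated in comments because it depends on the runtime being driven; per the pybind overloads above, the postprocessor's `run()` accepts either FDTensors or numpy arrays.

```python
import cv2
import fastdeploy as fd

model = fd.vision.classification.YOLOv5Cls("yolov5n-cls.onnx")  # placeholder path
model.postprocessor.topk = 3

im = cv2.imread("test.jpg")  # placeholder image path

# run() returns the batched, normalized NCHW input tensors plus the recorded shape info
input_tensors, ims_info = model.preprocessor.run([im])

# ... feed input_tensors to a runtime of your choice to obtain output tensors ...
# results = model.postprocessor.run(output_tensors, ims_info)
```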