[Model] Refactor PaddleClas module (#505)

* Refactor the PaddleClas module

* fix bug

* remove debug code

* clean unused code

* support pybind

* Update fd_tensor.h

* Update fd_tensor.cc

* temporary revert python api

* fix ci error

* fix code style problem
This commit is contained in:
Jason
2022-11-07 19:33:47 +08:00
committed by GitHub
parent a0a8ace174
commit 3589c0fa94
15 changed files with 527 additions and 142 deletions

0
fastdeploy/pybind/runtime.cc Executable file → Normal file
View File

View File

@@ -14,9 +14,6 @@
#include "fastdeploy/vision/classification/ppcls/model.h" #include "fastdeploy/vision/classification/ppcls/model.h"
#include "fastdeploy/vision/utils/utils.h"
#include "yaml-cpp/yaml.h"
namespace fastdeploy { namespace fastdeploy {
namespace vision { namespace vision {
namespace classification { namespace classification {
@@ -25,8 +22,7 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
const std::string& params_file, const std::string& params_file,
const std::string& config_file, const std::string& config_file,
const RuntimeOption& custom_option, const RuntimeOption& custom_option,
const ModelFormat& model_format) { const ModelFormat& model_format) : preprocessor_(config_file) {
config_file_ = config_file;
valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER, valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER,
Backend::LITE}; Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT}; valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
@@ -38,11 +34,6 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
} }
bool PaddleClasModel::Initialize() { bool PaddleClasModel::Initialize() {
if (!BuildPreprocessPipelineFromConfig()) {
FDERROR << "Failed to build preprocess pipeline from configuration file."
<< std::endl;
return false;
}
if (!InitRuntime()) { if (!InitRuntime()) {
FDERROR << "Failed to initialize fastdeploy backend." << std::endl; FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
return false; return false;
@@ -50,105 +41,41 @@ bool PaddleClasModel::Initialize() {
return true; return true;
} }
bool PaddleClasModel::BuildPreprocessPipelineFromConfig() {
processors_.clear();
YAML::Node cfg;
try {
cfg = YAML::LoadFile(config_file_);
} catch (YAML::BadFile& e) {
FDERROR << "Failed to load yaml file " << config_file_
<< ", maybe you should check this file." << std::endl;
return false;
}
auto preprocess_cfg = cfg["PreProcess"]["transform_ops"];
processors_.push_back(std::make_shared<BGR2RGB>());
for (const auto& op : preprocess_cfg) {
FDASSERT(op.IsMap(),
"Require the transform information in yaml be Map type.");
auto op_name = op.begin()->first.as<std::string>();
if (op_name == "ResizeImage") {
int target_size = op.begin()->second["resize_short"].as<int>();
bool use_scale = false;
int interp = 1;
processors_.push_back(
std::make_shared<ResizeByShort>(target_size, 1, use_scale));
} else if (op_name == "CropImage") {
int width = op.begin()->second["size"].as<int>();
int height = op.begin()->second["size"].as<int>();
processors_.push_back(std::make_shared<CenterCrop>(width, height));
} else if (op_name == "NormalizeImage") {
auto mean = op.begin()->second["mean"].as<std::vector<float>>();
auto std = op.begin()->second["std"].as<std::vector<float>>();
auto scale = op.begin()->second["scale"].as<float>();
FDASSERT((scale - 0.00392157) < 1e-06 && (scale - 0.00392157) > -1e-06,
"Only support scale in Normalize be 0.00392157, means the pixel "
"is in range of [0, 255].");
processors_.push_back(std::make_shared<Normalize>(mean, std));
} else if (op_name == "ToCHWImage") {
processors_.push_back(std::make_shared<HWC2CHW>());
} else {
FDERROR << "Unexcepted preprocess operator: " << op_name << "."
<< std::endl;
return false;
}
}
return true;
}
bool PaddleClasModel::Preprocess(Mat* mat, FDTensor* output) {
for (size_t i = 0; i < processors_.size(); ++i) {
if (!(*(processors_[i].get()))(mat)) {
FDERROR << "Failed to process image data in " << processors_[i]->Name()
<< "." << std::endl;
return false;
}
}
int channel = mat->Channels();
int width = mat->Width();
int height = mat->Height();
output->name = InputInfoOfRuntime(0).name;
output->SetExternalData({1, channel, height, width}, FDDataType::FP32,
mat->Data());
return true;
}
bool PaddleClasModel::Postprocess(const FDTensor& infer_result,
ClassifyResult* result, int topk) {
int num_classes = infer_result.shape[1];
const float* infer_result_buffer =
reinterpret_cast<const float*>(infer_result.Data());
topk = std::min(num_classes, topk);
result->label_ids =
utils::TopKIndices(infer_result_buffer, num_classes, topk);
result->scores.resize(topk);
for (int i = 0; i < topk; ++i) {
result->scores[i] = *(infer_result_buffer + result->label_ids[i]);
}
return true;
}
bool PaddleClasModel::Predict(cv::Mat* im, ClassifyResult* result, int topk) { bool PaddleClasModel::Predict(cv::Mat* im, ClassifyResult* result, int topk) {
Mat mat(*im); postprocessor_.SetTopk(topk);
std::vector<FDTensor> processed_data(1); if (!Predict(*im, result)) {
if (!Preprocess(&mat, &(processed_data[0]))) { return false;
FDERROR << "Failed to preprocess input data while using model:" }
<< ModelName() << "." << std::endl; return true;
}
bool PaddleClasModel::Predict(const cv::Mat& im, ClassifyResult* result) {
std::vector<ClassifyResult> results;
if (!BatchPredict({im}, &results)) {
return false;
}
*result = std::move(results[0]);
return true;
}
bool PaddleClasModel::BatchPredict(const std::vector<cv::Mat>& images, std::vector<ClassifyResult>* results) {
std::vector<FDMat> fd_images = WrapMat(images);
if (!preprocessor_.Run(&fd_images, &reused_input_tensors)) {
FDERROR << "Failed to preprocess the input image." << std::endl;
return false; return false;
} }
std::vector<FDTensor> infer_result(1); reused_input_tensors[0].name = InputInfoOfRuntime(0).name;
if (!Infer(processed_data, &infer_result)) { if (!Infer(reused_input_tensors, &reused_output_tensors)) {
FDERROR << "Failed to inference while using model:" << ModelName() << "." FDERROR << "Failed to inference by runtime." << std::endl;
<< std::endl;
return false; return false;
} }
if (!Postprocess(infer_result[0], result, topk)) { if (!postprocessor_.Run(reused_output_tensors, results)) {
FDERROR << "Failed to postprocess while using model:" << ModelName() << "." FDERROR << "Failed to postprocess the inference results by runtime." << std::endl;
<< std::endl;
return false; return false;
} }
return true; return true;
} }

View File

@@ -14,8 +14,8 @@
#pragma once #pragma once
#include "fastdeploy/fastdeploy_model.h" #include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/processors/transform.h" #include "fastdeploy/vision/classification/ppcls/preprocessor.h"
#include "fastdeploy/vision/common/result.h" #include "fastdeploy/vision/classification/ppcls/postprocessor.h"
namespace fastdeploy { namespace fastdeploy {
namespace vision { namespace vision {
@@ -43,28 +43,46 @@ class FASTDEPLOY_DECL PaddleClasModel : public FastDeployModel {
/// Get model's name /// Get model's name
virtual std::string ModelName() const { return "PaddleClas/Model"; } virtual std::string ModelName() const { return "PaddleClas/Model"; }
/** \brief Predict the classification result for an input image /** \brief DEPRECATED Predict the classification result for an input image, remove at 1.0 version
* *
* \param[in] im The input image data, comes from cv::imread() * \param[in] im The input image data, comes from cv::imread()
* \param[in] result The output classification result will be written to this structure * \param[in] result The output classification result will be written to this structure
* \param[in] topk (int)The topk result by the classify confidence score, default 1
* \return true if the prediction succeeded, otherwise false * \return true if the prediction succeeded, otherwise false
*/ */
// TODO(jiangjiajun) Batch is on the way
virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1); virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1);
/** \brief Predict the classification result for an input image
*
* \param[in] img The input image data, comes from cv::imread()
* \param[in] result The output classification result
* \return true if the prediction successed, otherwise false
*/
virtual bool Predict(const cv::Mat& img, ClassifyResult* result);
/** \brief Predict the classification results for a batch of input images
*
* \param[in] imgs, The input image list, each element comes from cv::imread()
* \param[in] results The output classification result list
* \return true if the prediction successed, otherwise false
*/
virtual bool BatchPredict(const std::vector<cv::Mat>& imgs,
std::vector<ClassifyResult>* results);
/// Get preprocessor reference of PaddleClasModel
virtual PaddleClasPreprocessor& GetPreprocessor() {
return preprocessor_;
}
/// Get postprocessor reference of PaddleClasModel
virtual PaddleClasPostprocessor& GetPostprocessor() {
return postprocessor_;
}
protected: protected:
bool Initialize(); bool Initialize();
PaddleClasPreprocessor preprocessor_;
bool BuildPreprocessPipelineFromConfig(); PaddleClasPostprocessor postprocessor_;
bool Preprocess(Mat* mat, FDTensor* outputs);
bool Postprocess(const FDTensor& infer_result, ClassifyResult* result,
int topk = 1);
std::vector<std::shared_ptr<Processor>> processors_;
std::string config_file_;
}; };
typedef PaddleClasModel PPLCNet; typedef PaddleClasModel PPLCNet;

View File

@@ -0,0 +1,53 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/classification/ppcls/postprocessor.h"
#include "fastdeploy/vision/utils/utils.h"
namespace fastdeploy {
namespace vision {
namespace classification {
// Construct a postprocessor; topk controls how many labels Run() keeps.
// Construction cannot fail, so the instance is immediately usable.
PaddleClasPostprocessor::PaddleClasPostprocessor(int topk)
    : topk_(topk), initialized_(true) {}
bool PaddleClasPostprocessor::Run(const std::vector<FDTensor>& infer_result, std::vector<ClassifyResult>* results) {
if (!initialized_) {
FDERROR << "Postprocessor is not initialized." << std::endl;
return false;
}
int batch = infer_result[0].shape[0];
int num_classes = infer_result[0].shape[1];
const float* infer_result_data = reinterpret_cast<const float*>(infer_result[0].Data());
results->resize(batch);
int topk = std::min(num_classes, topk_);
for (int i = 0; i < batch; ++i) {
(*results)[i].label_ids = utils::TopKIndices(infer_result_data + i * num_classes, num_classes, topk);
(*results)[i].scores.resize(topk);
for (int j = 0; j < topk; ++j) {
(*results)[i].scores[j] = infer_result_data[i * num_classes + (*results)[i].label_ids[j]];
}
}
return true;
}
} // namespace classification
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,55 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
namespace fastdeploy {
namespace vision {
namespace classification {
/*! @brief Postprocessor object for PaddleClas serials model.
*/
class FASTDEPLOY_DECL PaddleClasPostprocessor {
 public:
  /** \brief Create a postprocessor instance for PaddleClas series models
   *
   * \param[in] topk The topk results filtered by the classify confidence score, default 1
   */
  explicit PaddleClasPostprocessor(int topk = 1);

  /** \brief Process the result of runtime and fill to ClassifyResult structure
   *
   * \param[in] tensors The inference result from runtime; expects one tensor shaped [batch, num_classes]
   * \param[in] result The output classification results, one entry per image in the batch
   * \return true if the postprocess succeeded, otherwise false
   */
  bool Run(const std::vector<FDTensor>& tensors,
           std::vector<ClassifyResult>* result);

  /// Set topk value (applies to subsequent Run() calls)
  void SetTopk(int topk) { topk_ = topk; }

  /// Get topk value
  int GetTopk() const { return topk_; }

 private:
  // Number of top-scoring labels kept per image.
  int topk_ = 1;
  // Set once by the constructor; Run() refuses to work until it is true.
  bool initialized_ = false;
};
} // namespace classification
} // namespace vision
} // namespace fastdeploy

View File

@@ -15,16 +15,62 @@
namespace fastdeploy { namespace fastdeploy {
void BindPaddleClas(pybind11::module& m) { void BindPaddleClas(pybind11::module& m) {
pybind11::class_<vision::classification::PaddleClasPreprocessor>(
m, "PaddleClasPreprocessor")
.def(pybind11::init<std::string>())
.def("run", [](vision::classification::PaddleClasPreprocessor& self, std::vector<pybind11::array>& im_list) {
std::vector<vision::FDMat> images;
for (size_t i = 0; i < im_list.size(); ++i) {
images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
}
std::vector<FDTensor> outputs;
if (!self.Run(&images, &outputs)) {
pybind11::eval("raise Exception('Failed to preprocess the input data in PaddleClasPreprocessor.')");
}
return outputs;
});
pybind11::class_<vision::classification::PaddleClasPostprocessor>(
m, "PaddleClasPostprocessor")
.def(pybind11::init<int>())
.def("run", [](vision::classification::PaddleClasPostprocessor& self, std::vector<FDTensor>& inputs) {
std::vector<vision::ClassifyResult> results;
if (!self.Run(inputs, &results)) {
pybind11::eval("raise Exception('Failed to postprocess the runtime result in PaddleClasPostprocessor.')");
}
return results;
})
.def("run", [](vision::classification::PaddleClasPostprocessor& self, std::vector<pybind11::array>& input_array) {
std::vector<vision::ClassifyResult> results;
std::vector<FDTensor> inputs;
PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
if (!self.Run(inputs, &results)) {
pybind11::eval("raise Exception('Failed to postprocess the runtime result in PaddleClasPostprocessor.')");
}
return results;
})
.def_property("topk", &vision::classification::PaddleClasPostprocessor::GetTopk, &vision::classification::PaddleClasPostprocessor::SetTopk);
pybind11::class_<vision::classification::PaddleClasModel, FastDeployModel>( pybind11::class_<vision::classification::PaddleClasModel, FastDeployModel>(
m, "PaddleClasModel") m, "PaddleClasModel")
.def(pybind11::init<std::string, std::string, std::string, RuntimeOption, .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
ModelFormat>()) ModelFormat>())
.def("predict", [](vision::classification::PaddleClasModel& self, .def("predict", [](vision::classification::PaddleClasModel& self, pybind11::array& data) {
pybind11::array& data, int topk = 1) { cv::Mat im = PyArrayToCvMat(data);
auto mat = PyArrayToCvMat(data); vision::ClassifyResult result;
vision::ClassifyResult res; self.Predict(im, &result);
self.Predict(&mat, &res, topk); return result;
return res; })
}); .def("batch_predict", [](vision::classification::PaddleClasModel& self, std::vector<pybind11::array>& data) {
std::vector<cv::Mat> images;
for (size_t i = 0; i < data.size(); ++i) {
images.push_back(PyArrayToCvMat(data[i]));
}
std::vector<vision::ClassifyResult> results;
self.BatchPredict(images, &results);
return results;
})
.def_property_readonly("preprocessor", &vision::classification::PaddleClasModel::GetPreprocessor)
.def_property_readonly("postprocessor", &vision::classification::PaddleClasModel::GetPostprocessor);
} }
} // namespace fastdeploy } // namespace fastdeploy

View File

@@ -0,0 +1,108 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision/classification/ppcls/preprocessor.h"
#include "fastdeploy/function/concat.h"
#include "yaml-cpp/yaml.h"
namespace fastdeploy {
namespace vision {
namespace classification {
// Build the preprocessing pipeline from the deployment YAML; aborts via
// FDASSERT if the configuration cannot be parsed.
PaddleClasPreprocessor::PaddleClasPreprocessor(const std::string& config_file) {
  initialized_ = BuildPreprocessPipelineFromConfig(config_file);
  FDASSERT(initialized_, "Failed to create PaddleClasPreprocessor.");
}
bool PaddleClasPreprocessor::BuildPreprocessPipelineFromConfig(const std::string& config_file) {
processors_.clear();
YAML::Node cfg;
try {
cfg = YAML::LoadFile(config_file);
} catch (YAML::BadFile& e) {
FDERROR << "Failed to load yaml file " << config_file
<< ", maybe you should check this file." << std::endl;
return false;
}
auto preprocess_cfg = cfg["PreProcess"]["transform_ops"];
processors_.push_back(std::make_shared<BGR2RGB>());
for (const auto& op : preprocess_cfg) {
FDASSERT(op.IsMap(),
"Require the transform information in yaml be Map type.");
auto op_name = op.begin()->first.as<std::string>();
if (op_name == "ResizeImage") {
int target_size = op.begin()->second["resize_short"].as<int>();
bool use_scale = false;
int interp = 1;
processors_.push_back(
std::make_shared<ResizeByShort>(target_size, 1, use_scale));
} else if (op_name == "CropImage") {
int width = op.begin()->second["size"].as<int>();
int height = op.begin()->second["size"].as<int>();
processors_.push_back(std::make_shared<CenterCrop>(width, height));
} else if (op_name == "NormalizeImage") {
auto mean = op.begin()->second["mean"].as<std::vector<float>>();
auto std = op.begin()->second["std"].as<std::vector<float>>();
auto scale = op.begin()->second["scale"].as<float>();
FDASSERT((scale - 0.00392157) < 1e-06 && (scale - 0.00392157) > -1e-06,
"Only support scale in Normalize be 0.00392157, means the pixel "
"is in range of [0, 255].");
processors_.push_back(std::make_shared<Normalize>(mean, std));
} else if (op_name == "ToCHWImage") {
processors_.push_back(std::make_shared<HWC2CHW>());
} else {
FDERROR << "Unexcepted preprocess operator: " << op_name << "."
<< std::endl;
return false;
}
}
// Fusion will improve performance
FuseTransforms(&processors_);
return true;
}
// Run every configured processor over each input image, then stack the
// results into a single batched tensor in (*outputs)[0].
//
// \param[in,out] images Input images; processed in place.
// \param[out] outputs Resized to 1; outputs[0] holds the [batch, ...] tensor.
// \return true on success, false if uninitialized, input is empty, or a
//         processor fails.
bool PaddleClasPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs) {
  if (!initialized_) {
    FDERROR << "The preprocessor is not initialized." << std::endl;
    return false;
  }
  if (images->size() == 0) {
    FDERROR << "The size of input images should be greater than 0." << std::endl;
    return false;
  }
  for (size_t i = 0; i < images->size(); ++i) {
    for (size_t j = 0; j < processors_.size(); ++j) {
      if (!(*(processors_[j].get()))(&((*images)[i]))) {
        // Bug fix: report the failing processor (index j). The original read
        // processors_[i] — i is the image index, which can run past the end
        // of processors_ when there are more images than processors.
        FDERROR << "Failed to process image:" << i << " in "
                << processors_[j]->Name() << "." << std::endl;
        return false;
      }
    }
  }
  outputs->resize(1);
  // Concat all the preprocessed data to a batch tensor
  std::vector<FDTensor> tensors(images->size());
  for (size_t i = 0; i < images->size(); ++i) {
    // Share the image buffer with the tensor (no copy), then add a leading
    // batch dimension so Concat can stack along axis 0.
    (*images)[i].ShareWithTensor(&(tensors[i]));
    tensors[i].ExpandDim(0);
  }
  Concat(tensors, &((*outputs)[0]), 0);
  return true;
}
} // namespace classification
} // namespace vision
} // namespace fastdeploy

View File

@@ -0,0 +1,50 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
namespace fastdeploy {
namespace vision {
namespace classification {
/*! @brief Preprocessor object for PaddleClas serials model.
*/
class FASTDEPLOY_DECL PaddleClasPreprocessor {
 public:
  /** \brief Create a preprocessor instance for PaddleClas series models
   *
   * \param[in] config_file Path of configuration file for deployment, e.g resnet/infer_cfg.yml
   */
  explicit PaddleClasPreprocessor(const std::string& config_file);

  /** \brief Process the input images and prepare input tensors for runtime
   *
   * \param[in] images The input image data list, all the elements are returned by cv::imread()
   * \param[in] outputs The output tensors which will feed in runtime; outputs[0] is the batched tensor
   * \return true if the preprocess succeeded, otherwise false
   */
  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs);

 private:
  // Translates the YAML "PreProcess/transform_ops" section into processors_.
  bool BuildPreprocessPipelineFromConfig(const std::string& config_file);
  // Ordered pipeline applied to every image in Run().
  std::vector<std::shared_ptr<Processor>> processors_;
  // Set by the constructor after the pipeline builds successfully.
  bool initialized_ = false;
};
};
} // namespace classification
} // namespace vision
} // namespace fastdeploy

View File

@@ -51,7 +51,7 @@ bool ResizeByShort::ImplByFlyCV(Mat* mat) {
} else if (interp_ == 2) { } else if (interp_ == 2) {
interp_method = fcv::InterpolationType::INTER_CUBIC; interp_method = fcv::InterpolationType::INTER_CUBIC;
} else { } else {
FDERROR << "LimitLong: Only support interp_ be 0/1/2 with FlyCV, but " FDERROR << "LimitByShort: Only support interp_ be 0/1/2 with FlyCV, but "
"now it's " "now it's "
<< interp_ << "." << std::endl; << interp_ << "." << std::endl;
return false; return false;

View File

@@ -35,6 +35,14 @@ std::string ClassifyResult::Str() {
return out; return out;
} }
ClassifyResult& ClassifyResult::operator=(ClassifyResult&& other) {
if (&other != this) {
label_ids = std::move(other.label_ids);
scores = std::move(other.scores);
}
return *this;
}
void Mask::Reserve(int size) { data.reserve(size); } void Mask::Reserve(int size) { data.reserve(size); }
void Mask::Resize(int size) { data.resize(size); } void Mask::Resize(int size) { data.resize(size); }

View File

@@ -44,6 +44,7 @@ struct FASTDEPLOY_DECL BaseResult {
/*! @brief Classify result structure for all the image classify models /*! @brief Classify result structure for all the image classify models
*/ */
struct FASTDEPLOY_DECL ClassifyResult : public BaseResult { struct FASTDEPLOY_DECL ClassifyResult : public BaseResult {
ClassifyResult() = default;
/// Classify result for an image /// Classify result for an image
std::vector<int32_t> label_ids; std::vector<int32_t> label_ids;
/// The confidence for each classify result /// The confidence for each classify result
@@ -53,6 +54,11 @@ struct FASTDEPLOY_DECL ClassifyResult : public BaseResult {
/// Clear result /// Clear result
void Clear(); void Clear();
/// Copy constructor
ClassifyResult(const ClassifyResult& other) = default;
/// Move assignment
ClassifyResult& operator=(ClassifyResult&& other);
/// Debug function, convert the result to string to print /// Debug function, convert the result to string to print
std::string Str(); std::string Str();
}; };

View File

@@ -14,8 +14,9 @@
from __future__ import absolute_import from __future__ import absolute_import
from .contrib.yolov5cls import YOLOv5Cls from .contrib.yolov5cls import YOLOv5Cls
from .ppcls import PaddleClasModel from .ppcls import *
from .contrib.resnet import ResNet from .contrib.resnet import ResNet
PPLCNet = PaddleClasModel PPLCNet = PaddleClasModel
PPLCNetv2 = PaddleClasModel PPLCNetv2 = PaddleClasModel
EfficientNet = PaddleClasModel EfficientNet = PaddleClasModel

View File

@@ -18,6 +18,42 @@ from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C from .... import c_lib_wrap as C
class PaddleClasPreprocessor:
def __init__(self, config_file):
"""Create a preprocessor for PaddleClasModel from configuration file
:param config_file: (str)Path of configuration file, e.g resnet50/inference_cls.yaml
"""
self._preprocessor = C.vision.classification.PaddleClasPreprocessor(
config_file)
def run(self, input_ims):
"""Preprocess input images for PaddleClasModel
:param: input_ims: (list of numpy.ndarray)The input image
:return: list of FDTensor
"""
return self._preprocessor.run(input_ims)
class PaddleClasPostprocessor:
def __init__(self, topk=1):
"""Create a postprocessor for PaddleClasModel
:param topk: (int)Filter the top k classify label
"""
self._postprocessor = C.vision.classification.PaddleClasPostprocessor(
topk)
def run(self, runtime_results):
"""Postprocess the runtime results for PaddleClasModel
:param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
:return: list of ClassifyResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
"""
return self._postprocessor.run(runtime_results)
class PaddleClasModel(FastDeployModel): class PaddleClasModel(FastDeployModel):
def __init__(self, def __init__(self,
model_file, model_file,
@@ -45,9 +81,35 @@ class PaddleClasModel(FastDeployModel):
def predict(self, im, topk=1): def predict(self, im, topk=1):
"""Classify an input image """Classify an input image
:param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format :param im: (numpy.ndarray) The input image data, a 3-D array with layout HWC, BGR format
:param topk: (int)The topk result by the classify confidence score, default 1 :param topk: (int) Filter the topk classify result, default 1
:return: ClassifyResult :return: ClassifyResult
""" """
return self._model.predict(im, topk) self.postprocessor.topk = topk
return self._model.predict(im)
def batch_predict(self, images):
"""Classify a batch of input image
:param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
:return list of ClassifyResult
"""
return self._model.batch_predict(images)
@property
def preprocessor(self):
"""Get PaddleClasPreprocessor object of the loaded model
:return PaddleClasPreprocessor
"""
return self._model.preprocessor
@property
def postprocessor(self):
"""Get PaddleClasPostprocessor object of the loaded model
:return PaddleClasPostprocessor
"""
return self._model.postprocessor

View File

@@ -22,9 +22,11 @@ import runtime_config as rc
def test_classification_mobilenetv2(): def test_classification_mobilenetv2():
model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_x0_25_infer.tgz" model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_x0_25_infer.tgz"
input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg" input_url1 = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg"
input_url2 = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00030010.jpeg"
fd.download_and_decompress(model_url, "resources") fd.download_and_decompress(model_url, "resources")
fd.download(input_url, "resources") fd.download(input_url1, "resources")
fd.download(input_url2, "resources")
model_path = "resources/MobileNetV1_x0_25_infer" model_path = "resources/MobileNetV1_x0_25_infer"
model_file = "resources/MobileNetV1_x0_25_infer/inference.pdmodel" model_file = "resources/MobileNetV1_x0_25_infer/inference.pdmodel"
@@ -33,18 +35,67 @@ def test_classification_mobilenetv2():
model = fd.vision.classification.PaddleClasModel( model = fd.vision.classification.PaddleClasModel(
model_file, params_file, config_file, runtime_option=rc.test_option) model_file, params_file, config_file, runtime_option=rc.test_option)
expected_label_ids = [153, 333, 259, 338, 265, 154] expected_label_ids_1 = [153, 333, 259, 338, 265, 154]
expected_scores = [ expected_scores_1 = [
0.221088, 0.109457, 0.078668, 0.076814, 0.052401, 0.048206 0.221088, 0.109457, 0.078668, 0.076814, 0.052401, 0.048206
] ]
expected_label_ids_2 = [80, 23, 93, 99, 143, 7]
expected_scores_2 = [
0.975599, 0.014083, 0.003821, 0.001571, 0.001233, 0.000924
]
# compare diff # compare diff
im = cv2.imread("./resources/ILSVRC2012_val_00000010.jpeg") im1 = cv2.imread("./resources/ILSVRC2012_val_00000010.jpeg")
for i in range(2): im2 = cv2.imread("./resources/ILSVRC2012_val_00030010.jpeg")
result = model.predict(im, topk=6)
diff_label = np.fabs( # for i in range(3000000):
np.array(result.label_ids) - np.array(expected_label_ids)) while True:
diff_scores = np.fabs( # test single predict
np.array(result.scores) - np.array(expected_scores)) model.postprocessor.topk = 6
assert diff_label.max() < 1e-06, "There's difference in classify label." result1 = model.predict(im1)
assert diff_scores.max( result2 = model.predict(im2)
) < 1e-05, "There's difference in classify score."
diff_label_1 = np.fabs(
np.array(result1.label_ids) - np.array(expected_label_ids_1))
diff_label_2 = np.fabs(
np.array(result2.label_ids) - np.array(expected_label_ids_2))
diff_scores_1 = np.fabs(
np.array(result1.scores) - np.array(expected_scores_1))
diff_scores_2 = np.fabs(
np.array(result2.scores) - np.array(expected_scores_2))
assert diff_label_1.max(
) < 1e-06, "There's difference in classify label 1."
assert diff_scores_1.max(
) < 1e-05, "There's difference in classify score 1."
assert diff_label_2.max(
) < 1e-06, "There's difference in classify label 2."
assert diff_scores_2.max(
) < 1e-05, "There's difference in classify score 2."
# test batch predict
results = model.batch_predict([im1, im2])
result1 = results[0]
result2 = results[1]
diff_label_1 = np.fabs(
np.array(result1.label_ids) - np.array(expected_label_ids_1))
diff_label_2 = np.fabs(
np.array(result2.label_ids) - np.array(expected_label_ids_2))
diff_scores_1 = np.fabs(
np.array(result1.scores) - np.array(expected_scores_1))
diff_scores_2 = np.fabs(
np.array(result2.scores) - np.array(expected_scores_2))
assert diff_label_1.max(
) < 1e-06, "There's difference in classify label 1."
assert diff_scores_1.max(
) < 1e-05, "There's difference in classify score 1."
assert diff_label_2.max(
) < 1e-06, "There's difference in classify label 2."
assert diff_scores_2.max(
) < 1e-05, "There's difference in classify score 2."
if __name__ == "__main__":
test_classification_mobilenetv2()