Files
FastDeploy/fastdeploy/vision/detection/ppdet/base.cc
huangjianhui 9937b6c325 [Other] Add Model Clone function for PaddleClas && PaddleDet && PaddleSeg (#791)
* Refactor PaddleSeg with preprocessor && postprocessor

* Fix bugs

* Delete redundant code

* Modify by comments

* Refactor according to comments

* Add batch evaluation

* Add single test script

* Add ppliteseg single test script && fix eval(raise) error

* fix bug

* Fix evaluation segmentation.py batch predict

* Fix segmentation evaluation bug

* Fix evaluation segmentation bugs

* Update segmentation result docs

* Update old predict api and DisableNormalizeAndPermute

* Update resize segmentation label map with cv::INTER_NEAREST

* Add Model Clone function for PaddleClas && PaddleDet && PaddleSeg

* Add multi thread demo

* Add python model clone function

* Add multi thread python && C++ example

* Fix bug

Co-authored-by: Jason <jiangjiajun@baidu.com>
2022-12-13 15:19:47 +08:00

85 lines
2.6 KiB
C++
Executable File

#include "fastdeploy/vision/detection/ppdet/base.h"
#include "fastdeploy/vision/utils/utils.h"
#include "yaml-cpp/yaml.h"
#include "fastdeploy/utils/unique_ptr.h"
namespace fastdeploy {
namespace vision {
namespace detection {
PPDetBase::PPDetBase(const std::string& model_file,
                     const std::string& params_file,
                     const std::string& config_file,
                     const RuntimeOption& custom_option,
                     const ModelFormat& model_format)
    : preprocessor_(config_file) {
  // Record the model location and execution options; the backend itself
  // is created later, when Initialize() calls InitRuntime().
  runtime_option = custom_option;
  runtime_option.model_file = model_file;
  runtime_option.params_file = params_file;
  runtime_option.model_format = model_format;
}
// Create an independent copy of this model that owns its own runtime, so
// the clone can run inference concurrently with the original (e.g. one
// model per worker thread).
std::unique_ptr<PPDetBase> PPDetBase::Clone() const {
  // Copy-construct directly inside make_unique. The previous code wrote
  // make_unique<PPDetBase>(PPDetBase(*this)), which built an extra
  // temporary copy that was then copied/moved a second time.
  auto clone_model = fastdeploy::utils::make_unique<PPDetBase>(*this);
  // The member-wise copy shares the source's runtime; replace it with a
  // cloned runtime so the two models do not interfere.
  clone_model->SetRuntime(clone_model->CloneRuntime());
  return clone_model;
}
// Build the inference backend from runtime_option.
// Returns false (after logging) when backend creation fails.
bool PPDetBase::Initialize() {
  if (InitRuntime()) {
    return true;
  }
  FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
  return false;
}
// Pointer-based overload kept for older callers; forwards to the
// const-reference version.
bool PPDetBase::Predict(cv::Mat* im, DetectionResult* result) {
  const cv::Mat& image = *im;
  return Predict(image, result);
}
// Single-image prediction: delegate to the batch path with a batch of
// one and move the sole result back to the caller.
bool PPDetBase::Predict(const cv::Mat& im, DetectionResult* result) {
  std::vector<DetectionResult> batch_results;
  if (!BatchPredict(std::vector<cv::Mat>{im}, &batch_results)) {
    return false;
  }
  *result = std::move(batch_results.front());
  return true;
}
// Run preprocess -> inference -> postprocess over a batch of images.
// Returns false (after logging) as soon as any stage fails.
bool PPDetBase::BatchPredict(const std::vector<cv::Mat>& imgs,
                             std::vector<DetectionResult>* results) {
  std::vector<FDMat> fd_images = WrapMat(imgs);
  if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
    FDERROR << "Failed to preprocess the input image." << std::endl;
    return false;
  }
  // The preprocessor is expected to emit image / scale_factor / im_shape
  // in that fixed order; check before indexing to avoid out-of-bounds
  // access if a misconfigured preprocessor produced fewer tensors.
  if (reused_input_tensors_.size() < 3) {
    FDERROR << "Preprocessor produced " << reused_input_tensors_.size()
            << " tensors, but 3 are expected." << std::endl;
    return false;
  }
  reused_input_tensors_[0].name = "image";
  reused_input_tensors_[1].name = "scale_factor";
  reused_input_tensors_[2].name = "im_shape";

  if (postprocessor_.DecodeAndNMSApplied()) {
    // Postprocessor needs the per-image scale factors to map boxes back
    // to the original image coordinates.
    postprocessor_.SetScaleFactor(
        static_cast<float*>(reused_input_tensors_[1].Data()));
  }

  // Some models don't need scale_factor and im_shape as input; drop the
  // trailing tensors the runtime does not expect. Use '>' rather than the
  // previous '!=' so this cannot loop forever (popping an empty vector)
  // when the runtime declares more inputs than the preprocessor produced.
  while (reused_input_tensors_.size() > NumInputsOfRuntime()) {
    reused_input_tensors_.pop_back();
  }

  if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
    FDERROR << "Failed to inference by runtime." << std::endl;
    return false;
  }
  if (!postprocessor_.Run(reused_output_tensors_, results)) {
    FDERROR << "Failed to postprocess the inference results by runtime."
            << std::endl;
    return false;
  }
  return true;
}
} // namespace detection
} // namespace vision
} // namespace fastdeploy