Files
FastDeploy/csrc/fastdeploy/vision/segmentation/ppseg/model.h
huangjianhui 32047016d6 Update evaluation function to support calculating average inference time (#106)
* Update README.md

* Update README.md

* Update README.md

* Create README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Add inference-time calculation to evaluation and fix some bugs

* Update classification __init__

* Move to ppseg

Co-authored-by: Jason <jiangjiajun@baidu.com>
2022-08-12 17:42:09 +08:00

47 lines
1.4 KiB
C++

#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
namespace fastdeploy {
namespace vision {
namespace segmentation {
/// \brief Deployment model for semantic segmentation networks exported from
/// PaddleSeg. Inherits backend/runtime management from FastDeployModel.
class FASTDEPLOY_DECL PaddleSegModel : public FastDeployModel {
public:
/// \brief Load a PaddleSeg model for inference.
/// \param model_file path of the model file (e.g. *.pdmodel)
/// \param params_file path of the parameter file (e.g. *.pdiparams)
/// \param config_file path of the deployment config file; consumed by
///        BuildPreprocessPipelineFromConfig() to assemble preprocessing
/// \param custom_option runtime options (backend/device); library defaults
///        are used when omitted
/// \param model_format format of the model, Paddle format by default
PaddleSegModel(const std::string& model_file, const std::string& params_file,
const std::string& config_file,
const RuntimeOption& custom_option = RuntimeOption(),
const Frontend& model_format = Frontend::PADDLE);
/// \brief Human-readable model name, always "PaddleSeg".
std::string ModelName() const { return "PaddleSeg"; }
/// \brief Run segmentation on a single image.
/// \param im input image; passed as a mutable pointer, so preprocessing may
///        modify it in place — NOTE(review): confirm in the .cc file
/// \param result output parameter receiving the segmentation prediction
/// \return true on success, false otherwise
virtual bool Predict(cv::Mat* im, SegmentationResult* result);
// Presumably: apply softmax to the network output during postprocessing.
// NOTE(review): actual effect is implemented in the .cc file — confirm there.
bool with_softmax = false;
// Presumably: input comes from a vertical (portrait) screen, affecting
// pre/postprocessing orientation. NOTE(review): confirm against the .cc file.
bool is_vertical_screen = false;
private:
// One-time setup called after construction (runtime init + preprocessing
// pipeline build — confirm ordering in the .cc file).
bool Initialize();
// Build `processors_` from the YAML deployment config at `config_file_`.
bool BuildPreprocessPipelineFromConfig();
// Convert `mat` into the input tensor `outputs`; records per-image metadata
// (keyed by string, each value an {int, int} pair — presumably
// original/resized shapes; verify against the implementation) in `im_info`.
bool Preprocess(Mat* mat, FDTensor* outputs,
std::map<std::string, std::array<int, 2>>* im_info);
// Turn the raw inference tensor into a SegmentationResult, using the
// metadata captured by Preprocess().
bool Postprocess(FDTensor& infer_result, SegmentationResult* result,
std::map<std::string, std::array<int, 2>>* im_info);
// Set when preprocessing resized the input (so postprocessing can map the
// prediction back) — NOTE(review): confirm in the .cc file.
bool is_resized = false;
// Ordered preprocessing steps assembled from the deployment config.
std::vector<std::shared_ptr<Processor>> processors_;
// Path of the deployment config file given to the constructor.
std::string config_file_;
};
/// \brief Convert an inference-result FDTensor into a float32 cv::Mat.
/// \param mat output image written in place
/// \param infer_result raw tensor produced by the segmentation network
/// \param contain_score_map whether the tensor carries per-class scores
///        rather than only label ids — NOTE(review): exact layout handling
///        lives in the implementation file; confirm there.
void FDTensor2FP32CVMat(cv::Mat& mat, FDTensor& infer_result,
bool contain_score_map);
} // namespace segmentation
} // namespace vision
} // namespace fastdeploy