Mirror of https://github.com/PaddlePaddle/FastDeploy.git, commit 4a4c37aa97: Update ppseg resize image && valid backend according to input_shape; Update ppseg model.cc (Co-authored-by: Jason <jiangjiajun@baidu.com>).
PaddleSeg segmentation model header: 46 lines, 1.2 KiB, C++.

#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"

namespace fastdeploy {
namespace vision {
namespace segmentation {

class FASTDEPLOY_DECL PaddleSegModel : public FastDeployModel {
 public:
  // Load a PaddleSeg inference model from its model file, params file and
  // the deploy config file exported by PaddleSeg.
  PaddleSegModel(const std::string& model_file, const std::string& params_file,
                 const std::string& config_file,
                 const RuntimeOption& custom_option = RuntimeOption(),
                 const ModelFormat& model_format = ModelFormat::PADDLE);

  std::string ModelName() const { return "PaddleSeg"; }

  // Run preprocessing, inference and postprocessing on a single image.
  virtual bool Predict(cv::Mat* im, SegmentationResult* result);

  // Apply softmax in postprocessing when the exported model does not
  // already contain a softmax operator. Default: false.
  bool apply_softmax = false;

  // Set to true when the input image is a vertical (portrait) image,
  // i.e. height > width. Default: false.
  bool is_vertical_screen = false;

 private:
  bool Initialize();

  bool BuildPreprocessPipelineFromConfig();

  bool Preprocess(Mat* mat, FDTensor* outputs);

  bool Postprocess(FDTensor* infer_result, SegmentationResult* result,
                   const std::map<std::string, std::array<int, 2>>& im_info);

  bool is_with_softmax = false;

  bool is_with_argmax = true;

  std::vector<std::shared_ptr<Processor>> processors_;
  std::string config_file_;
};

}  // namespace segmentation
}  // namespace vision
}  // namespace fastdeploy
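
For context, a minimal usage sketch of the API declared above. It assumes the usual FastDeploy layout where "fastdeploy/vision.h" pulls in this header; the model, params, config and image paths are placeholders, not part of this repository.

// Minimal usage sketch (file paths are placeholders).
#include <iostream>
#include "fastdeploy/vision.h"

int main() {
  // Construct the model from the exported PaddleSeg files.
  fastdeploy::vision::segmentation::PaddleSegModel model(
      "model.pdmodel", "model.pdiparams", "deploy.yaml");
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize PaddleSegModel." << std::endl;
    return -1;
  }

  // Optional postprocessing switches exposed by this header.
  // model.apply_softmax = true;
  // model.is_vertical_screen = true;

  cv::Mat im = cv::imread("test.jpg");
  fastdeploy::vision::SegmentationResult result;
  if (!model.Predict(&im, &result)) {
    std::cerr << "Prediction failed." << std::endl;
    return -1;
  }
  std::cout << result.Str() << std::endl;
  return 0;
}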