Mirror of https://github.com/PaddlePaddle/FastDeploy.git

Commit 866d044898:
* model done, CLA fix
* remove letter_box and ConvertAndPermute, use resize hwc2chw and convert in preprocess
* remove useless values in preprocess
* remove useless values in preprocess
* fix reviewed problem
* fix reviewed problem pybind
* fix reviewed problem pybind
* postprocess fix
* add test_fastestdet.py, coco_val2017_500 fixed done, ready to review
* fix reviewed problem
* python/.../fastestdet.py
* fix infer.cc, preprocess, python/fastestdet.py
* fix examples/python/infer.py

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision/detection/contrib/fastestdet/preprocessor.h"

#include "fastdeploy/function/concat.h"

namespace fastdeploy {
namespace vision {
namespace detection {

FastestDetPreprocessor::FastestDetPreprocessor() {
  size_ = {352, 352};  // {h, w}
}

bool FastestDetPreprocessor::Preprocess(
    FDMat* mat, FDTensor* output,
    std::map<std::string, std::array<float, 2>>* im_info) {
  // Record the shape of the original image
  (*im_info)["input_shape"] = {static_cast<float>(mat->Height()),
                               static_cast<float>(mat->Width())};

  // Scale ratio of the target size to the longer image side
  // (not used by the fixed-size resize below)
  double ratio = (size_[0] * 1.0) / std::max(static_cast<float>(mat->Height()),
                                             static_cast<float>(mat->Width()));

  // FastestDet's preprocess steps:
  // 1. resize
  // 2. convert_and_permute(swap_rb=false)
  Resize::Run(mat, size_[0], size_[1]);  // resize
  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
  // convert to float and HWC2CHW
  ConvertAndPermute::Run(mat, alpha, beta, false);

  // Record the shape of the preprocessed image
  (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
                                static_cast<float>(mat->Width())};

  mat->ShareWithTensor(output);
  output->ExpandDim(0);  // reshape to n, c, h, w
  return true;
}

bool FastestDetPreprocessor::Run(
    std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
    std::vector<std::map<std::string, std::array<float, 2>>>* ims_info) {
  if (images->size() == 0) {
    FDERROR << "The size of input images should be greater than 0."
            << std::endl;
    return false;
  }
  ims_info->resize(images->size());
  outputs->resize(1);
  // Concat all the preprocessed data to a batch tensor
  std::vector<FDTensor> tensors(images->size());
  for (size_t i = 0; i < images->size(); ++i) {
    if (!Preprocess(&(*images)[i], &tensors[i], &(*ims_info)[i])) {
      FDERROR << "Failed to preprocess input image." << std::endl;
      return false;
    }
  }

  if (tensors.size() == 1) {
    (*outputs)[0] = std::move(tensors[0]);
  } else {
    function::Concat(tensors, &((*outputs)[0]), 0);
  }
  return true;
}

}  // namespace detection
}  // namespace vision
}  // namespace fastdeploy
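
For context, a minimal usage sketch of the preprocessor defined above follows. It is not part of the repository file: it assumes FastDeploy's WrapMat helper for wrapping an OpenCV image into an FDMat, and "test.jpg" is a placeholder image path.

// Usage sketch (illustrative only, not part of preprocessor.cc).
#include <array>
#include <iostream>
#include <map>
#include <string>
#include <vector>

#include "fastdeploy/vision/detection/contrib/fastestdet/preprocessor.h"
#include "opencv2/opencv.hpp"

int main() {
  // Load an image with OpenCV (BGR, HWC, uint8). "test.jpg" is a placeholder.
  cv::Mat image = cv::imread("test.jpg");
  if (image.empty()) {
    std::cerr << "Failed to read image." << std::endl;
    return -1;
  }

  // Wrap the cv::Mat into FastDeploy's FDMat (assumes the WrapMat helper).
  std::vector<fastdeploy::vision::FDMat> mats = {
      fastdeploy::vision::WrapMat(image)};

  // Run the FastestDet preprocessor: resize to 352x352, scale by 1/255,
  // HWC -> CHW, then batch into a single FDTensor.
  fastdeploy::vision::detection::FastestDetPreprocessor preprocessor;
  std::vector<fastdeploy::FDTensor> outputs;
  std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
  if (!preprocessor.Run(&mats, &outputs, &ims_info)) {
    std::cerr << "Preprocessing failed." << std::endl;
    return -1;
  }

  // Expected batched shape: {1, 3, 352, 352}.
  for (auto dim : outputs[0].shape) {
    std::cout << dim << " ";
  }
  std::cout << std::endl;
  return 0;
}

A detection postprocessor would typically use the recorded ims_info entries to map boxes from the 352x352 input back to the original image resolution.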