FastDeploy/fastdeploy/vision/keypointdet/pptinypose/pptinypose.h
huangjianhui b565c15bf7 [Model] Add tinypose single && pipeline model (#177)
* Add tinypose model

* Add PPTinypose python API

* Fix picodet preprocess bug && Add Tinypose examples

* Update tinypose example code

* Update ppseg preprocess if condition

* Update ppseg backend support type

* Update permute.h

* Update README.md

* Update code with comments

* Move files dir

* Delete premute.cc

* Add single model pptinypose

* Delete pptinypose old code in ppdet

* Code format

* Add ppdet + pptinypose pipeline model

* Fix bug for posedetpipeline

* Change Frontend to ModelFormat

* Change Frontend to ModelFormat in __init__.py

* Add python posedetpipeline/

* Update pptinypose example dir name

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Create keypointdetection_result.md

* Create README.md

* Create README.md

* Create README.md

* Update README.md

* Update README.md

* Create README.md

* Fix det_keypoint_unite_infer.py bug

* Create README.md

* Update PP-Tinypose by comment

* Update by comment

* Add pipeline directory

* Add pptinypose dir

* Update pptinypose to align accuracy

* Add warpAffine processor

* Update GetCpuMat to GetOpenCVMat

* Add comment for pptinypose && pipeline

* Update docs/main_page.md

* Add README.md for pptinypose

* Add README for det_keypoint_unite

* Remove ENABLE_PIPELINE option

* Remove ENABLE_PIPELINE option

* Change pptinypose default backend

* PP-TinyPose Pipeline supports multiple PP-Detection models

* Update pp-tinypose comment

* Update by comments

* Add single test example

Co-authored-by: Jason <jiangjiajun@baidu.com>
2022-10-21 09:28:23 +08:00


// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
#include "fastdeploy/vision/keypointdet/pptinypose/pptinypose_utils.h"
namespace fastdeploy {
namespace vision {
/** \brief All keypoint detection model APIs are defined inside this namespace
 *
 */
namespace keypointdetection {
/*! @brief PPTinyPose model object used to load a PPTinyPose model exported by PaddleDetection
 */
class FASTDEPLOY_DECL PPTinyPose : public FastDeployModel {
 public:
  /** \brief Set the paths of the model file and the configuration file, and the runtime configuration
   *
   * \param[in] model_file Path of the model file, e.g. pptinypose/model.pdmodel
   * \param[in] params_file Path of the parameter file, e.g. pptinypose/model.pdiparams; if the model format is ONNX, this parameter will be ignored
   * \param[in] config_file Path of the configuration file for deployment, e.g. pptinypose/infer_cfg.yml
   * \param[in] custom_option RuntimeOption for inference; the default will use CPU and choose the backend defined in `valid_cpu_backends`
   * \param[in] model_format Model format of the loaded model; the default is the Paddle format
   */
  PPTinyPose(const std::string& model_file, const std::string& params_file,
             const std::string& config_file,
             const RuntimeOption& custom_option = RuntimeOption(),
             const ModelFormat& model_format = ModelFormat::PADDLE);

  /// Get the model's name
  std::string ModelName() const { return "PaddleDetection/PPTinyPose"; }

  /** \brief Predict the keypoint detection result for an input image
   *
   * \param[in] im The input image data, which comes from cv::imread()
   * \param[in] result The output keypoint detection result will be written to this structure
   * \return true if the keypoint prediction succeeded, otherwise false
   */
  bool Predict(cv::Mat* im, KeyPointDetectionResult* result);

  /** \brief Predict the keypoint detection result for an input image, given a detection result
   *
   * \param[in] im The input image data, which comes from cv::imread()
   * \param[in] result The output keypoint detection result will be written to this structure
   * \param[in] detection_result The structure that stores the pedestrian detection result, which is used to crop the image for multi-person keypoint detection
   * \return true if the keypoint prediction succeeded, otherwise false
   */
  bool Predict(cv::Mat* im, KeyPointDetectionResult* result,
               const DetectionResult& detection_result);

  /** \brief Whether to use Distribution-Aware Coordinate Representation for Human Pose Estimation (DARK for short) in postprocessing; the default is true
   */
  bool use_dark = true;

 protected:
  bool Initialize();
  /// Build the preprocess pipeline from the loaded model
  bool BuildPreprocessPipelineFromConfig();
  /// Preprocess an input image, and set the preprocessed results to `outputs`
  bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
  /// Postprocess the inference results, and set the final result to `result`
  bool Postprocess(std::vector<FDTensor>& infer_result,
                   KeyPointDetectionResult* result,
                   const std::vector<float>& center,
                   const std::vector<float>& scale);

 private:
  std::vector<std::shared_ptr<Processor>> processors_;
  std::string config_file_;
};
} // namespace keypointdetection
} // namespace vision
} // namespace fastdeploy
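
A minimal usage sketch of this API follows; it is not part of the header itself. The model and image paths are placeholders assuming the standard PP-TinyPose export layout, the umbrella header fastdeploy/vision.h and the Initialized() check inherited from FastDeployModel are assumed available, and producing det_result with a separate pedestrian detector (e.g. PicoDet) is omitted.

#include <iostream>

#include <opencv2/opencv.hpp>

#include "fastdeploy/vision.h"

int main() {
  namespace kpdet = fastdeploy::vision::keypointdetection;
  // Hypothetical paths following the usual PP-TinyPose export layout.
  kpdet::PPTinyPose model("pptinypose/model.pdmodel",
                          "pptinypose/model.pdiparams",
                          "pptinypose/infer_cfg.yml");
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize PP-TinyPose model." << std::endl;
    return -1;
  }
  model.use_dark = true;  // DARK postprocessing, on by default

  cv::Mat im = cv::imread("person.jpg");  // placeholder image path
  fastdeploy::vision::KeyPointDetectionResult result;

  // Single-person prediction over the whole image.
  if (!model.Predict(&im, &result)) {
    std::cerr << "Keypoint prediction failed." << std::endl;
    return -1;
  }

  // Multi-person prediction: pass boxes from a separate pedestrian
  // detector so each person is cropped before keypoint estimation.
  // Filling det_result (e.g. with PicoDet) is omitted from this sketch.
  fastdeploy::vision::DetectionResult det_result;
  // ... run a detection model to fill det_result ...
  model.Predict(&im, &result, det_result);
  return 0;
}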