FastDeploy/fastdeploy/vision/headpose/contrib/fsanet.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"
namespace fastdeploy {
namespace vision {
namespace headpose {
/*! @brief FSANet model object, used to load an FSANet head pose estimation model exported by FSANet.
 */
class FASTDEPLOY_DECL FSANet : public FastDeployModel {
 public:
  /** \brief Set the path of the model file and the runtime configuration.
   *
   * \param[in] model_file Path of the model file, e.g. ./fsanet-var.onnx
   * \param[in] params_file Path of the parameter file, e.g. ppyoloe/model.pdiparams; if the model format is ONNX, this parameter is ignored
   * \param[in] custom_option RuntimeOption for inference; by default the model runs on CPU with the backend chosen from "valid_cpu_backends"
   * \param[in] model_format Format of the loaded model, default is ONNX
   */
  FSANet(const std::string& model_file, const std::string& params_file = "",
         const RuntimeOption& custom_option = RuntimeOption(),
         const ModelFormat& model_format = ModelFormat::ONNX);

  std::string ModelName() const { return "FSANet"; }

  /** \brief Predict the head pose result for an input image
   *
   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
   * \param[in] result The output head pose result will be written to this structure
   * \return true if the prediction succeeded, otherwise false
   */
  virtual bool Predict(cv::Mat* im, HeadPoseResult* result);

  /// Argument for the image preprocessing step: tuple of (width, height), the target size after resize, default (64, 64)
  std::vector<int> size;

 private:
  bool Initialize();

  /// Preprocess an input image into an FDTensor and record its shape info for postprocessing
  bool Preprocess(Mat* mat, FDTensor* outputs,
                  std::map<std::string, std::array<int, 2>>* im_info);

  /// Convert the raw inference output tensor into a HeadPoseResult
  bool Postprocess(FDTensor& infer_result, HeadPoseResult* result,
                   const std::map<std::string, std::array<int, 2>>& im_info);
};
} // namespace headpose
} // namespace vision
} // namespace fastdeploy
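
For reference, below is a minimal usage sketch built from the declarations in this header: construct an FSANet object from an ONNX file, then pass a cv::Mat obtained from cv::imread() to Predict(). The model path "fsanet-var.onnx", the image path "face.jpg", and the use of HeadPoseResult::Str() to print the predicted Euler angles are illustrative assumptions, not taken from this file.

#include <iostream>

#include <opencv2/opencv.hpp>

#include "fastdeploy/vision.h"

int main() {
  // Default runtime runs on CPU with a backend from "valid_cpu_backends";
  // swap in option.UseGpu() if a GPU backend was compiled into FastDeploy.
  fastdeploy::RuntimeOption option;
  option.UseCpu();

  // Model path is illustrative; the constructor defaults to ONNX format.
  fastdeploy::vision::headpose::FSANet model("fsanet-var.onnx", "", option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize FSANet model." << std::endl;
    return -1;
  }

  // Predict() expects a BGR image in HWC layout, as produced by cv::imread().
  cv::Mat im = cv::imread("face.jpg");

  fastdeploy::vision::HeadPoseResult result;
  if (!model.Predict(&im, &result)) {
    std::cerr << "Prediction failed." << std::endl;
    return -1;
  }

  // Assumed: HeadPoseResult provides a Str() summary of the predicted
  // Euler angles (yaw, pitch, roll).
  std::cout << result.Str() << std::endl;
  return 0;
}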