Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-07 09:31:35 +08:00

* Add tinypose model
* Add PPTinypose python API
* Fix picodet preprocess bug && Add Tinypose examples
* Update tinypose example code
* Update ppseg preprocess if condition
* Update ppseg backend support type
* Update permute.h
* Update README.md
* Update code with comments
* Move files dir
* Delete premute.cc
* Add single model pptinypose
* Delete pptinypose old code in ppdet
* Code format
* Add ppdet + pptinypose pipeline model
* Fix bug for posedetpipeline
* Change Frontend to ModelFormat
* Change Frontend to ModelFormat in __init__.py
* Add python posedetpipeline/
* Update pptinypose example dir name
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Create keypointdetection_result.md
* Create README.md
* Create README.md
* Create README.md
* Update README.md
* Update README.md
* Create README.md
* Fix det_keypoint_unite_infer.py bug
* Create README.md
* Update PP-Tinypose by comment
* Update by comment
* Add pipeline directory
* Add pptinypose dir
* Update pptinypose to align accuracy
* Add warpAffine processor
* Update GetCpuMat to GetOpenCVMat
* Add comment for pptinypose && pipeline
* Update docs/main_page.md
* Add README.md for pptinypose
* Add README for det_keypoint_unite
* Remove ENABLE_PIPELINE option
* Remove ENABLE_PIPELINE option
* Change pptinypose default backend
* PP-TinyPose Pipeline support multi PP-Detection models
* Update pp-tinypose comment
* Update by comments
* Add single test example

Co-authored-by: Jason <jiangjiajun@baidu.com>
122 lines
3.1 KiB
C++
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "fastdeploy/core/fd_tensor.h"
#include "opencv2/core/core.hpp"

#ifdef ENABLE_OPENCV_CUDA
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudaimgproc.hpp"
#include "opencv2/cudawarping.hpp"
#endif
namespace fcv {
class Mat;
}  // namespace fcv

namespace fastdeploy {
namespace vision {

enum ProcLib { DEFAULT, OPENCV, FALCONCV };
enum Layout { HWC, CHW };

struct FASTDEPLOY_DECL Mat {
  explicit Mat(cv::Mat& mat) {
    cpu_mat = mat;
    layout = Layout::HWC;
    height = cpu_mat.rows;
    width = cpu_mat.cols;
    channels = cpu_mat.channels();
    mat_type = ProcLib::OPENCV;
  }

  // Be careful when using this interface: it is only intended for the case
  // where you do not want to modify the original data, but instead write the
  // result into a new cv::Mat and then replace the cv::Mat held by this
  // structure.
  void SetMat(const cv::Mat& mat) {
    cpu_mat = mat;
    mat_type = ProcLib::OPENCV;
  }
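  // A minimal sketch of the pattern described above, assuming `mat` is an
  // instance of this struct; the resize step and the variable names are
  // illustrative only, not part of this header:
  //
  //   cv::Mat resized;
  //   cv::resize(*mat.GetOpenCVMat(), resized, cv::Size(224, 224));
  //   mat.SetMat(resized);
  //   mat.SetWidth(resized.cols);
  //   mat.SetHeight(resized.rows);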

  inline cv::Mat* GetOpenCVMat() {
    FDASSERT(mat_type == ProcLib::OPENCV, "Met non cv::Mat data structure.");
    return &cpu_mat;
  }

  inline const cv::Mat* GetOpenCVMat() const {
    FDASSERT(mat_type == ProcLib::OPENCV, "Met non cv::Mat data structure.");
    return &cpu_mat;
  }

#ifdef ENABLE_FALCONCV
  void SetMat(const fcv::Mat& mat) {
    fcv_mat = mat;
    mat_type = ProcLib::FALCONCV;
  }

  inline fcv::Mat* GetFalconCVMat() {
    FDASSERT(mat_type == ProcLib::FALCONCV, "Met non fcv::Mat data structure.");
    return &fcv_mat;
  }
#endif

  void* Data();

 private:
  int channels;
  int height;
  int width;
  cv::Mat cpu_mat;

#ifdef ENABLE_FALCONCV
  fcv::Mat fcv_mat;
#endif

 public:
  // Currently always returns the underlying cv::Mat, so this template only
  // makes sense when instantiated with T = cv::Mat.
  template <typename T>
  T* GetMat() {
    return &cpu_mat;
  }

  FDDataType Type();
  int Channels() const { return channels; }
  int Width() const { return width; }
  int Height() const { return height; }
  void SetChannels(int s) { channels = s; }
  void SetWidth(int w) { width = w; }
  void SetHeight(int h) { height = h; }

  // Transfer the vision::Mat to FDTensor
  void ShareWithTensor(FDTensor* tensor);
  // Only supports copying to a CPU tensor for now
  bool CopyToTensor(FDTensor* tensor);

  // Debug functions
  // TODO(jiangjiajun): Developing a correct processing pipeline in C++ is not
  // easy; more debug functions will be added here to help inspect the
  // processed image. This function prints the shape and the per-channel mean
  // of the Mat.
  void PrintInfo(const std::string& flag);

  ProcLib mat_type = ProcLib::OPENCV;
  Layout layout = Layout::HWC;
};

Mat CreateFromTensor(const FDTensor& tensor);

}  // namespace vision
}  // namespace fastdeploy
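As a usage sketch (the include path, the image file name, and the surrounding main function are assumptions, not taken from the FastDeploy examples), a vision::Mat is typically constructed from a cv::Mat and then handed to an FDTensor before inference:

#include "fastdeploy/vision/common/processors/mat.h"  // assumed header path
#include "opencv2/imgcodecs.hpp"

int main() {
  cv::Mat image = cv::imread("test.jpg");  // placeholder image path
  fastdeploy::vision::Mat mat(image);      // wraps the cv::Mat, layout = HWC

  // Zero-copy: the tensor shares the Mat's underlying buffer.
  fastdeploy::FDTensor tensor;
  mat.ShareWithTensor(&tensor);

  // Alternatively, copy the data into a CPU tensor:
  // mat.CopyToTensor(&tensor);

  // Prints the shape and per-channel mean, handy when debugging preprocessing.
  mat.PrintInfo("after_load");
  return 0;
}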