mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-07 01:22:59 +08:00

* first commit for yolov7 * pybind for yolov7 * CPP README.md * CPP README.md * modified yolov7.cc * README.md * python file modify * delete license in fastdeploy/ * repush the conflict part * README.md modified * README.md modified * file path modified * file path modified * file path modified * file path modified * file path modified * README modified * README modified * move some helpers to private * add examples for yolov7 * api.md modified * api.md modified * api.md modified * YOLOv7 * yolov7 release link * yolov7 release link * yolov7 release link * copyright * change some helpers to private * change variables to const and fix documents. * gitignore * Transfer some funtions to private member of class * Transfer some funtions to private member of class * Merge from develop (#9) * Fix compile problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> * Add PaddleDetetion/PPYOLOE model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable 
option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: Jason <jiangjiajun@baidu.com> Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> * first commit for yolor * for merge * Develop (#11) * Fix compile problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> * Add PaddleDetetion/PPYOLOE model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: Jason <jiangjiajun@baidu.com> Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth 
<31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> * Yolor (#16) * Develop (#11) (#12) * Fix compile problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> * Add PaddleDetetion/PPYOLOE model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: Jason <jiangjiajun@baidu.com> Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> Co-authored-by: Jason <jiangjiajun@baidu.com> Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> * Develop (#13) * Fix compile 
problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> * Add PaddleDetetion/PPYOLOE model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: Jason <jiangjiajun@baidu.com> Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> * documents * documents * documents * documents * documents * documents * documents * documents * documents * documents * documents * documents * Develop (#14) * Fix compile problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> * Add PaddleDetetion/PPYOLOE model support (#22) * add 
ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> Co-authored-by: Jason <jiangjiajun@baidu.com> Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> Co-authored-by: Jason <928090362@qq.com> * add is_dynamic for YOLO series (#22) * modify ppmatting backend and docs * modify ppmatting docs * fix the PPMatting size problem * fix LimitShort's log * retrigger ci * modify PPMatting docs * modify the way for dealing with LimitShort * add python comments for external models * modify resnet c++ comments * modify C++ comments for external models * modify python comments and add result 
class comments * fix comments compile error * modify result.h comments * python API for detection result * modify yolov7 docs * modify python detection api Co-authored-by: Jason <jiangjiajun@baidu.com> Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com> Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> Co-authored-by: Jason <928090362@qq.com>
225 lines
10 KiB
C++
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
#include "fastdeploy/pybind/main.h"
|
|
|
|
namespace fastdeploy {
|
|
// Registers the visualization helpers on the `fastdeploy` Python module.
//
// Each binding follows the same pattern: convert the incoming numpy array to
// a cv::Mat, call the corresponding vision::Vis* / SwapBackground helper, then
// wrap the resulting image in an FDTensor and hand it back to Python as a
// numpy array via TensorToPyArray.
//
// Two API surfaces are exposed:
//   * free functions on the module (`vis_detection`, `vis_mot`, ...), and
//   * static methods on the legacy `Visualize` class (kept for backward
//     compatibility with older user code).
void BindVisualize(pybind11::module& m) {
  m.def("vis_detection",
        [](pybind11::array& im_data, vision::DetectionResult& result,
           std::vector<std::string>& labels, float score_threshold,
           int line_size, float font_size) {
          auto im = PyArrayToCvMat(im_data);
          cv::Mat vis_im;
          // With an empty label list fall back to the overload that renders
          // only class ids; otherwise render the user-supplied label names.
          if (labels.empty()) {
            vis_im = vision::VisDetection(im, result, score_threshold,
                                          line_size, font_size);
          } else {
            vis_im = vision::VisDetection(im, result, labels, score_threshold,
                                          line_size, font_size);
          }
          FDTensor out;
          vision::Mat(vis_im).ShareWithTensor(&out);
          return TensorToPyArray(out);
        })
      .def("vis_face_detection",
           [](pybind11::array& im_data, vision::FaceDetectionResult& result,
              int line_size, float font_size) {
             auto im = PyArrayToCvMat(im_data);
             auto vis_im =
                 vision::VisFaceDetection(im, result, line_size, font_size);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      .def("vis_face_alignment",
           [](pybind11::array& im_data, vision::FaceAlignmentResult& result,
              int line_size) {
             auto im = PyArrayToCvMat(im_data);
             auto vis_im = vision::VisFaceAlignment(im, result, line_size);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      .def("vis_segmentation",
           [](pybind11::array& im_data, vision::SegmentationResult& result,
              float weight) {
             // `weight` blends the colorized mask over the original image.
             cv::Mat im = PyArrayToCvMat(im_data);
             auto vis_im = vision::VisSegmentation(im, result, weight);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      // Overload 1: replace the background using a matting (alpha) result.
      .def("swap_background",
           [](pybind11::array& im_data, pybind11::array& background_data,
              vision::MattingResult& result, bool remove_small_connected_area) {
             cv::Mat im = PyArrayToCvMat(im_data);
             cv::Mat background = PyArrayToCvMat(background_data);
             auto vis_im = vision::SwapBackground(im, background, result,
                                                  remove_small_connected_area);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      // Overload 2: replace the background using a segmentation result; pixels
      // labelled `background_label` are replaced.
      .def("swap_background",
           [](pybind11::array& im_data, pybind11::array& background_data,
              vision::SegmentationResult& result, int background_label) {
             cv::Mat im = PyArrayToCvMat(im_data);
             cv::Mat background = PyArrayToCvMat(background_data);
             auto vis_im = vision::SwapBackground(im, background, result,
                                                  background_label);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      .def("vis_ppocr",
           [](pybind11::array& im_data, vision::OCRResult& result) {
             auto im = PyArrayToCvMat(im_data);
             auto vis_im = vision::VisOcr(im, result);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      .def("vis_mot",
           [](pybind11::array& im_data, vision::MOTResult& result,
              float score_threshold, vision::tracking::TrailRecorder record) {
             // NOTE(review): `record` is taken by value here (a copy per
             // call) while the Visualize static variant takes a pointer —
             // presumably intentional for the pybind signature; confirm
             // before unifying.
             auto im = PyArrayToCvMat(im_data);
             auto vis_im = vision::VisMOT(im, result, score_threshold, &record);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      .def("vis_matting",
           [](pybind11::array& im_data, vision::MattingResult& result,
              bool remove_small_connected_area) {
             cv::Mat im = PyArrayToCvMat(im_data);
             auto vis_im =
                 vision::VisMatting(im, result, remove_small_connected_area);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      .def("vis_headpose",
           [](pybind11::array& im_data, vision::HeadPoseResult& result,
              int size, int line_size) {
             auto im = PyArrayToCvMat(im_data);
             auto vis_im = vision::VisHeadPose(im, result, size, line_size);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           });

  // Legacy class-based API: `fastdeploy.Visualize` static methods mirroring
  // the free functions above (older names, e.g. vis_matting_alpha).
  pybind11::class_<vision::Visualize>(m, "Visualize")
      .def(pybind11::init<>())
      .def_static("vis_detection",
                  [](pybind11::array& im_data, vision::DetectionResult& result,
                     float score_threshold, int line_size, float font_size) {
                    auto im = PyArrayToCvMat(im_data);
                    auto vis_im = vision::Visualize::VisDetection(
                        im, result, score_threshold, line_size, font_size);
                    FDTensor out;
                    vision::Mat(vis_im).ShareWithTensor(&out);
                    return TensorToPyArray(out);
                  })
      .def_static(
          "vis_keypoint_detection",
          [](pybind11::array& im_data, vision::KeyPointDetectionResult& result,
             float conf_threshold) {
            auto im = PyArrayToCvMat(im_data);
            auto vis_im =
                vision::VisKeypointDetection(im, result, conf_threshold);
            FDTensor out;
            vision::Mat(vis_im).ShareWithTensor(&out);
            return TensorToPyArray(out);
          })
      .def_static(
          "vis_face_detection",
          [](pybind11::array& im_data, vision::FaceDetectionResult& result,
             int line_size, float font_size) {
            auto im = PyArrayToCvMat(im_data);
            auto vis_im = vision::Visualize::VisFaceDetection(
                im, result, line_size, font_size);
            FDTensor out;
            vision::Mat(vis_im).ShareWithTensor(&out);
            return TensorToPyArray(out);
          })
      .def_static(
          "vis_segmentation",
          [](pybind11::array& im_data, vision::SegmentationResult& result) {
            cv::Mat im = PyArrayToCvMat(im_data);
            auto vis_im = vision::Visualize::VisSegmentation(im, result);
            FDTensor out;
            vision::Mat(vis_im).ShareWithTensor(&out);
            return TensorToPyArray(out);
          })
      .def_static(
          "swap_background_matting",
          [](pybind11::array& im_data, pybind11::array& background_data,
             vision::MattingResult& result, bool remove_small_connected_area) {
            cv::Mat im = PyArrayToCvMat(im_data);
            cv::Mat background = PyArrayToCvMat(background_data);
            auto vis_im = vision::Visualize::SwapBackgroundMatting(
                im, background, result, remove_small_connected_area);
            FDTensor out;
            vision::Mat(vis_im).ShareWithTensor(&out);
            return TensorToPyArray(out);
          })
      .def_static("swap_background_segmentation",
                  [](pybind11::array& im_data, pybind11::array& background_data,
                     int background_label, vision::SegmentationResult& result) {
                    cv::Mat im = PyArrayToCvMat(im_data);
                    cv::Mat background = PyArrayToCvMat(background_data);
                    auto vis_im = vision::Visualize::SwapBackgroundSegmentation(
                        im, background, background_label, result);
                    FDTensor out;
                    vision::Mat(vis_im).ShareWithTensor(&out);
                    return TensorToPyArray(out);
                  })
      .def_static("remove_small_connected_area",
                  [](pybind11::array& alpha_pred_data, float threshold) {
                    cv::Mat alpha_pred = PyArrayToCvMat(alpha_pred_data);
                    auto vis_im = vision::Visualize::RemoveSmallConnectedArea(
                        alpha_pred, threshold);
                    // BUG FIX: the processed alpha map used to be computed and
                    // then discarded (the lambda had no return), so Python
                    // always received None. Return it like every other
                    // binding in this module.
                    FDTensor out;
                    vision::Mat(vis_im).ShareWithTensor(&out);
                    return TensorToPyArray(out);
                  })
      .def_static("vis_ppocr",
                  [](pybind11::array& im_data, vision::OCRResult& result) {
                    auto im = PyArrayToCvMat(im_data);
                    auto vis_im = vision::Visualize::VisOcr(im, result);
                    FDTensor out;
                    vision::Mat(vis_im).ShareWithTensor(&out);
                    return TensorToPyArray(out);
                  })
      .def_static(
          "vis_mot",
          [](pybind11::array& im_data, vision::MOTResult& result,
             float score_threshold, vision::tracking::TrailRecorder* record) {
            auto im = PyArrayToCvMat(im_data);
            auto vis_im = vision::VisMOT(im, result, score_threshold, record);
            FDTensor out;
            vision::Mat(vis_im).ShareWithTensor(&out);
            return TensorToPyArray(out);
          })
      .def_static("vis_matting_alpha",
                  [](pybind11::array& im_data, vision::MattingResult& result,
                     bool remove_small_connected_area) {
                    cv::Mat im = PyArrayToCvMat(im_data);
                    auto vis_im = vision::Visualize::VisMattingAlpha(
                        im, result, remove_small_connected_area);
                    FDTensor out;
                    vision::Mat(vis_im).ShareWithTensor(&out);
                    return TensorToPyArray(out);
                  });
}
|
|
} // namespace fastdeploy
|