[Model] add tracking trail on vis_mot (#461)

* add override mark

* delete some

* recovery

* recovery

* add tracking

* add tracking py_bind and example

* add pptracking

* add pptracking

* add iomanip header file

* add opencv_video lib

* add python libs package

Signed-off-by: ChaoII <849453582@qq.com>

* complete comments

Signed-off-by: ChaoII <849453582@qq.com>

* add jdeTracker_ member variable

Signed-off-by: ChaoII <849453582@qq.com>

* add 'FASTDEPLOY_DECL' macro

Signed-off-by: ChaoII <849453582@qq.com>

* remove kwargs params

Signed-off-by: ChaoII <849453582@qq.com>

* [Doc] update pptracking docs

* delete 'ENABLE_PADDLE_FRONTEND' switch

* add pptracking unit test

* update pptracking unit test

Signed-off-by: ChaoII <849453582@qq.com>

* modify test video file path and remove trt test

* update unit test model url

* remove 'FASTDEPLOY_DECL' macro

Signed-off-by: ChaoII <849453582@qq.com>

* fix building Python packages with pptracking on win32

Signed-off-by: ChaoII <849453582@qq.com>

* update comment

Signed-off-by: ChaoII <849453582@qq.com>

* add pptracking model explanation

Signed-off-by: ChaoII <849453582@qq.com>

* add tracking trail on vis_mot

* add tracking trail

* modify code per review suggestions

* remove unused import

* fix import bug

Signed-off-by: ChaoII <849453582@qq.com>
Co-authored-by: Jason <jiangjiajun@baidu.com>
Author: ChaoII
Date: 2022-11-03 09:57:07 +08:00
Committed by: GitHub
Parent commit: 328212f270
Commit: 22d60fdadf
16 changed files with 208 additions and 116 deletions
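
In substance, this change replaces VisMOT's fps/frame_id overlay parameters with a score_threshold filter and an optional tracking::TrailRecorder* used to draw each track's trail. A minimal sketch of the intended call pattern follows; running the PP-Tracking model and the way FastDeploy itself fills the recorder are not shown in this diff, so both are stubbed or done by hand here, and the file name, header, and container assumptions are flagged inline:

// Sketch only (assumptions flagged inline); not taken verbatim from this PR.
#include "opencv2/opencv.hpp"
#include "fastdeploy/vision.h"  // assumed umbrella header exposing VisMOT, MOTResult, TrailRecorder

int main() {
  cv::VideoCapture cap("person.mp4");                    // illustrative input video
  fastdeploy::vision::tracking::TrailRecorder recorder;  // trail buffer read by VisMOT
  cv::Mat frame;
  while (cap.read(frame)) {
    fastdeploy::vision::MOTResult result;
    // ... run the PP-Tracking model here to fill `result` (omitted) ...
    // Assumption: records maps track id -> list of past (x, y) centres, as the
    // drawing loop in the diff implies; updated by hand here for illustration.
    for (size_t i = 0; i < result.boxes.size(); ++i) {
      int cx = (result.boxes[i][0] + result.boxes[i][2]) / 2;
      int cy = (result.boxes[i][1] + result.boxes[i][3]) / 2;
      recorder.records[result.ids[i]].push_back({cx, cy});
    }
    cv::Mat vis = fastdeploy::vision::VisMOT(frame, result, 0.0f, &recorder);
    cv::imshow("mot", vis);
    if (cv::waitKey(1) == 27) break;  // Esc to quit
  }
  return 0;
}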


@@ -25,73 +25,63 @@ cv::Scalar GetMOTBoxColor(int idx) {
   return color;
 }
-cv::Mat VisMOT(const cv::Mat &img, const MOTResult &results, float fps, int frame_id) {
+cv::Mat VisMOT(const cv::Mat &img, const MOTResult &results,
+               float score_threshold, tracking::TrailRecorder* recorder) {
   cv::Mat vis_img = img.clone();
   int im_h = img.rows;
   int im_w = img.cols;
   float text_scale = std::max(1, static_cast<int>(im_w / 1600.));
   float text_thickness = 2.;
   float line_thickness = std::max(1, static_cast<int>(im_w / 500.));
-  std::ostringstream oss;
-  oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
-  oss << "frame: " << frame_id << " ";
-  oss << "fps: " << fps << " ";
-  oss << "num: " << results.boxes.size();
-  std::string text = oss.str();
-  cv::Point origin;
-  origin.x = 0;
-  origin.y = static_cast<int>(15 * text_scale);
-  cv::putText(vis_img,
-              text,
-              origin,
-              cv::FONT_HERSHEY_PLAIN,
-              text_scale,
-              cv::Scalar(0, 0, 255),
-              text_thickness);
   for (int i = 0; i < results.boxes.size(); ++i) {
+    if (results.scores[i] < score_threshold) {
+      continue;
+    }
     const int obj_id = results.ids[i];
     const float score = results.scores[i];
     cv::Scalar color = GetMOTBoxColor(obj_id);
+    if (recorder != nullptr) {
+      int id = results.ids[i];
+      auto iter = recorder->records.find(id);
+      if (iter != recorder->records.end()) {
+        for (int j = 0; j < iter->second.size(); j++) {
+          cv::Point center(iter->second[j][0], iter->second[j][1]);
+          cv::circle(vis_img, center, text_thickness, color);
+        }
+      }
+    }
     cv::Point pt1 = cv::Point(results.boxes[i][0], results.boxes[i][1]);
     cv::Point pt2 = cv::Point(results.boxes[i][2], results.boxes[i][3]);
     cv::Point id_pt =
         cv::Point(results.boxes[i][0], results.boxes[i][1] + 10);
     cv::Point score_pt =
         cv::Point(results.boxes[i][0], results.boxes[i][1] - 10);
     cv::rectangle(vis_img, pt1, pt2, color, line_thickness);
     std::ostringstream idoss;
     idoss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
     idoss << obj_id;
     std::string id_text = idoss.str();
     cv::putText(vis_img,
                 id_text,
                 id_pt,
                 cv::FONT_HERSHEY_PLAIN,
                 text_scale,
-                cv::Scalar(0, 255, 255),
+                color,
                 text_thickness);
     std::ostringstream soss;
     soss << std::setiosflags(std::ios::fixed) << std::setprecision(2);
     soss << score;
     std::string score_text = soss.str();
     cv::putText(vis_img,
                 score_text,
                 score_pt,
                 cv::FONT_HERSHEY_PLAIN,
                 text_scale,
-                cv::Scalar(0, 255, 255),
+                color,
                 text_thickness);
   }
   return vis_img;
 }
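
Note that the trail loop above only reads recorder->records, so all VisMOT needs is a mapping from track id to that track's past centre points. The concrete container types inside FastDeploy's TrailRecorder are not visible in this hunk; a stand-in with the assumed shape (just enough to make iter->second[j][0] / iter->second[j][1] well-formed) would be:

#include <map>
#include <vector>
#include <array>

// Assumed shape of the trail buffer the drawing loop consumes:
// one list of past (x, y) centres per track id.
struct TrailRecorderLike {
  std::map<int, std::vector<std::array<int, 2>>> records;
};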


@@ -17,6 +17,8 @@
#include "fastdeploy/vision/common/result.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "fastdeploy/vision/tracking/pptracking/model.h"
namespace fastdeploy {
namespace vision {
@@ -81,8 +83,9 @@ FASTDEPLOY_DECL cv::Mat VisMatting(const cv::Mat& im,
bool remove_small_connected_area = false);
FASTDEPLOY_DECL cv::Mat VisOcr(const cv::Mat& im, const OCRResult& ocr_result);
FASTDEPLOY_DECL cv::Mat VisMOT(const cv::Mat& img,const MOTResult& results, float fps=0.0, int frame_id=0);
FASTDEPLOY_DECL cv::Mat VisMOT(const cv::Mat& img, const MOTResult& results,
float score_threshold = 0.0f,
tracking::TrailRecorder* recorder = nullptr);
FASTDEPLOY_DECL cv::Mat SwapBackground(
const cv::Mat& im, const cv::Mat& background, const MattingResult& result,
bool remove_small_connected_area = false);
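
Both new parameters are defaulted in this declaration, so the simplest call just passes the image and the MOT result; score filtering and trail drawing are opt-in. A short sketch of the two call styles (variable names are illustrative):

// Draw every box with its id and score; no filtering, no trails.
cv::Mat plain = fastdeploy::vision::VisMOT(frame, result);

// Drop boxes scoring below 0.5 and draw trails from a recorder kept elsewhere.
fastdeploy::vision::tracking::TrailRecorder recorder;
cv::Mat with_trails = fastdeploy::vision::VisMOT(frame, result, 0.5f, &recorder);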


@@ -86,9 +86,9 @@ void BindVisualize(pybind11::module& m) {
             return TensorToPyArray(out);
           })
       .def("vis_mot",
-           [](pybind11::array& im_data, vision::MOTResult& result,float fps, int frame_id) {
+           [](pybind11::array& im_data, vision::MOTResult& result,float score_threshold, vision::tracking::TrailRecorder record) {
             auto im = PyArrayToCvMat(im_data);
-            auto vis_im = vision::VisMOT(im, result,fps,frame_id);
+            auto vis_im = vision::VisMOT(im, result, score_threshold, &record);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
@@ -185,9 +185,10 @@ void BindVisualize(pybind11::module& m) {
                     return TensorToPyArray(out);
                   })
       .def_static("vis_mot",
-                  [](pybind11::array& im_data, vision::MOTResult& result,float fps, int frame_id) {
+                  [](pybind11::array& im_data, vision::MOTResult& result,float score_threshold,
+                     vision::tracking::TrailRecorder* record) {
                     auto im = PyArrayToCvMat(im_data);
-                    auto vis_im = vision::VisMOT(im, result,fps,frame_id);
+                    auto vis_im = vision::VisMOT(im, result, score_threshold, record);
                     FDTensor out;
                     vision::Mat(vis_im).ShareWithTensor(&out);
                     return TensorToPyArray(out);