Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-06 17:17:14 +08:00
[Model] add tracking trail on vis_mot (#461)
* add override mark
* delete some
* recovery
* recovery
* add tracking
* add tracking py_bind and example
* add pptracking
* add pptracking
* iomanip head file
* add opencv_video lib
* add python libs package
  Signed-off-by: ChaoII <849453582@qq.com>
* complete comments
  Signed-off-by: ChaoII <849453582@qq.com>
* add jdeTracker_ member variable
  Signed-off-by: ChaoII <849453582@qq.com>
* add 'FASTDEPLOY_DECL' macro
  Signed-off-by: ChaoII <849453582@qq.com>
* remove kwargs params
  Signed-off-by: ChaoII <849453582@qq.com>
* [Doc]update pptracking docs
* delete 'ENABLE_PADDLE_FRONTEND' switch
* add pptracking unit test
* update pptracking unit test
  Signed-off-by: ChaoII <849453582@qq.com>
* modify test video file path and remove trt test
* update unit test model url
* remove 'FASTDEPLOY_DECL' macro
  Signed-off-by: ChaoII <849453582@qq.com>
* fix build python packages about pptracking on win32
  Signed-off-by: ChaoII <849453582@qq.com>
* update comment
  Signed-off-by: ChaoII <849453582@qq.com>
* add pptracking model explain
  Signed-off-by: ChaoII <849453582@qq.com>
* add tracking trail on vis_mot
* add tracking trail
* modify code for some suggestion
* remove unused import
* fix import bug
  Signed-off-by: ChaoII <849453582@qq.com>

Co-authored-by: Jason <jiangjiajun@baidu.com>
@@ -14,6 +14,7 @@
#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "opencv2/core/core.hpp"
#include <set>

namespace fastdeploy {
/** \brief All C++ FastDeploy Vision Models APIs are defined inside this namespace

@@ -171,6 +172,7 @@ struct FASTDEPLOY_DECL MOTResult : public BaseResult {
  /** \brief The classify label id for all the tracking object
   */
  std::vector<int> class_ids;

  ResultType type = ResultType::MOT;
  /// Clear MOT result
  void Clear();
@@ -161,9 +161,7 @@ bool PPTracking::Initialize() {
    return false;
  }
  // create JDETracker instance
  std::unique_ptr<JDETracker> jdeTracker(new JDETracker);
  jdeTracker_ = std::move(jdeTracker);

  jdeTracker_ = std::unique_ptr<JDETracker>(new JDETracker);
  return true;
}

@@ -245,7 +243,6 @@ bool PPTracking::Postprocess(std::vector<FDTensor>& infer_result, MOTResult *res
  cv::Mat dets(bbox_shape[0], 6, CV_32FC1, bbox_data);
  cv::Mat emb(bbox_shape[0], emb_shape[1], CV_32FC1, emb_data);

  result->Clear();
  std::vector<Track> tracks;
  std::vector<int> valid;

@@ -264,7 +261,6 @@ bool PPTracking::Postprocess(std::vector<FDTensor>& infer_result, MOTResult *res
    result->boxes.push_back(box);
    result->ids.push_back(1);
    result->scores.push_back(*dets.ptr<float>(0, 4));

  } else {
    std::vector<Track>::iterator titer;
    for (titer = tracks.begin(); titer != tracks.end(); ++titer) {

@@ -285,9 +281,36 @@ bool PPTracking::Postprocess(std::vector<FDTensor>& infer_result, MOTResult *res
      }
    }
  }
  if (!is_record_trail_) return true;
  int nums = result->boxes.size();
  for (int i=0; i<nums; i++) {
    float center_x = (result->boxes[i][0] + result->boxes[i][2]) / 2;
    float center_y = (result->boxes[i][1] + result->boxes[i][3]) / 2;
    int id = result->ids[i];
    recorder_->Add(id,{int(center_x), int(center_y)});
  }
  return true;
}

void PPTracking::BindRecorder(TrailRecorder* recorder){
  recorder_ = recorder;
  is_record_trail_ = true;
}

void PPTracking::UnbindRecorder(){
  is_record_trail_ = false;
  std::map<int, std::vector<std::array<int, 2>>>::iterator iter;
  for(iter = recorder_->records.begin(); iter != recorder_->records.end(); iter++){
    iter->second.clear();
    iter->second.shrink_to_fit();
  }
  recorder_->records.clear();
  std::map<int, std::vector<std::array<int, 2>>>().swap(recorder_->records);
  recorder_ = nullptr;
}

} // namespace tracking
} // namespace vision
} // namespace fastdeploy
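For reference (not part of the commit), a minimal sketch of the data the trail-recording step above accumulates. TrailRecorder is declared in the model.h change that follows; the track ids and center coordinates here are made up for illustration.

#include <cassert>
#include "fastdeploy/vision/tracking/pptracking/model.h"

int main() {
  fastdeploy::vision::tracking::TrailRecorder rec;
  // Postprocess() calls Add() once per tracked box and frame, passing the
  // integer box center {cx, cy}; here three frames are simulated by hand.
  rec.Add(7, {320, 240});  // frame 1, track id 7
  rec.Add(7, {324, 238});  // frame 2, track id 7
  rec.Add(9, {100, 180});  // frame 2, a second track starts its own trail
  assert(rec.records[7].size() == 2 && rec.records[9].size() == 1);
  return 0;
}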
@@ -14,6 +14,7 @@

#pragma once

#include <map>
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/result.h"

@@ -22,6 +23,21 @@
namespace fastdeploy {
namespace vision {
namespace tracking {
struct TrailRecorder{
  std::map<int, std::vector<std::array<int, 2>>> records;
  void Add(int id, const std::array<int, 2>& record);
};

inline void TrailRecorder::Add(int id, const std::array<int, 2>& record) {
  auto iter = records.find(id);
  if (iter != records.end()) {
    auto trail = records[id];
    trail.push_back(record);
    records[id] = trail;
  } else {
    records[id] = {record};
  }
}

class FASTDEPLOY_DECL PPTracking: public FastDeployModel {
 public:

@@ -49,6 +65,14 @@ class FASTDEPLOY_DECL PPTracking: public FastDeployModel {
   * \return true if the prediction successed, otherwise false
   */
  virtual bool Predict(cv::Mat* img, MOTResult* result);
  /** \brief bind tracking trail struct
   *
   * \param[in] recorder The MOT trail will record the trail of object
   */
  void BindRecorder(TrailRecorder* recorder);
  /** \brief cancel binding and clear trail information
   */
  void UnbindRecorder();

 private:
  bool BuildPreprocessPipelineFromConfig();

@@ -65,8 +89,11 @@ class FASTDEPLOY_DECL PPTracking: public FastDeployModel {
  float conf_thresh_;
  float tracked_thresh_;
  float min_box_area_;
  bool is_record_trail_ = false;
  std::unique_ptr<JDETracker> jdeTracker_;
  TrailRecorder *recorder_ = nullptr;
};

} // namespace tracking
} // namespace vision
} // namespace fastdeploy
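A minimal end-to-end sketch of the new API (not part of the diff): the model and video paths are illustrative, and the three-argument constructor assumes the usual FastDeploy defaults for RuntimeOption and ModelFormat.

#include "fastdeploy/vision.h"
#include "opencv2/opencv.hpp"

int main() {
  namespace tracking = fastdeploy::vision::tracking;
  // Hypothetical export layout; substitute your own PP-Tracking model files.
  auto model = tracking::PPTracking("pptracking/model.pdmodel",
                                    "pptracking/model.pdiparams",
                                    "pptracking/infer_cfg.yml");
  if (!model.Initialized()) return -1;

  tracking::TrailRecorder recorder;
  model.BindRecorder(&recorder);  // Postprocess() now appends box centers per id

  cv::VideoCapture capture("person.mp4");  // illustrative input video
  cv::Mat frame;
  while (capture.read(frame)) {
    fastdeploy::vision::MOTResult result;
    if (!model.Predict(&frame, &result)) break;
    // One call draws boxes, ids, scores and the recorded trail points.
    cv::Mat vis = fastdeploy::vision::VisMOT(frame, result, 0.0f, &recorder);
    cv::imshow("mot", vis);
    if (cv::waitKey(1) == 27) break;
  }
  model.UnbindRecorder();  // clears recorder.records and resets the stored pointer
  return 0;
}

Note that BindRecorder() only stores a raw pointer, so the TrailRecorder must outlive the prediction loop, and UnbindRecorder() should be called before it goes out of scope.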
@@ -15,6 +15,11 @@

namespace fastdeploy {
void BindPPTracking(pybind11::module &m) {
  pybind11::class_<vision::tracking::TrailRecorder>(m, "TrailRecorder")
      .def(pybind11::init<>())
      .def_readwrite("records", &vision::tracking::TrailRecorder::records)
      .def("add", &vision::tracking::TrailRecorder::Add);
  pybind11::class_<vision::tracking::PPTracking, FastDeployModel>(
      m, "PPTracking")
      .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,

@@ -26,6 +31,8 @@ void BindPPTracking(pybind11::module &m) {
            vision::MOTResult *res = new vision::MOTResult();
            self.Predict(&mat, res);
            return res;
          });
      })
      .def("bind_recorder", &vision::tracking::PPTracking::BindRecorder)
      .def("unbind_recorder", &vision::tracking::PPTracking::UnbindRecorder);
}
} // namespace fastdeploy
@@ -118,7 +118,7 @@ void Trajectory::update(Trajectory *traj,
  if (update_embedding_) update_embedding(traj->current_embedding);
}

void Trajectory::activate(int &cnt,int timestamp_) {
void Trajectory::activate(int &cnt, int timestamp_) {
  id = next_id(cnt);
  TKalmanFilter::init(cv::Mat(xyah));
  length = 0;

@@ -130,7 +130,7 @@ void Trajectory::activate(int &cnt,int timestamp_) {
  starttime = timestamp_;
}

void Trajectory::reactivate(Trajectory *traj,int &cnt, int timestamp_, bool newid) {
void Trajectory::reactivate(Trajectory *traj, int &cnt, int timestamp_, bool newid) {
  TKalmanFilter::correct(cv::Mat(traj->xyah));
  update_embedding(traj->current_embedding);
  length = 0;

@@ -74,8 +74,8 @@ class FASTDEPLOY_DECL Trajectory : public TKalmanFilter {
  virtual void update(Trajectory *traj,
                      int timestamp,
                      bool update_embedding = true);
  virtual void activate(int& cnt, int timestamp);
  virtual void reactivate(Trajectory *traj, int & cnt,int timestamp, bool newid = false);
  virtual void activate(int &cnt, int timestamp);
  virtual void reactivate(Trajectory *traj, int &cnt, int timestamp, bool newid = false);
  virtual void mark_lost(void);
  virtual void mark_removed(void);
@@ -25,73 +25,63 @@ cv::Scalar GetMOTBoxColor(int idx) {
  return color;
}

cv::Mat VisMOT(const cv::Mat &img, const MOTResult &results, float fps, int frame_id) {
cv::Mat VisMOT(const cv::Mat &img, const MOTResult &results,
               float score_threshold, tracking::TrailRecorder* recorder) {
  cv::Mat vis_img = img.clone();
  int im_h = img.rows;
  int im_w = img.cols;
  float text_scale = std::max(1, static_cast<int>(im_w / 1600.));
  float text_thickness = 2.;
  float line_thickness = std::max(1, static_cast<int>(im_w / 500.));

  std::ostringstream oss;
  oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
  oss << "frame: " << frame_id << " ";
  oss << "fps: " << fps << " ";
  oss << "num: " << results.boxes.size();
  std::string text = oss.str();

  cv::Point origin;
  origin.x = 0;
  origin.y = static_cast<int>(15 * text_scale);
  cv::putText(vis_img,
              text,
              origin,
              cv::FONT_HERSHEY_PLAIN,
              text_scale,
              cv::Scalar(0, 0, 255),
              text_thickness);

  for (int i = 0; i < results.boxes.size(); ++i) {
    const int obj_id = results.ids[i];
    const float score = results.scores[i];
    if (results.scores[i] < score_threshold) {
      continue;
    }
    const int obj_id = results.ids[i];
    const float score = results.scores[i];
    cv::Scalar color = GetMOTBoxColor(obj_id);
    if (recorder != nullptr){
      int id = results.ids[i];
      auto iter = recorder->records.find(id);
      if (iter != recorder->records.end()) {
        for (int j = 0; j < iter->second.size(); j++) {
          cv::Point center(iter->second[j][0], iter->second[j][1]);
          cv::circle(vis_img, center, text_thickness, color);
        }
      }
    }
    cv::Point pt1 = cv::Point(results.boxes[i][0], results.boxes[i][1]);
    cv::Point pt2 = cv::Point(results.boxes[i][2], results.boxes[i][3]);
    cv::Point id_pt =
        cv::Point(results.boxes[i][0], results.boxes[i][1] + 10);
    cv::Point score_pt =
        cv::Point(results.boxes[i][0], results.boxes[i][1] - 10);
    cv::rectangle(vis_img, pt1, pt2, color, line_thickness);
    std::ostringstream idoss;
    idoss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
    idoss << obj_id;
    std::string id_text = idoss.str();

    cv::Scalar color = GetMOTBoxColor(obj_id);
    cv::putText(vis_img,
                id_text,
                id_pt,
                cv::FONT_HERSHEY_PLAIN,
                text_scale,
                color,
                text_thickness);

    cv::Point pt1 = cv::Point(results.boxes[i][0], results.boxes[i][1]);
    cv::Point pt2 = cv::Point(results.boxes[i][2], results.boxes[i][3]);
    cv::Point id_pt =
        cv::Point(results.boxes[i][0], results.boxes[i][1] + 10);
    cv::Point score_pt =
        cv::Point(results.boxes[i][0], results.boxes[i][1] - 10);
    cv::rectangle(vis_img, pt1, pt2, color, line_thickness);
    std::ostringstream soss;
    soss << std::setiosflags(std::ios::fixed) << std::setprecision(2);
    soss << score;
    std::string score_text = soss.str();

    std::ostringstream idoss;
    idoss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
    idoss << obj_id;
    std::string id_text = idoss.str();

    cv::putText(vis_img,
                id_text,
                id_pt,
                cv::FONT_HERSHEY_PLAIN,
                text_scale,
                cv::Scalar(0, 255, 255),
                text_thickness);

    std::ostringstream soss;
    soss << std::setiosflags(std::ios::fixed) << std::setprecision(2);
    soss << score;
    std::string score_text = soss.str();

    cv::putText(vis_img,
                score_text,
                score_pt,
                cv::FONT_HERSHEY_PLAIN,
                text_scale,
                cv::Scalar(0, 255, 255),
                text_thickness);
    cv::putText(vis_img,
                score_text,
                score_pt,
                cv::FONT_HERSHEY_PLAIN,
                text_scale,
                color,
                text_thickness);
  }
  return vis_img;
}
@@ -17,6 +17,8 @@

#include "fastdeploy/vision/common/result.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "fastdeploy/vision/tracking/pptracking/model.h"

namespace fastdeploy {
namespace vision {

@@ -81,8 +83,9 @@ FASTDEPLOY_DECL cv::Mat VisMatting(const cv::Mat& im,
                                   bool remove_small_connected_area = false);
FASTDEPLOY_DECL cv::Mat VisOcr(const cv::Mat& im, const OCRResult& ocr_result);

FASTDEPLOY_DECL cv::Mat VisMOT(const cv::Mat& img,const MOTResult& results, float fps=0.0, int frame_id=0);

FASTDEPLOY_DECL cv::Mat VisMOT(const cv::Mat& img, const MOTResult& results,
                               float score_threshold = 0.0f,
                               tracking::TrailRecorder* recorder = nullptr);
FASTDEPLOY_DECL cv::Mat SwapBackground(
    const cv::Mat& im, const cv::Mat& background, const MattingResult& result,
    bool remove_small_connected_area = false);
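To make the default arguments of the new declaration concrete, a hedged call-site sketch (frame, result and recorder stand for the cv::Mat, MOTResult and bound TrailRecorder from the earlier example):

cv::Mat vis_plain = fastdeploy::vision::VisMOT(frame, result);                   // no score filter, no trail
cv::Mat vis_filtered = fastdeploy::vision::VisMOT(frame, result, 0.5f);          // skip boxes with score < 0.5
cv::Mat vis_trail = fastdeploy::vision::VisMOT(frame, result, 0.0f, &recorder);  // also draw recorded trail points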
@@ -86,9 +86,9 @@ void BindVisualize(pybind11::module& m) {
            return TensorToPyArray(out);
          })
      .def("vis_mot",
           [](pybind11::array& im_data, vision::MOTResult& result,float fps, int frame_id) {
           [](pybind11::array& im_data, vision::MOTResult& result,float score_threshold, vision::tracking::TrailRecorder record) {
             auto im = PyArrayToCvMat(im_data);
             auto vis_im = vision::VisMOT(im, result,fps,frame_id);
             auto vis_im = vision::VisMOT(im, result, score_threshold, &record);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);

@@ -185,9 +185,10 @@ void BindVisualize(pybind11::module& m) {
            return TensorToPyArray(out);
          })
      .def_static("vis_mot",
                  [](pybind11::array& im_data, vision::MOTResult& result,float fps, int frame_id) {
                  [](pybind11::array& im_data, vision::MOTResult& result,float score_threshold,
                     vision::tracking::TrailRecorder* record) {
                    auto im = PyArrayToCvMat(im_data);
                    auto vis_im = vision::VisMOT(im, result,fps,frame_id);
                    auto vis_im = vision::VisMOT(im, result, score_threshold, record);
                    FDTensor out;
                    vision::Mat(vis_im).ShareWithTensor(&out);
                    return TensorToPyArray(out);