Use motion detection in analysis of video streams and video files to increase performance (Issue 39)

This commit is contained in:
Kees-V
2015-05-12 19:27:18 +02:00
parent e95ce76c02
commit 0c2495f26c
3 changed files with 91 additions and 7 deletions

View File

@@ -31,6 +31,7 @@
#include "support/timing.h"
#include "support/platform.h"
#include "video/videobuffer.h"
#include "video/motiondetector.h"
#include "alpr.h"
using namespace alpr;
@@ -39,6 +40,8 @@ const std::string MAIN_WINDOW_NAME = "ALPR main window";
const bool SAVE_LAST_VIDEO_STILL = false;
const std::string LAST_VIDEO_STILL_LOCATION = "/tmp/laststill.jpg";
MotionDetector motiondetector;
bool do_motiondetection = true;
/** Function Headers */
bool detectandshow(Alpr* alpr, cv::Mat frame, std::string region, bool writeJson);
@@ -74,6 +77,7 @@ int main( int argc, const char** argv )
TCLAP::SwitchArg jsonSwitch("j","json","Output recognition results in JSON format. Default=off", cmd, false);
TCLAP::SwitchArg detectRegionSwitch("d","detect_region","Attempt to detect the region of the plate image. [Experimental] Default=off", cmd, false);
TCLAP::SwitchArg clockSwitch("","clock","Measure/print the total time to process image and all plates. Default=off", cmd, false);
TCLAP::SwitchArg motiondetect("", "motion", "Use motion detection on video file or stream. Default=off", cmd, false);
try
{
@@ -101,6 +105,7 @@ int main( int argc, const char** argv )
templatePattern = templatePatternArg.getValue();
topn = topNArg.getValue();
measureProcessingTime = clockSwitch.getValue();
do_motiondetection = motiondetect.getValue();
}
catch (TCLAP::ArgException &e) // catch any exceptions
{
@@ -177,11 +182,13 @@ int main( int argc, const char** argv )
if (response != -1)
{
if (framenum == 0) motiondetector.ResetMotionDetection(&latestFrame);
detectandshow(&alpr, latestFrame, "", outputJson);
}
// Sleep 10ms
sleep_ms(10);
framenum++;
}
videoBuffer.disconnect();
@@ -206,7 +213,7 @@ int main( int argc, const char** argv )
cv::imwrite(LAST_VIDEO_STILL_LOCATION, frame);
}
std::cout << "Frame: " << framenum << std::endl;
if (framenum == 0) motiondetector.ResetMotionDetection(&frame);
detectandshow(&alpr, frame, "", outputJson);
//create a 1ms delay
sleep_ms(1);
@@ -278,9 +285,14 @@ bool detectandshow( Alpr* alpr, cv::Mat frame, std::string region, bool writeJso
getTimeMonotonic(&startTime);
std::vector<AlprRegionOfInterest> regionsOfInterest;
regionsOfInterest.push_back(AlprRegionOfInterest(0,0, frame.cols, frame.rows));
AlprResults results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest );
if (do_motiondetection)
{
cv::Rect rectan = motiondetector.MotionDetect(&frame);
if (rectan.width>0) regionsOfInterest.push_back(AlprRegionOfInterest(rectan.x, rectan.y, rectan.width, rectan.height));
}
else regionsOfInterest.push_back(AlprRegionOfInterest(0, 0, frame.cols, frame.rows));
AlprResults results;
if (regionsOfInterest.size()>0) results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest);
timespec endTime;
getTimeMonotonic(&endTime);

View File

@@ -0,0 +1,59 @@
#include "motiondetector.h"
using namespace cv;
// Construct the detector with a fresh MOG2 background subtractor.
// The background model is seeded by the first call to ResetMotionDetection().
MotionDetector::MotionDetector()
  : pMOG2(cv::createBackgroundSubtractorMOG2())
{
}
// No owned resources beyond RAII members (cv::Ptr, cv::Mat) — default is enough.
MotionDetector::~MotionDetector() = default;
// Re-seed the background model from the given frame.
// A learning rate of 1 makes the MOG2 subtractor rebuild its background
// model entirely from this single frame, discarding any previous history —
// callers use this on the first frame of a new video stream/file.
void MotionDetector::ResetMotionDetection(cv::Mat* frame)
{
pMOG2->apply(*frame, fgMaskMOG2, 1);
}
// Detect motion in the frame and return ONE rectangle that contains all the
// detected motion. Returns an all-zero cv::Rect when no motion was found.
// Side effects: updates the background model, and draws the detected motion
// rectangle onto *frame in green (visualization aid).
cv::Rect MotionDetector::MotionDetect(cv::Mat* frame)
{
  // Update the background model and compute the foreground mask.
  // Learning rate -1 lets the subtractor pick the rate automatically.
  pMOG2->apply(*frame, fgMaskMOG2, -1);

  // Erode the mask to remove small specks of noise.
  cv::erode(fgMaskMOG2, fgMaskMOG2, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(6, 6)));

  // Find the contours of the remaining motion areas.
  std::vector<std::vector<cv::Point> > contours;
  std::vector<cv::Vec4i> hierarchy;
  cv::findContours(fgMaskMOG2, contours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

  if (contours.empty())
    return cv::Rect(0, 0, 0, 0); // no motion detected

  // Union of the bounding rectangles of every motion contour.
  cv::Rect motion_area = cv::boundingRect(contours[0]);
  for (size_t i = 1; i < contours.size(); i++)
    motion_area |= cv::boundingRect(contours[i]);

  // Visualize the overall motion area on the frame.
  // BUGFIX: the original drew rect_temp, which was still default-constructed
  // (all zeros) when only a single contour was found; draw the combined
  // rectangle instead so the visualization matches the returned region.
  cv::rectangle(*frame, motion_area, cv::Scalar(0, 255, 0), 1, 8, 0);

  // imshow("Motion detect", fgMaskMOG2);
  return motion_area;
}

View File

@@ -0,0 +1,13 @@
#pragma once  // BUGFIX: header had no include guard; double inclusion would redefine the class

#include "opencv2/opencv.hpp"

// Detects areas of motion between successive video frames using MOG2
// background subtraction. Used to restrict ALPR analysis to regions where
// something is actually moving, improving throughput on video input.
class MotionDetector
{
public:
  MotionDetector();
  virtual ~MotionDetector();

  // Re-seed the background model from this frame (e.g. on the first frame
  // of a new stream), discarding any previous history.
  void ResetMotionDetection(cv::Mat* frame);

  // Update the model with the frame and return one rectangle bounding all
  // detected motion; an all-zero rect means no motion was found.
  // Also draws the detected rectangle onto *frame.
  cv::Rect MotionDetect(cv::Mat* frame);

private:
  cv::Ptr<cv::BackgroundSubtractor> pMOG2; // MOG2 background subtractor
  cv::Mat fgMaskMOG2;                      // foreground mask from the last apply()
};