From 0c2495f26c84166c1d52291bf0e7c58b770fd2b7 Mon Sep 17 00:00:00 2001
From: Kees-V
Date: Tue, 12 May 2015 19:27:18 +0200
Subject: [PATCH] Use motion detection in analysis of video streams and video
 files to increase performance (Issue 39)

---
 src/main.cpp                 | 26 +++++++++++-----
 src/video/motiondetector.cpp | 59 ++++++++++++++++++++++++++++++++++++
 src/video/motiondetector.h   | 13 ++++++++
 3 files changed, 91 insertions(+), 7 deletions(-)
 create mode 100644 src/video/motiondetector.cpp
 create mode 100644 src/video/motiondetector.h

diff --git a/src/main.cpp b/src/main.cpp
index 5bd32a4..67a5612 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -31,6 +31,7 @@
 #include "support/timing.h"
 #include "support/platform.h"
 #include "video/videobuffer.h"
+#include "video/motiondetector.h"
 #include "alpr.h"
 
 using namespace alpr;
@@ -39,6 +40,8 @@ const std::string MAIN_WINDOW_NAME = "ALPR main window";
 const bool SAVE_LAST_VIDEO_STILL = false;
 const std::string LAST_VIDEO_STILL_LOCATION = "/tmp/laststill.jpg";
 
+MotionDetector motiondetector;
+bool do_motiondetection = true;
 
 /** Function Headers */
 bool detectandshow(Alpr* alpr, cv::Mat frame, std::string region, bool writeJson);
@@ -74,6 +77,7 @@ int main( int argc, const char** argv )
   TCLAP::SwitchArg jsonSwitch("j","json","Output recognition results in JSON format.  Default=off", cmd, false);
   TCLAP::SwitchArg detectRegionSwitch("d","detect_region","Attempt to detect the region of the plate image.  [Experimental]  Default=off", cmd, false);
   TCLAP::SwitchArg clockSwitch("","clock","Measure/print the total time to process image and all plates.  Default=off", cmd, false);
+  TCLAP::SwitchArg motiondetect("", "motion", "Use motion detection on video file or stream.  Default=off", cmd, false);
 
   try
   {
@@ -101,6 +105,7 @@ int main( int argc, const char** argv )
     templatePattern = templatePatternArg.getValue();
     topn = topNArg.getValue();
     measureProcessingTime = clockSwitch.getValue();
+    do_motiondetection = motiondetect.getValue();
   }
   catch (TCLAP::ArgException &e)    // catch any exceptions
   {
@@ -177,12 +182,14 @@ int main( int argc, const char** argv )
 
       if (response != -1)
       {
-        detectandshow( &alpr, latestFrame, "", outputJson);
+        if (framenum == 0) motiondetector.ResetMotionDetection(&latestFrame);
+        detectandshow(&alpr, latestFrame, "", outputJson);
       }
 
       // Sleep 10ms
       sleep_ms(10);
-    }
+      framenum++;
+    }
 
     videoBuffer.disconnect();
 
@@ -206,8 +213,8 @@ int main( int argc, const char** argv )
           cv::imwrite(LAST_VIDEO_STILL_LOCATION, frame);
         }
         std::cout << "Frame: " << framenum << std::endl;
-
-        detectandshow( &alpr, frame, "", outputJson);
+        if (framenum == 0) motiondetector.ResetMotionDetection(&frame);
+        detectandshow(&alpr, frame, "", outputJson);
         //create a 1ms delay
         sleep_ms(1);
         framenum++;
@@ -278,9 +285,14 @@ bool detectandshow( Alpr* alpr, cv::Mat frame, std::string region, bool writeJso
     getTimeMonotonic(&startTime);
 
     std::vector<AlprRegionOfInterest> regionsOfInterest;
-    regionsOfInterest.push_back(AlprRegionOfInterest(0,0, frame.cols, frame.rows));
-
-    AlprResults results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest );
+    if (do_motiondetection)
+    {
+      // Only analyze the region where motion was detected; an empty (zero-width)
+      // rectangle means no motion, so no ROI is queued and recognition is skipped.
+      cv::Rect rectan = motiondetector.MotionDetect(&frame);
+      if (rectan.width > 0) regionsOfInterest.push_back(AlprRegionOfInterest(rectan.x, rectan.y, rectan.width, rectan.height));
+    }
+    else regionsOfInterest.push_back(AlprRegionOfInterest(0, 0, frame.cols, frame.rows));
+    AlprResults results;
+    if (regionsOfInterest.size() > 0) results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest);
 
     timespec endTime;
     getTimeMonotonic(&endTime);
diff --git a/src/video/motiondetector.cpp b/src/video/motiondetector.cpp
new file mode 100644
index
0000000..c115c2b
--- /dev/null
+++ b/src/video/motiondetector.cpp
@@ -0,0 +1,61 @@
+#include "motiondetector.h"
+
+using namespace cv;
+
+MotionDetector::MotionDetector()
+{
+  pMOG2 = cv::createBackgroundSubtractorMOG2();
+}
+
+MotionDetector::~MotionDetector()
+{
+
+}
+
+void MotionDetector::ResetMotionDetection(cv::Mat* frame)
+{
+  // Re-learn the background from this frame (learning rate 1 = replace model).
+  pMOG2->apply(*frame, fgMaskMOG2, 1);
+}
+
+// Detect motion and return ONE rectangle that contains all the detected motion.
+cv::Rect MotionDetector::MotionDetect(cv::Mat* frame)
+{
+  std::vector<std::vector<cv::Point> > contours;
+  std::vector<cv::Vec4i> hierarchy;
+  cv::Rect bounding_rect;
+  std::vector<cv::Rect> rects;
+  cv::Rect largest_rect, rect_temp;
+
+  // Detect motion (learning rate -1 = automatic)
+  pMOG2->apply(*frame, fgMaskMOG2, -1);
+  // Remove noise
+  cv::erode(fgMaskMOG2, fgMaskMOG2, getStructuringElement(cv::MORPH_RECT, cv::Size(6, 6)));
+  // Find the contours of motion areas in the image
+  findContours(fgMaskMOG2, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
+  // Find the bounding rectangles of the areas of motion
+  if (contours.size() > 0)
+  {
+    for (size_t i = 0; i < contours.size(); i++)
+    {
+      bounding_rect = boundingRect(contours[i]);
+      rects.push_back(bounding_rect);
+    }
+    // Determine the overall area with motion (union of all bounding boxes).
+    largest_rect = rects[0];
+    for (size_t i = 1; i < rects.size(); i++)
+    {
+      rect_temp.x = min(largest_rect.x, rects[i].x);
+      rect_temp.y = min(largest_rect.y, rects[i].y);
+      rect_temp.width = max(largest_rect.x + largest_rect.width, rects[i].x + rects[i].width) - rect_temp.x;
+      rect_temp.height = max(largest_rect.y + largest_rect.height, rects[i].y + rects[i].height) - rect_temp.y;
+      largest_rect = rect_temp;
+    }
+    // Draw largest_rect, not rect_temp: rect_temp is still empty when only one
+    // contour was found (the merge loop above never runs).
+    rectangle(*frame, largest_rect, cv::Scalar(0, 255, 0), 1, 8, 0);
+  }
+  else
+    largest_rect = cv::Rect(0, 0, 0, 0);
+//  imshow("Motion detect", fgMaskMOG2);
+  return largest_rect;
+}
diff --git a/src/video/motiondetector.h b/src/video/motiondetector.h
new file mode 100644
index 0000000..7522f53
--- /dev/null
+++ b/src/video/motiondetector.h
@@ -0,0 +1,16 @@
+#pragma once
+
+#include "opencv2/opencv.hpp"
+
+class MotionDetector
+{
+  private:
+    cv::Ptr<cv::BackgroundSubtractorMOG2> pMOG2; // MOG2 background subtractor
+    cv::Mat fgMaskMOG2;                          // foreground (motion) mask
+  public:
+    MotionDetector();
+    virtual ~MotionDetector();
+
+    void ResetMotionDetection(cv::Mat* frame);
+    cv::Rect MotionDetect(cv::Mat* frame);
+};