Merge branch 'master' of git://github.com/Kees-V/openalpr into keesmotion

This commit is contained in:
Matt Hill
2015-05-14 07:23:15 -04:00
3 changed files with 93 additions and 8 deletions

View File

@@ -31,6 +31,7 @@
#include "support/timing.h" #include "support/timing.h"
#include "support/platform.h" #include "support/platform.h"
#include "video/videobuffer.h" #include "video/videobuffer.h"
#include "video/motiondetector.h"
#include "alpr.h" #include "alpr.h"
using namespace alpr; using namespace alpr;
@@ -39,6 +40,8 @@ const std::string MAIN_WINDOW_NAME = "ALPR main window";
const bool SAVE_LAST_VIDEO_STILL = false; const bool SAVE_LAST_VIDEO_STILL = false;
const std::string LAST_VIDEO_STILL_LOCATION = "/tmp/laststill.jpg"; const std::string LAST_VIDEO_STILL_LOCATION = "/tmp/laststill.jpg";
MotionDetector motiondetector;
bool do_motiondetection = true;
/** Function Headers */ /** Function Headers */
bool detectandshow(Alpr* alpr, cv::Mat frame, std::string region, bool writeJson); bool detectandshow(Alpr* alpr, cv::Mat frame, std::string region, bool writeJson);
@@ -75,6 +78,7 @@ int main( int argc, const char** argv )
TCLAP::SwitchArg jsonSwitch("j","json","Output recognition results in JSON format. Default=off", cmd, false); TCLAP::SwitchArg jsonSwitch("j","json","Output recognition results in JSON format. Default=off", cmd, false);
TCLAP::SwitchArg detectRegionSwitch("d","detect_region","Attempt to detect the region of the plate image. [Experimental] Default=off", cmd, false); TCLAP::SwitchArg detectRegionSwitch("d","detect_region","Attempt to detect the region of the plate image. [Experimental] Default=off", cmd, false);
TCLAP::SwitchArg clockSwitch("","clock","Measure/print the total time to process image and all plates. Default=off", cmd, false); TCLAP::SwitchArg clockSwitch("","clock","Measure/print the total time to process image and all plates. Default=off", cmd, false);
TCLAP::SwitchArg motiondetect("", "motion", "Use motion detection on video file or stream. Default=off", cmd, false);
try try
{ {
@@ -102,6 +106,7 @@ int main( int argc, const char** argv )
templatePattern = templatePatternArg.getValue(); templatePattern = templatePatternArg.getValue();
topn = topNArg.getValue(); topn = topNArg.getValue();
measureProcessingTime = clockSwitch.getValue(); measureProcessingTime = clockSwitch.getValue();
do_motiondetection = motiondetect.getValue();
} }
catch (TCLAP::ArgException &e) // catch any exceptions catch (TCLAP::ArgException &e) // catch any exceptions
{ {
@@ -156,7 +161,8 @@ int main( int argc, const char** argv )
while (cap.read(frame)) while (cap.read(frame))
{ {
detectandshow(&alpr, frame, "", outputJson); if (framenum == 0) motiondetector.ResetMotionDetection(&frame);
detectandshow(&alpr, frame, "", outputJson);
sleep_ms(10); sleep_ms(10);
framenum++; framenum++;
} }
@@ -178,12 +184,14 @@ int main( int argc, const char** argv )
if (response != -1) if (response != -1)
{ {
detectandshow( &alpr, latestFrame, "", outputJson); if (framenum == 0) motiondetector.ResetMotionDetection(&latestFrame);
detectandshow(&alpr, latestFrame, "", outputJson);
} }
// Sleep 10ms // Sleep 10ms
sleep_ms(10); sleep_ms(10);
} framenum++;
}
videoBuffer.disconnect(); videoBuffer.disconnect();
@@ -207,8 +215,8 @@ int main( int argc, const char** argv )
cv::imwrite(LAST_VIDEO_STILL_LOCATION, frame); cv::imwrite(LAST_VIDEO_STILL_LOCATION, frame);
} }
std::cout << "Frame: " << framenum << std::endl; std::cout << "Frame: " << framenum << std::endl;
if (framenum == 0) motiondetector.ResetMotionDetection(&frame);
detectandshow( &alpr, frame, "", outputJson); detectandshow(&alpr, frame, "", outputJson);
//create a 1ms delay //create a 1ms delay
sleep_ms(1); sleep_ms(1);
framenum++; framenum++;
@@ -283,9 +291,14 @@ bool detectandshow( Alpr* alpr, cv::Mat frame, std::string region, bool writeJso
getTimeMonotonic(&startTime); getTimeMonotonic(&startTime);
std::vector<AlprRegionOfInterest> regionsOfInterest; std::vector<AlprRegionOfInterest> regionsOfInterest;
regionsOfInterest.push_back(AlprRegionOfInterest(0,0, frame.cols, frame.rows)); if (do_motiondetection)
{
AlprResults results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest ); cv::Rect rectan = motiondetector.MotionDetect(&frame);
if (rectan.width>0) regionsOfInterest.push_back(AlprRegionOfInterest(rectan.x, rectan.y, rectan.width, rectan.height));
}
else regionsOfInterest.push_back(AlprRegionOfInterest(0, 0, frame.cols, frame.rows));
AlprResults results;
if (regionsOfInterest.size()>0) results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest);
timespec endTime; timespec endTime;
getTimeMonotonic(&endTime); getTimeMonotonic(&endTime);

View File

@@ -0,0 +1,59 @@
#include "motiondetector.h"
using namespace cv;
// Construct the detector with a fresh MOG2 background-subtraction model;
// cv::Ptr takes ownership and releases it when this object is destroyed.
MotionDetector::MotionDetector()
  : pMOG2(new BackgroundSubtractorMOG2())
{
}
// Nothing to release explicitly: pMOG2 is a cv::Ptr (ref-counted smart
// pointer, per the class declaration), so the subtractor is freed automatically.
MotionDetector::~MotionDetector()
{
}
// Re-seed the background model from a single reference frame (e.g. the first
// frame of a new video/stream) so stale history does not register as motion.
// The third argument is the MOG2 learning rate; 1 means the model is rebuilt
// entirely from this frame — presumably the intent here; confirm against the
// OpenCV BackgroundSubtractorMOG2 documentation for the installed version.
void MotionDetector::ResetMotionDetection(cv::Mat* frame)
{
pMOG2->operator()(*frame, fgMaskMOG2, 1);
}
// Detect motion in the given frame and return ONE rectangle that bounds all
// detected motion areas. Returns an empty rectangle (0,0,0,0) when no motion
// is found. Side effect: draws the motion rectangle on *frame in green.
cv::Rect MotionDetector::MotionDetect(cv::Mat* frame)
{
  // Update the background model and extract the foreground (motion) mask.
  // Learning rate -1 lets OpenCV choose an automatic rate.
  pMOG2->operator()(*frame, fgMaskMOG2, -1);

  // Erode to remove small speckle noise from the mask.
  cv::erode(fgMaskMOG2, fgMaskMOG2, getStructuringElement(cv::MORPH_RECT, cv::Size(6, 6)));

  // Find the contours of the remaining motion areas.
  std::vector<std::vector<cv::Point> > contours;
  std::vector<cv::Vec4i> hierarchy;
  findContours(fgMaskMOG2, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

  cv::Rect motion_rect(0, 0, 0, 0);
  if (!contours.empty())
  {
    // Union of all contour bounding boxes. cv::Rect::operator|= computes the
    // minimal rectangle containing both operands, replacing the manual
    // min/max bookkeeping of the original code.
    motion_rect = boundingRect(contours[0]);
    for (size_t i = 1; i < contours.size(); i++)
      motion_rect |= boundingRect(contours[i]);

    // BUGFIX: previously this drew rect_temp, which was still default-zeroed
    // when only one contour existed; draw the actual combined rectangle.
    rectangle(*frame, motion_rect, cv::Scalar(0, 255, 0), 1, 8, 0);
  }

  // imshow("Motion detect", fgMaskMOG2);
  return motion_rect;
}

View File

@@ -0,0 +1,13 @@
// BUGFIX: header had no include guard; double inclusion would redefine the
// class and break the build.
#pragma once

#include "opencv2/opencv.hpp"

/// Detects frame-to-frame motion with a MOG2 background subtractor and
/// reports a single bounding rectangle covering all detected motion.
class MotionDetector
{
private:
  cv::Ptr<cv::BackgroundSubtractor> pMOG2; // MOG2 background subtractor (owning smart pointer)
  cv::Mat fgMaskMOG2;                      // foreground mask produced by the subtractor

public:
  MotionDetector();
  virtual ~MotionDetector();

  // Re-seed the background model from a single reference frame.
  void ResetMotionDetection(cv::Mat* frame);

  // Detect motion; returns one rectangle bounding all motion areas
  // (empty rect when none) and draws it on *frame.
  cv::Rect MotionDetect(cv::Mat* frame);
};