Use motion detection in the analysis of video streams and video files to increase performance (Issue 39)

Kees-V
2015-05-12 19:27:18 +02:00
parent e95ce76c02
commit 0c2495f26c
3 changed files with 91 additions and 7 deletions
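
Only one of the three changed files (the main.cpp command-line frontend) is shown below; the other two presumably add the MotionDetector class that the new include refers to. The hunks wire a new --motion TCLAP switch through main(): the detector is reset on the first frame of a stream or video file, and each later frame is only handed to the recognizer inside the rectangle where motion was found. Condensed into a single hypothetical helper (the recognizeFrame name and the function boundary are illustrative, not part of the commit), the per-frame flow sketched by the diff is roughly:

// Sketch of the flow added by this commit; names follow the diff, but the
// helper itself (recognizeFrame) is hypothetical and only for illustration.
#include <vector>
#include <opencv2/opencv.hpp>
#include "alpr.h"
#include "video/motiondetector.h"

static alpr::AlprResults recognizeFrame(alpr::Alpr* alpr, MotionDetector& motiondetector,
                                        cv::Mat& frame, bool do_motiondetection, int framenum)
{
  // Seed the background model once per video source (framenum == 0 in the diff).
  if (framenum == 0)
    motiondetector.ResetMotionDetection(&frame);

  std::vector<alpr::AlprRegionOfInterest> regionsOfInterest;
  if (do_motiondetection)
  {
    // Only the area that changed since earlier frames is analyzed.
    cv::Rect rectan = motiondetector.MotionDetect(&frame);
    if (rectan.width > 0)
      regionsOfInterest.push_back(alpr::AlprRegionOfInterest(rectan.x, rectan.y, rectan.width, rectan.height));
  }
  else
  {
    // Old behaviour: the whole frame is a single region of interest.
    regionsOfInterest.push_back(alpr::AlprRegionOfInterest(0, 0, frame.cols, frame.rows));
  }

  // A motionless frame produces no region at all, so recognition is skipped entirely.
  alpr::AlprResults results;
  if (regionsOfInterest.size() > 0)
    results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest);
  return results;
}

With the switch left at its default (off), the else branch keeps the old behaviour of treating the whole frame as one region of interest.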


@@ -31,6 +31,7 @@
 #include "support/timing.h"
 #include "support/platform.h"
 #include "video/videobuffer.h"
+#include "video/motiondetector.h"
 #include "alpr.h"
 using namespace alpr;
@@ -39,6 +40,8 @@ const std::string MAIN_WINDOW_NAME = "ALPR main window";
 const bool SAVE_LAST_VIDEO_STILL = false;
 const std::string LAST_VIDEO_STILL_LOCATION = "/tmp/laststill.jpg";
+MotionDetector motiondetector;
+bool do_motiondetection = true;
 /** Function Headers */
 bool detectandshow(Alpr* alpr, cv::Mat frame, std::string region, bool writeJson);
@@ -74,6 +77,7 @@ int main( int argc, const char** argv )
 TCLAP::SwitchArg jsonSwitch("j","json","Output recognition results in JSON format. Default=off", cmd, false);
 TCLAP::SwitchArg detectRegionSwitch("d","detect_region","Attempt to detect the region of the plate image. [Experimental] Default=off", cmd, false);
 TCLAP::SwitchArg clockSwitch("","clock","Measure/print the total time to process image and all plates. Default=off", cmd, false);
+TCLAP::SwitchArg motiondetect("", "motion", "Use motion detection on video file or stream. Default=off", cmd, false);
 try
 {
@@ -101,6 +105,7 @@ int main( int argc, const char** argv )
 templatePattern = templatePatternArg.getValue();
 topn = topNArg.getValue();
 measureProcessingTime = clockSwitch.getValue();
+do_motiondetection = motiondetect.getValue();
 }
 catch (TCLAP::ArgException &e) // catch any exceptions
 {
@@ -177,12 +182,14 @@ int main( int argc, const char** argv )
 if (response != -1)
 {
-detectandshow( &alpr, latestFrame, "", outputJson);
+if (framenum == 0) motiondetector.ResetMotionDetection(&latestFrame);
+detectandshow(&alpr, latestFrame, "", outputJson);
 }
 // Sleep 10ms
 sleep_ms(10);
+framenum++;
 }
 videoBuffer.disconnect();
@@ -206,8 +213,8 @@ int main( int argc, const char** argv )
 cv::imwrite(LAST_VIDEO_STILL_LOCATION, frame);
 }
 std::cout << "Frame: " << framenum << std::endl;
-detectandshow( &alpr, frame, "", outputJson);
+if (framenum == 0) motiondetector.ResetMotionDetection(&frame);
+detectandshow(&alpr, frame, "", outputJson);
 //create a 1ms delay
 sleep_ms(1);
 framenum++;
@@ -278,9 +285,14 @@ bool detectandshow( Alpr* alpr, cv::Mat frame, std::string region, bool writeJso
 getTimeMonotonic(&startTime);
 std::vector<AlprRegionOfInterest> regionsOfInterest;
-regionsOfInterest.push_back(AlprRegionOfInterest(0,0, frame.cols, frame.rows));
-AlprResults results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest );
+if (do_motiondetection)
+{
+cv::Rect rectan = motiondetector.MotionDetect(&frame);
+if (rectan.width>0) regionsOfInterest.push_back(AlprRegionOfInterest(rectan.x, rectan.y, rectan.width, rectan.height));
+}
+else regionsOfInterest.push_back(AlprRegionOfInterest(0, 0, frame.cols, frame.rows));
+AlprResults results;
+if (regionsOfInterest.size()>0) results = alpr->recognize(frame.data, frame.elemSize(), frame.cols, frame.rows, regionsOfInterest);
 timespec endTime;
 getTimeMonotonic(&endTime);
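
The MotionDetector used above is declared in the newly included video/motiondetector.h, presumably one of the two changed files not shown in this view, so only its interface is visible here: ResetMotionDetection(cv::Mat*) to (re)initialize on a new source, and MotionDetect(cv::Mat*) returning a cv::Rect whose zero width means "no motion". Purely as an illustration of how such an interface could be satisfied with OpenCV's background subtraction (an assumption about the approach, not a description of the committed implementation, and it presumes OpenCV 3+ for createBackgroundSubtractorMOG2), a minimal stand-in might look like this:

// Illustrative stand-in only -- NOT the MotionDetector shipped in this commit.
// Assumes OpenCV 3+ (cv::createBackgroundSubtractorMOG2).
#include <vector>
#include <opencv2/opencv.hpp>

class MotionDetectorSketch
{
public:
  // Re-seed the background model, e.g. on the first frame of a new source.
  void ResetMotionDetection(cv::Mat* frame)
  {
    subtractor = cv::createBackgroundSubtractorMOG2();
    subtractor->apply(*frame, foregroundMask);
  }

  // Bounding box of all pixels that changed; width == 0 signals "no motion",
  // which is the contract detectandshow() relies on above.
  cv::Rect MotionDetect(cv::Mat* frame)
  {
    if (subtractor.empty())
      ResetMotionDetection(frame);

    subtractor->apply(*frame, foregroundMask);
    // MOG2 marks shadows as 127; keep only confident foreground pixels.
    cv::threshold(foregroundMask, foregroundMask, 200, 255, cv::THRESH_BINARY);

    if (cv::countNonZero(foregroundMask) == 0)
      return cv::Rect();

    std::vector<cv::Point> changedPixels;
    cv::findNonZero(foregroundMask, changedPixels);
    return cv::boundingRect(changedPixels);
  }

private:
  cv::Ptr<cv::BackgroundSubtractor> subtractor;
  cv::Mat foregroundMask;
};

Whatever the real implementation does internally, the contract that matters for this diff is the empty rectangle on static scenes: it lets detectandshow() skip alpr->recognize() altogether, which is where the performance gain promised in the commit message comes from.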