Compare commits

...

35 Commits

Author SHA1 Message Date
Blake Blackshear
30ad0e30f8 allow mqtt password to be set by env var 2020-03-01 07:23:56 -06:00
Blake Blackshear
7bad89c9bf update benchmark script to mirror actual frigate use 2020-03-01 07:16:56 -06:00
Blake Blackshear
f077c397f4 improve detection processing and restart when stuck 2020-03-01 07:16:49 -06:00
Blake Blackshear
cc729d83a8 handle ffmpeg process failures in the camera process itself 2020-02-26 19:02:12 -06:00
Blake Blackshear
c520b81e49 add a few print statements for debugging 2020-02-25 20:37:12 -06:00
Blake Blackshear
9c304391c0 dont kill the camera process from the main process 2020-02-25 20:36:30 -06:00
Blake Blackshear
9a12b02d22 increase the buffer size a bit 2020-02-24 20:05:30 -06:00
Blake Blackshear
7686c510b3 add a few more metrics to debug 2020-02-23 18:11:39 -06:00
Blake Blackshear
2f5e322d3c cleanup the plasma store when finished with a frame 2020-02-23 18:11:08 -06:00
Blake Blackshear
1cd4c12104 dont redirect stdout for plasma store 2020-02-23 15:53:17 -06:00
Blake Blackshear
1a8b034685 reset detection fps 2020-02-23 15:53:00 -06:00
Blake Blackshear
da6dc03a57 dont change dictionary while iterating 2020-02-23 11:18:00 -06:00
Blake Blackshear
7fa3b70d2d allow specifying the frame size in the config instead of detecting 2020-02-23 07:56:14 -06:00
Blake Blackshear
1fc5a2bfd4 ensure missing objects are expired even when other object types are in the frame 2020-02-23 07:55:51 -06:00
Blake Blackshear
7e84da7dad Fix watchdog last_frame calculation 2020-02-23 07:55:16 -06:00
Blake Blackshear
128be72e28 cleanup 2020-02-22 09:15:29 -06:00
Blake Blackshear
aaddedc95c update docs and add back benchmark 2020-02-22 09:10:37 -06:00
Blake Blackshear
ba919fb439 fix watchdog 2020-02-22 09:10:37 -06:00
Blake Blackshear
b1d563f3c4 check avg wait before dropping frames 2020-02-22 09:10:37 -06:00
Blake Blackshear
204d8af5df fix watchdog restart 2020-02-22 09:10:37 -06:00
Blake Blackshear
b507a73d79 improve watchdog and coral fps tracking 2020-02-22 09:10:37 -06:00
Blake Blackshear
66eeb8b5cb dont log http requests 2020-02-22 09:10:37 -06:00
Blake Blackshear
efa67067c6 cleanup 2020-02-22 09:10:37 -06:00
Blake Blackshear
aeb036f1a4 add models and convert speed to ms 2020-02-22 09:10:37 -06:00
Blake Blackshear
74c528f9dc add watchdog for camera processes 2020-02-22 09:10:34 -06:00
Blake Blackshear
f2d54bec43 cleanup old code 2020-02-22 09:09:36 -06:00
Blake Blackshear
f07d57741e add a min_fps option 2020-02-22 09:06:46 -06:00
Blake Blackshear
2c1ec19f98 check plasma store and consolidate frame drawing 2020-02-22 09:06:46 -06:00
Blake Blackshear
6a9027c002 split into separate processes 2020-02-22 09:06:43 -06:00
Blake Blackshear
60c15e4419 update tflite to 2.1.0 2020-02-22 09:05:26 -06:00
Blake Blackshear
03dbf600aa refactor some classes into new files 2020-02-22 09:05:26 -06:00
Blake Blackshear
fbbb79b31b tweak process handoff 2020-02-22 09:05:26 -06:00
Blake Blackshear
496c6bc6c4 Mostly working detection in a separate process 2020-02-22 09:05:26 -06:00
Blake Blackshear
869a81c944 read from ffmpeg 2020-02-22 09:05:26 -06:00
Blake Blackshear
5b1884cfb3 WIP: revamp to incorporate motion 2020-02-22 09:05:26 -06:00
14 changed files with 1087 additions and 1084 deletions

Dockerfile Normal file → Executable file

@@ -4,50 +4,57 @@ LABEL maintainer "blakeb@blakeshome.com"
 ENV DEBIAN_FRONTEND=noninteractive
 # Install packages for apt repo
 RUN apt -qq update && apt -qq install --no-install-recommends -y \
-  apt-transport-https ca-certificates \
-  gnupg wget \
-  ffmpeg \
-  python3 \
-  python3-pip \
-  python3-dev \
-  python3-numpy \
-  # python-prctl
-  build-essential libcap-dev \
-  # pillow-simd
-  # zlib1g-dev libjpeg-dev \
+  software-properties-common \
+  # apt-transport-https ca-certificates \
+  build-essential \
+  gnupg wget unzip \
+  # libcap-dev \
+  && add-apt-repository ppa:deadsnakes/ppa -y \
+  && apt -qq install --no-install-recommends -y \
+  python3.7 \
+  python3.7-dev \
+  python3-pip \
+  ffmpeg \
   # VAAPI drivers for Intel hardware accel
   libva-drm2 libva2 i965-va-driver vainfo \
+  && python3.7 -m pip install -U wheel setuptools \
+  && python3.7 -m pip install -U \
+  opencv-python-headless \
+  # python-prctl \
+  numpy \
+  imutils \
+  scipy \
+  && python3.7 -m pip install -U \
+  SharedArray \
+  Flask \
+  paho-mqtt \
+  PyYAML \
+  matplotlib \
+  pyarrow \
   && echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
   && wget -q -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
   && apt -qq update \
   && echo "libedgetpu1-max libedgetpu/accepted-eula boolean true" | debconf-set-selections \
   && apt -qq install --no-install-recommends -y \
   libedgetpu1-max \
-  python3-edgetpu \
+  ## Tensorflow lite (python 3.7 only)
+  && wget -q https://dl.google.com/coral/python/tflite_runtime-2.1.0-cp37-cp37m-linux_x86_64.whl \
+  && python3.7 -m pip install tflite_runtime-2.1.0-cp37-cp37m-linux_x86_64.whl \
+  && rm tflite_runtime-2.1.0-cp37-cp37m-linux_x86_64.whl \
   && rm -rf /var/lib/apt/lists/* \
   && (apt-get autoremove -y; apt-get autoclean -y)

-# needs to be installed before others
-RUN pip3 install -U wheel setuptools
-RUN pip3 install -U \
-  opencv-python-headless \
-  python-prctl \
-  Flask \
-  paho-mqtt \
-  PyYAML \
-  matplotlib \
-  scipy
-# symlink the model and labels
-RUN wget -q https://github.com/google-coral/edgetpu/raw/master/test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -O mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite --trust-server-names
-RUN wget -q https://dl.google.com/coral/canned_models/coco_labels.txt -O coco_labels.txt --trust-server-names
-RUN ln -s mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb
-RUN ln -s /coco_labels.txt /label_map.pbtext
+# get model and labels
+RUN wget -q https://github.com/google-coral/edgetpu/raw/master/test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -O /edgetpu_model.tflite --trust-server-names
+RUN wget -q https://dl.google.com/coral/canned_models/coco_labels.txt -O /labelmap.txt --trust-server-names
+RUN wget -q https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip -O /cpu_model.zip && \
+  unzip /cpu_model.zip detect.tflite -d / && \
+  mv /detect.tflite /cpu_model.tflite && \
+  rm /cpu_model.zip

 WORKDIR /opt/frigate/
 ADD frigate frigate/
 COPY detect_objects.py .
 COPY benchmark.py .

-CMD ["python3", "-u", "detect_objects.py"]
+CMD ["python3.7", "-u", "detect_objects.py"]

README.md

@@ -1,14 +1,13 @@
 # Frigate - Realtime Object Detection for IP Cameras
-**Note:** This version requires the use of a [Google Coral USB Accelerator](https://coral.withgoogle.com/products/accelerator/)
 Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras. Designed for integration with HomeAssistant or others via MQTT.
+Use of a [Google Coral USB Accelerator](https://coral.withgoogle.com/products/accelerator/) is optional, but highly recommended. On my Intel i7 processor, I can process 2-3 FPS with the CPU. The Coral can process 100+ FPS with very low CPU load.
-- Leverages multiprocessing and threads heavily with an emphasis on realtime over processing every frame
-- Allows you to define specific regions (squares) in the image to look for objects
-- No motion detection (for now)
-- Object detection with Tensorflow runs in a separate thread
+- Leverages multiprocessing heavily with an emphasis on realtime over processing every frame
+- Uses a very low overhead motion detection to determine where to run object detection
+- Object detection with Tensorflow runs in a separate process
 - Object info is published over MQTT for integration into HomeAssistant as a binary sensor
-- An endpoint is available to view an MJPEG stream for debugging
+- An endpoint is available to view an MJPEG stream for debugging, but should not be used continuously

 ![Diagram](diagram.png)

@@ -22,12 +21,16 @@ Build the container with
 docker build -t frigate .
 ```
-The `mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite` model is included and used by default. You can use your own model and labels by mounting files in the container at `/frozen_inference_graph.pb` and `/label_map.pbtext`. Models must be compatible with the Coral according to [this](https://coral.withgoogle.com/models/).
+Models for both CPU and EdgeTPU (Coral) are bundled in the image. You can use your own models with volume mounts:
+- CPU Model: `/cpu_model.tflite`
+- EdgeTPU Model: `/edgetpu_model.tflite`
+- Labels: `/labelmap.txt`
 Run the container with
-```
+```bash
 docker run --rm \
 --privileged \
+--shm-size=512m \ # should work for a 2-3 cameras
 -v /dev/bus/usb:/dev/bus/usb \
 -v <path_to_config_dir>:/config:ro \
 -v /etc/localtime:/etc/localtime:ro \
@@ -37,11 +40,12 @@ frigate:latest
 ```
 Example docker-compose:
-```
+```yaml
 frigate:
 container_name: frigate
 restart: unless-stopped
 privileged: true
+shm_size: '1g' # should work for 5-7 cameras
 image: frigate:latest
 volumes:
 - /dev/bus/usb:/dev/bus/usb
@@ -57,6 +61,8 @@ A `config.yml` file must exist in the `config` directory. See example [here](con
 Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best snapshot for any object type with at `http://localhost:5000/<camera_name>/<object_name>/best.jpg`
+Debug info is available at `http://localhost:5000/debug/stats`
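As an aside (not part of the diff): the stats exposed at that endpoint can also be read programmatically. A minimal sketch, assuming the `requests` package, the default port, and a camera named `back` as in the example config:

```python
import requests

stats = requests.get('http://localhost:5000/debug/stats').json()
# 'coral' holds the shared detection process stats; each camera gets its own entry
print(stats['coral']['fps'], stats['coral']['inference_speed'])
print(stats['back']['fps'], stats['back']['skipped_fps'], stats['back']['detection_fps'])
```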
 ## Integration with HomeAssistant
 ```
 camera:
@@ -93,30 +99,34 @@ automation:
 photo:
 - url: http://<ip>:5000/<camera_name>/person/best.jpg
 caption: A person was detected.
sensor:
- platform: rest
name: Frigate Debug
resource: http://localhost:5000/debug/stats
scan_interval: 5
json_attributes:
- back
- coral
value_template: 'OK'
- platform: template
sensors:
back_fps:
value_template: '{{ states.sensor.frigate_debug.attributes["back"]["fps"] }}'
unit_of_measurement: 'FPS'
back_skipped_fps:
value_template: '{{ states.sensor.frigate_debug.attributes["back"]["skipped_fps"] }}'
unit_of_measurement: 'FPS'
back_detection_fps:
value_template: '{{ states.sensor.frigate_debug.attributes["back"]["detection_fps"] }}'
unit_of_measurement: 'FPS'
frigate_coral_fps:
value_template: '{{ states.sensor.frigate_debug.attributes["coral"]["fps"] }}'
unit_of_measurement: 'FPS'
frigate_coral_inference:
value_template: '{{ states.sensor.frigate_debug.attributes["coral"]["inference_speed"] }}'
unit_of_measurement: 'ms'
 ```
 ## Tips
 - Lower the framerate of the video feed on the camera to reduce the CPU usage for capturing the feed
## Future improvements
- [x] Remove motion detection for now
- [x] Try running object detection in a thread rather than a process
- [x] Implement min person size again
- [x] Switch to a config file
- [x] Handle multiple cameras in the same container
- [ ] Attempt to figure out coral symlinking
- [ ] Add object list to config with min scores for mqtt
- [ ] Move mjpeg encoding to a separate process
- [ ] Simplify motion detection (check entire image against mask, resize instead of gaussian blur)
- [ ] See if motion detection is even worth running
- [ ] Scan for people across entire image rather than specfic regions
- [ ] Dynamically resize detection area and follow people
- [ ] Add ability to turn detection on and off via MQTT
- [ ] Output movie clips of people for notifications, etc.
- [ ] Integrate with homeassistant push camera
- [ ] Merge bounding boxes that span multiple regions
- [ ] Implement mode to save labeled objects for training
- [ ] Try and reduce CPU usage by simplifying the tensorflow model to just include the objects we care about
- [ ] Look into GPU accelerated decoding of RTSP stream
- [ ] Send video over a socket and use JSMPEG
- [x] Look into neural compute stick

benchmark.py Normal file → Executable file

@@ -1,20 +1,79 @@
-import statistics
+import os
+from statistics import mean
+import multiprocessing as mp
 import numpy as np
-from edgetpu.detection.engine import DetectionEngine
+import datetime
+from frigate.edgetpu import ObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels

-# Path to frozen detection graph. This is the actual model that is used for the object detection.
-PATH_TO_CKPT = '/frozen_inference_graph.pb'
-# Load the edgetpu engine and labels
-engine = DetectionEngine(PATH_TO_CKPT)
-frame = np.zeros((300,300,3), np.uint8)
-flattened_frame = np.expand_dims(frame, axis=0).flatten()
-detection_times = []
-for x in range(0, 1000):
-objects = engine.detect_with_input_tensor(flattened_frame, threshold=0.1, top_k=3)
-detection_times.append(engine.get_inference_time())
-print("Average inference time: " + str(statistics.mean(detection_times)))
+my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
+labels = load_labels('/labelmap.txt')

+######
+# Minimal same process runner
+######
+# object_detector = ObjectDetector()
+# tensor_input = np.expand_dims(np.full((300,300,3), 0, np.uint8), axis=0)
+# start = datetime.datetime.now().timestamp()
+# frame_times = []
+# for x in range(0, 1000):
+# start_frame = datetime.datetime.now().timestamp()
+# tensor_input[:] = my_frame
+# detections = object_detector.detect_raw(tensor_input)
+# parsed_detections = []
+# for d in detections:
+# if d[1] < 0.4:
+# break
+# parsed_detections.append((
+# labels[int(d[0])],
+# float(d[1]),
+# (d[2], d[3], d[4], d[5])
+# ))
+# frame_times.append(datetime.datetime.now().timestamp()-start_frame)
+# duration = datetime.datetime.now().timestamp()-start
# print(f"Processed for {duration:.2f} seconds.")
# print(f"Average frame processing time: {mean(frame_times)*1000:.2f}ms")
######
# Separate process runner
######
def start(id, num_detections, detection_queue):
object_detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue)
start = datetime.datetime.now().timestamp()
frame_times = []
for x in range(0, num_detections):
start_frame = datetime.datetime.now().timestamp()
detections = object_detector.detect(my_frame)
frame_times.append(datetime.datetime.now().timestamp()-start_frame)
duration = datetime.datetime.now().timestamp()-start
print(f"{id} - Processed for {duration:.2f} seconds.")
print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
edgetpu_process = EdgeTPUProcess()
# start(1, 1000, edgetpu_process.detect_lock, edgetpu_process.detect_ready, edgetpu_process.frame_ready)
####
# Multiple camera processes
####
camera_processes = []
for x in range(0, 10):
camera_process = mp.Process(target=start, args=(x, 100, edgetpu_process.detection_queue))
camera_process.daemon = True
camera_processes.append(camera_process)
start = datetime.datetime.now().timestamp()
for p in camera_processes:
p.start()
for p in camera_processes:
p.join()
duration = datetime.datetime.now().timestamp()-start
print(f"Total - Processed for {duration:.2f} seconds.")

config/config.example.yml

@@ -39,8 +39,6 @@ mqtt:
 # - -use_wallclock_as_timestamps
 # - '1'
 # output_args:
-# - -vf
-# - mpdecimate
 # - -f
 # - rawvideo
 # - -pix_fmt
@@ -89,12 +87,15 @@ cameras:
 # width: 720
 ################
-## Optional mask. Must be the same dimensions as your video feed.
+## Optional mask. Must be the same aspect ratio as your video feed.
+##
 ## The mask works by looking at the bottom center of the bounding box for the detected
 ## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
 ## false positive. In my mask, the grass and driveway visible from my backdoor camera
 ## are white. The garage doors, sky, and trees (anywhere it would be impossible for a
 ## person to stand) are black.
+##
+## Masked areas are also ignored for motion detection.
 ################
 # mask: back-mask.bmp
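To make the mask behavior described above concrete, here is a minimal illustrative sketch (not the project's actual code) of the check it describes: look up the bottom center of a detection's bounding box in the mask, and treat a black pixel there as a false positive. The file name and box coordinates are made up:

```python
import cv2

mask = cv2.imread('back-mask.bmp', cv2.IMREAD_GRAYSCALE)
xmin, ymin, xmax, ymax = 100, 200, 180, 400  # hypothetical person bounding box

# bottom center of the bounding box
x = int((xmin + xmax) / 2)
y = ymax

# a black pixel in the mask means the detection is ignored as a false positive
ignored = mask[y][x] == 0
```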
@@ -106,13 +107,14 @@ cameras:
 take_frame: 1
 ################
-# The number of seconds frigate will allow a camera to go without sending a frame before
-# assuming the ffmpeg process has a problem and restarting.
+# The expected framerate for the camera. Frigate will try and ensure it maintains this framerate
+# by dropping frames as necessary. Setting this lower than the actual framerate will allow frigate
+# to process every frame at the expense of realtime processing.
 ################
-# watchdog_timeout: 300
+fps: 5
 ################
-# Configuration for the snapshot sent over mqtt
+# Configuration for the snapshots in the debug view and mqtt
 ################
 snapshots:
 show_timestamp: True
@@ -128,21 +130,3 @@ cameras:
 min_area: 5000
 max_area: 100000
 threshold: 0.5
################
# size: size of the region in pixels
# x_offset/y_offset: position of the upper left corner of your region (top left of image is 0,0)
# Tips: All regions are resized to 300x300 before detection because the model is trained on that size.
# Resizing regions takes CPU power. Ideally, all regions should be as close to 300x300 as possible.
# Defining a region that goes outside the bounds of the image will result in errors.
################
regions:
- size: 350
x_offset: 0
y_offset: 300
- size: 400
x_offset: 350
y_offset: 250
- size: 400
x_offset: 750
y_offset: 250

detect_objects.py

@@ -1,14 +1,23 @@
+import os
 import cv2
 import time
+import datetime
 import queue
 import yaml
+import threading
+import multiprocessing as mp
+import subprocess as sp
 import numpy as np
+import logging
 from flask import Flask, Response, make_response, jsonify
 import paho.mqtt.client as mqtt
-from frigate.video import Camera
-from frigate.object_detection import PreppedQueueProcessor
+from frigate.video import track_camera
+from frigate.object_processing import TrackedObjectProcessor
 from frigate.util import EventsPerSecond
+from frigate.edgetpu import EdgeTPUProcess

+FRIGATE_VARS = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}

 with open('/config/config.yml') as f:
 CONFIG = yaml.safe_load(f)
@@ -18,6 +27,8 @@ MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
 MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
 MQTT_USER = CONFIG.get('mqtt', {}).get('user')
 MQTT_PASS = CONFIG.get('mqtt', {}).get('password')
+if not MQTT_PASS is None:
+MQTT_PASS = MQTT_PASS.format(**FRIGATE_VARS)
 MQTT_CLIENT_ID = CONFIG.get('mqtt', {}).get('client_id', 'frigate')
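As an aside (not part of the diff): the two added lines above are what enable the "allow mqtt password to be set by env var" commit. Any environment variable prefixed with `FRIGATE_` can be referenced from the config value via `str.format`. A small sketch with made-up values:

```python
# illustrative values only
FRIGATE_VARS = {'FRIGATE_MQTT_PASSWORD': 'supersecret'}

# config.yml would hold the placeholder:  password: '{FRIGATE_MQTT_PASSWORD}'
MQTT_PASS = '{FRIGATE_MQTT_PASSWORD}'
print(MQTT_PASS.format(**FRIGATE_VARS))  # -> supersecret
```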
 # Set the default FFmpeg config
@@ -38,8 +49,7 @@ FFMPEG_DEFAULT_CONFIG = {
 '-stimeout', '5000000',
 '-use_wallclock_as_timestamps', '1']),
 'output_args': FFMPEG_CONFIG.get('output_args',
-['-vf', 'mpdecimate',
-'-f', 'rawvideo',
+['-f', 'rawvideo',
 '-pix_fmt', 'rgb24'])
 }
@@ -48,6 +58,41 @@ GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
 WEB_PORT = CONFIG.get('web_port', 5000)
 DEBUG = (CONFIG.get('debug', '0') == '1')
class CameraWatchdog(threading.Thread):
def __init__(self, camera_processes, config, tflite_process, tracked_objects_queue, object_processor):
threading.Thread.__init__(self)
self.camera_processes = camera_processes
self.config = config
self.tflite_process = tflite_process
self.tracked_objects_queue = tracked_objects_queue
self.object_processor = object_processor
def run(self):
time.sleep(10)
while True:
# wait a bit before checking
time.sleep(30)
if (self.tflite_process.detection_start.value > 0.0 and
datetime.datetime.now().timestamp() - self.tflite_process.detection_start.value > 10):
print("Detection appears to be stuck. Restarting detection process")
time.sleep(30)
for name, camera_process in self.camera_processes.items():
process = camera_process['process']
if not process.is_alive():
print(f"Process for {name} is not alive. Starting again...")
camera_process['fps'].value = float(self.config[name]['fps'])
camera_process['skipped_fps'].value = 0.0
camera_process['detection_fps'].value = 0.0
process = mp.Process(target=track_camera, args=(name, self.config[name], FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG,
self.tflite_process.detection_queue, self.tracked_objects_queue,
camera_process['fps'], camera_process['skipped_fps'], camera_process['detection_fps']))
process.daemon = True
camera_process['process'] = process
process.start()
print(f"Camera_process started for {name}: {process.pid}")
 def main():
 # connect to mqtt and setup last will
 def on_connect(client, userdata, flags, rc):
@@ -71,30 +116,57 @@ def main():
 client.connect(MQTT_HOST, MQTT_PORT, 60)
 client.loop_start()

-# Queue for prepped frames, max size set to number of regions * 3
-prepped_frame_queue = queue.Queue()
+# start plasma store
+plasma_cmd = ['plasma_store', '-m', '400000000', '-s', '/tmp/plasma']
+plasma_process = sp.Popen(plasma_cmd, stdout=sp.DEVNULL)
+time.sleep(1)
+rc = plasma_process.poll()
+if rc is not None:
+raise RuntimeError("plasma_store exited unexpectedly with "
+"code %d" % (rc,))

-cameras = {}
+##
+# Setup config defaults for cameras
+##
 for name, config in CONFIG['cameras'].items():
-cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG, config,
-prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
+config['snapshots'] = {
+'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True)
+}

-fps_tracker = EventsPerSecond()
+# Queue for cameras to push tracked objects to
+tracked_objects_queue = mp.Queue()

-prepped_queue_processor = PreppedQueueProcessor(
-cameras,
-prepped_frame_queue,
-fps_tracker
-)
-prepped_queue_processor.start()
-fps_tracker.start()
+# Start the shared tflite process
+tflite_process = EdgeTPUProcess()

-for name, camera in cameras.items():
-camera.start()
-print("Capture process for {}: {}".format(name, camera.get_capture_pid()))
+# start the camera processes
+camera_processes = {}
+for name, config in CONFIG['cameras'].items():
camera_processes[name] = {
'fps': mp.Value('d', float(config['fps'])),
'skipped_fps': mp.Value('d', 0.0),
'detection_fps': mp.Value('d', 0.0)
}
camera_process = mp.Process(target=track_camera, args=(name, config, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG,
tflite_process.detection_queue, tracked_objects_queue,
camera_processes[name]['fps'], camera_processes[name]['skipped_fps'], camera_processes[name]['detection_fps']))
camera_process.daemon = True
camera_processes[name]['process'] = camera_process
for name, camera_process in camera_processes.items():
camera_process['process'].start()
print(f"Camera_process started for {name}: {camera_process['process'].pid}")
object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX, tracked_objects_queue)
object_processor.start()
camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'], tflite_process, tracked_objects_queue, object_processor)
camera_watchdog.start()
 # create a flask app that encodes frames a mjpeg on demand
 app = Flask(__name__)
+log = logging.getLogger('werkzeug')
+log.setLevel(logging.ERROR)
 @app.route('/')
 def ishealthy():
@@ -103,23 +175,36 @@ def main():
 @app.route('/debug/stats')
 def stats():
-stats = {
-'coral': {
-'fps': fps_tracker.eps(),
-'inference_speed': prepped_queue_processor.avg_inference_speed,
-'queue_length': prepped_frame_queue.qsize()
-}
-}
-for name, camera in cameras.items():
-stats[name] = camera.stats()
+stats = {}
+total_detection_fps = 0
+for name, camera_stats in camera_processes.items():
+total_detection_fps += camera_stats['detection_fps'].value
+stats[name] = {
+'fps': round(camera_stats['fps'].value, 2),
+'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
+'detection_fps': round(camera_stats['detection_fps'].value, 2)
+}
+stats['coral'] = {
+'fps': round(total_detection_fps, 2),
+'inference_speed': round(tflite_process.avg_inference_speed.value*1000, 2),
+'detection_queue': tflite_process.detection_queue.qsize(),
+'detection_start': tflite_process.detection_start.value
+}
+rc = plasma_process.poll()
+stats['plasma_store_rc'] = rc
+stats['tracked_objects_queue'] = tracked_objects_queue.qsize()
 return jsonify(stats)
 @app.route('/<camera_name>/<label>/best.jpg')
 def best(camera_name, label):
-if camera_name in cameras:
-best_frame = cameras[camera_name].get_best(label)
+if camera_name in CONFIG['cameras']:
+best_frame = object_processor.get_best(camera_name, label)
 if best_frame is None:
 best_frame = np.zeros((720,1280,3), np.uint8)
 best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
@@ -132,7 +217,7 @@ def main():
 @app.route('/<camera_name>')
 def mjpeg_feed(camera_name):
-if camera_name in cameras:
+if camera_name in CONFIG['cameras']:
 # return a multipart response
 return Response(imagestream(camera_name),
 mimetype='multipart/x-mixed-replace; boundary=frame')
@@ -143,13 +228,19 @@ def main():
 while True:
 # max out at 1 FPS
 time.sleep(1)
-frame = cameras[camera_name].get_current_frame_with_objects()
+frame = object_processor.get_current_frame(camera_name)
+if frame is None:
+frame = np.zeros((720,1280,3), np.uint8)
+frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+ret, jpg = cv2.imencode('.jpg', frame)
 yield (b'--frame\r\n'
-b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
+b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
 app.run(host='0.0.0.0', port=WEB_PORT, debug=False)
-camera.join()
+camera_watchdog.join()
+plasma_process.terminate()
 if __name__ == '__main__':
 main()

diagram.png: binary file changed (283 KiB → 132 KiB, not shown)

frigate/edgetpu.py Normal file

@@ -0,0 +1,136 @@
import os
import datetime
import hashlib
import multiprocessing as mp
import numpy as np
import SharedArray as sa
import pyarrow.plasma as plasma
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate
from frigate.util import EventsPerSecond
def load_labels(path, encoding='utf-8'):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, 'r', encoding=encoding) as f:
lines = f.readlines()
if not lines:
return {}
if lines[0].split(' ', maxsplit=1)[0].isdigit():
pairs = [line.split(' ', maxsplit=1) for line in lines]
return {int(index): label.strip() for index, label in pairs}
else:
return {index: line.strip() for index, line in enumerate(lines)}
class ObjectDetector():
def __init__(self):
edge_tpu_delegate = None
try:
edge_tpu_delegate = load_delegate('libedgetpu.so.1.0')
except ValueError:
print("No EdgeTPU detected. Falling back to CPU.")
if edge_tpu_delegate is None:
self.interpreter = tflite.Interpreter(
model_path='/cpu_model.tflite')
else:
self.interpreter = tflite.Interpreter(
model_path='/edgetpu_model.tflite',
experimental_delegates=[edge_tpu_delegate])
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
def detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
self.interpreter.invoke()
boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))
detections = np.zeros((20,6), np.float32)
for i, score in enumerate(scores):
detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]]
return detections
def run_detector(detection_queue, avg_speed, start):
print(f"Starting detection process: {os.getpid()}")
plasma_client = plasma.connect("/tmp/plasma")
object_detector = ObjectDetector()
while True:
object_id_str = detection_queue.get()
object_id_hash = hashlib.sha1(str.encode(object_id_str))
object_id = plasma.ObjectID(object_id_hash.digest())
input_frame = plasma_client.get(object_id, timeout_ms=0)
start.value = datetime.datetime.now().timestamp()
# detect and put the output in the plasma store
object_id_out = hashlib.sha1(str.encode(f"out-{object_id_str}")).digest()
plasma_client.put(object_detector.detect_raw(input_frame), plasma.ObjectID(object_id_out))
duration = datetime.datetime.now().timestamp()-start.value
start.value = 0.0
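# exponentially weighted moving average: keep ~90% of the previous average and blend in the newest duration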
avg_speed.value = (avg_speed.value*9 + duration)/10
class EdgeTPUProcess():
def __init__(self):
self.detection_queue = mp.Queue()
self.avg_inference_speed = mp.Value('d', 0.01)
self.detection_start = mp.Value('d', 0.0)
self.detect_process = None
self.start_or_restart()
def start_or_restart(self):
self.detection_start.value = 0.0
if (not self.detect_process is None) and self.detect_process.is_alive():
self.detect_process.terminate()
print("Waiting for detection process to exit gracefully...")
self.detect_process.join(timeout=30)
if self.detect_process.exitcode is None:
print("Detection process didnt exit. Force killing...")
self.detect_process.kill()
self.detect_process.join()
self.detect_process = mp.Process(target=run_detector, args=(self.detection_queue, self.avg_inference_speed, self.detection_start))
self.detect_process.daemon = True
self.detect_process.start()
class RemoteObjectDetector():
def __init__(self, name, labels, detection_queue):
self.labels = load_labels(labels)
self.name = name
self.fps = EventsPerSecond()
self.plasma_client = plasma.connect("/tmp/plasma")
self.detection_queue = detection_queue
def detect(self, tensor_input, threshold=.4):
detections = []
now = f"{self.name}-{str(datetime.datetime.now().timestamp())}"
object_id_frame = plasma.ObjectID(hashlib.sha1(str.encode(now)).digest())
object_id_detections = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{now}")).digest())
self.plasma_client.put(tensor_input, object_id_frame)
self.detection_queue.put(now)
raw_detections = self.plasma_client.get(object_id_detections)
for d in raw_detections:
if d[1] < threshold:
break
detections.append((
self.labels[int(d[0])],
float(d[1]),
(d[2], d[3], d[4], d[5])
))
self.plasma_client.delete([object_id_frame, object_id_detections])
self.fps.update()
return detections
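A minimal usage sketch for the detectors above (illustrative only; it assumes the model and label paths bundled by the Dockerfile, and no Coral is required since `ObjectDetector` falls back to the CPU model):

```python
import numpy as np
from frigate.edgetpu import ObjectDetector, load_labels

labels = load_labels('/labelmap.txt')
detector = ObjectDetector()

# the model expects a [1, 300, 300, 3] uint8 tensor
tensor_input = np.expand_dims(np.zeros((300, 300, 3), np.uint8), axis=0)
detections = detector.detect_raw(tensor_input)

# each row is [label_code, score, box coords]; unfilled rows are zeros
for d in detections:
    if d[1] > 0.4:
        print(labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
```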

frigate/motion.py Normal file

@@ -0,0 +1,79 @@
import cv2
import imutils
import numpy as np
class MotionDetector():
def __init__(self, frame_shape, mask, resize_factor=4):
self.resize_factor = resize_factor
self.motion_frame_size = (int(frame_shape[0]/resize_factor), int(frame_shape[1]/resize_factor))
self.avg_frame = np.zeros(self.motion_frame_size, np.float)
self.avg_delta = np.zeros(self.motion_frame_size, np.float)
self.motion_frame_count = 0
self.frame_counter = 0
resized_mask = cv2.resize(mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
self.mask = np.where(resized_mask==[0])
def detect(self, frame):
motion_boxes = []
# resize frame
resized_frame = cv2.resize(frame, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
# convert to grayscale
gray = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
# mask frame
gray[self.mask] = [255]
# it takes ~30 frames to establish a baseline
# dont bother looking for motion
if self.frame_counter < 30:
self.frame_counter += 1
else:
# compare to average
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg_frame))
# compute the average delta over the past few frames
# the alpha value can be modified to configure how sensitive the motion detection is.
# higher values mean the current frame impacts the delta a lot, and a single raindrop may
# register as motion, too low and a fast moving person wont be detected as motion
# this also assumes that a person is in the same location across more than a single frame
cv2.accumulateWeighted(frameDelta, self.avg_delta, 0.2)
# compute the threshold image for the current frame
current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# black out everything in the avg_delta where there isnt motion in the current frame
avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
avg_delta_image[np.where(current_thresh==[0])] = [0]
# then look for deltas above the threshold, but only in areas where there is a delta
# in the current frame. this prevents deltas from previous frames from being included
thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the contours
for c in cnts:
# if the contour is big enough, count it as motion
contour_area = cv2.contourArea(c)
if contour_area > 100:
x, y, w, h = cv2.boundingRect(c)
motion_boxes.append((x*self.resize_factor, y*self.resize_factor, (x+w)*self.resize_factor, (y+h)*self.resize_factor))
if len(motion_boxes) > 0:
self.motion_frame_count += 1
# TODO: this really depends on FPS
if self.motion_frame_count >= 10:
# only average in the current frame if the difference persists for at least 3 frames
cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
else:
# when no motion, just keep averaging the frames together
cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
self.motion_frame_count = 0
return motion_boxes
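A minimal usage sketch for the class above (illustrative only; assumes a 720p frame and an all-white mask, meaning nothing is masked out):

```python
import numpy as np
from frigate.motion import MotionDetector

frame_shape = (720, 1280, 3)
mask = np.full((720, 1280), 255, np.uint8)  # all white: ignore nothing

motion_detector = MotionDetector(frame_shape, mask, resize_factor=4)

frame = np.zeros(frame_shape, np.uint8)
# returns (x_min, y_min, x_max, y_max) boxes scaled back to full resolution;
# empty for roughly the first 30 frames while the average frame is established
motion_boxes = motion_detector.detect(frame)
```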

frigate/mqtt.py

@@ -1,54 +0,0 @@
import json
import cv2
import threading
import prctl
from collections import Counter, defaultdict
import itertools
class MqttObjectPublisher(threading.Thread):
def __init__(self, client, topic_prefix, camera):
threading.Thread.__init__(self)
self.client = client
self.topic_prefix = topic_prefix
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
current_object_status = defaultdict(lambda: 'OFF')
while True:
# wait until objects have been tracked
with self.camera.objects_tracked:
self.camera.objects_tracked.wait()
# count objects with more than 2 entries in history by type
obj_counter = Counter()
for obj in self.camera.object_tracker.tracked_objects.values():
if len(obj['history']) > 1:
obj_counter[obj['name']] += 1
# report on detected objects
for obj_name, count in obj_counter.items():
new_status = 'ON' if count > 0 else 'OFF'
if new_status != current_object_status[obj_name]:
current_object_status[obj_name] = new_status
self.client.publish(self.topic_prefix+'/'+obj_name, new_status, retain=False)
# send the snapshot over mqtt if we have it as well
if obj_name in self.camera.best_frames.best_frames:
best_frame = cv2.cvtColor(self.camera.best_frames.best_frames[obj_name], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(self.topic_prefix+'/'+obj_name+'/snapshot', jpg_bytes, retain=True)
# expire any objects that are ON and no longer detected
expired_objects = [obj_name for obj_name, status in current_object_status.items() if status == 'ON' and not obj_name in obj_counter]
for obj_name in expired_objects:
current_object_status[obj_name] = 'OFF'
self.client.publish(self.topic_prefix+'/'+obj_name, 'OFF', retain=False)
# send updated snapshot snapshot over mqtt if we have it as well
if obj_name in self.camera.best_frames.best_frames:
best_frame = cv2.cvtColor(self.camera.best_frames.best_frames[obj_name], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(self.topic_prefix+'/'+obj_name+'/snapshot', jpg_bytes, retain=True)

frigate/object_detection.py

@@ -1,139 +0,0 @@
import datetime
import time
import cv2
import threading
import copy
import prctl
import numpy as np
from edgetpu.detection.engine import DetectionEngine
from frigate.util import tonumpyarray, LABELS, PATH_TO_CKPT, calculate_region
class PreppedQueueProcessor(threading.Thread):
def __init__(self, cameras, prepped_frame_queue, fps):
threading.Thread.__init__(self)
self.cameras = cameras
self.prepped_frame_queue = prepped_frame_queue
# Load the edgetpu engine and labels
self.engine = DetectionEngine(PATH_TO_CKPT)
self.labels = LABELS
self.fps = fps
self.avg_inference_speed = 10
def run(self):
prctl.set_name(self.__class__.__name__)
# process queue...
while True:
frame = self.prepped_frame_queue.get()
# Actual detection.
frame['detected_objects'] = self.engine.detect_with_input_tensor(frame['frame'], threshold=0.2, top_k=5)
self.fps.update()
self.avg_inference_speed = (self.avg_inference_speed*9 + self.engine.get_inference_time())/10
self.cameras[frame['camera_name']].detected_objects_queue.put(frame)
class RegionRequester(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
frame_time = 0.0
while True:
now = datetime.datetime.now().timestamp()
with self.camera.frame_ready:
# if there isnt a frame ready for processing or it is old, wait for a new frame
if self.camera.frame_time.value == frame_time or (now - self.camera.frame_time.value) > 0.5:
self.camera.frame_ready.wait()
# make a copy of the frame_time
frame_time = self.camera.frame_time.value
# grab the current tracked objects
with self.camera.object_tracker.tracked_objects_lock:
tracked_objects = copy.deepcopy(self.camera.object_tracker.tracked_objects).values()
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[frame_time] = len(self.camera.config['regions'])
self.camera.regions_in_process[frame_time] += len(tracked_objects)
for index, region in enumerate(self.camera.config['regions']):
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': index,
'size': region['size'],
'x_offset': region['x_offset'],
'y_offset': region['y_offset']
})
# request a region for tracked objects
for tracked_object in tracked_objects:
box = tracked_object['box']
# calculate a new region that will hopefully get the entire object
(size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
box['xmin'], box['ymin'],
box['xmax'], box['ymax'])
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': -1,
'size': size,
'x_offset': x_offset,
'y_offset': y_offset
})
class RegionPrepper(threading.Thread):
def __init__(self, camera, frame_cache, resize_request_queue, prepped_frame_queue):
threading.Thread.__init__(self)
self.camera = camera
self.frame_cache = frame_cache
self.resize_request_queue = resize_request_queue
self.prepped_frame_queue = prepped_frame_queue
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
resize_request = self.resize_request_queue.get()
# if the queue is over 100 items long, only prep dynamic regions
if resize_request['region_id'] != -1 and self.prepped_frame_queue.qsize() > 100:
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[resize_request['frame_time']] -= 1
if self.camera.regions_in_process[resize_request['frame_time']] == 0:
del self.camera.regions_in_process[resize_request['frame_time']]
self.camera.skipped_region_tracker.update()
continue
frame = self.frame_cache.get(resize_request['frame_time'], None)
if frame is None:
print("RegionPrepper: frame_time not in frame_cache")
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[resize_request['frame_time']] -= 1
if self.camera.regions_in_process[resize_request['frame_time']] == 0:
del self.camera.regions_in_process[resize_request['frame_time']]
self.camera.skipped_region_tracker.update()
continue
# make a copy of the region
cropped_frame = frame[resize_request['y_offset']:resize_request['y_offset']+resize_request['size'], resize_request['x_offset']:resize_request['x_offset']+resize_request['size']].copy()
# Resize to 300x300 if needed
if cropped_frame.shape != (300, 300, 3):
# TODO: use Pillow-SIMD?
cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
# Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
frame_expanded = np.expand_dims(cropped_frame, axis=0)
# add the frame to the queue
resize_request['frame'] = frame_expanded.flatten().copy()
self.prepped_frame_queue.put(resize_request)

frigate/object_processing.py

@@ -0,0 +1,149 @@
import json
import hashlib
import datetime
import copy
import cv2
import threading
import numpy as np
from collections import Counter, defaultdict
import itertools
import pyarrow.plasma as plasma
import SharedArray as sa
import matplotlib.pyplot as plt
from frigate.util import draw_box_with_label
from frigate.edgetpu import load_labels
PATH_TO_LABELS = '/labelmap.txt'
LABELS = load_labels(PATH_TO_LABELS)
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
COLOR_MAP = {}
for key, val in LABELS.items():
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
class TrackedObjectProcessor(threading.Thread):
def __init__(self, config, client, topic_prefix, tracked_objects_queue):
threading.Thread.__init__(self)
self.config = config
self.client = client
self.topic_prefix = topic_prefix
self.tracked_objects_queue = tracked_objects_queue
self.plasma_client = plasma.connect("/tmp/plasma")
self.camera_data = defaultdict(lambda: {
'best_objects': {},
'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
'tracked_objects': {},
'current_frame': np.zeros((720,1280,3), np.uint8),
'object_id': None
})
def get_best(self, camera, label):
if label in self.camera_data[camera]['best_objects']:
return self.camera_data[camera]['best_objects'][label]['frame']
else:
return None
def get_current_frame(self, camera):
return self.camera_data[camera]['current_frame']
def run(self):
while True:
camera, frame_time, tracked_objects = self.tracked_objects_queue.get()
config = self.config[camera]
best_objects = self.camera_data[camera]['best_objects']
current_object_status = self.camera_data[camera]['object_status']
self.camera_data[camera]['tracked_objects'] = tracked_objects
###
# Draw tracked objects on the frame
###
object_id_hash = hashlib.sha1(str.encode(f"{camera}{frame_time}"))
object_id_bytes = object_id_hash.digest()
object_id = plasma.ObjectID(object_id_bytes)
current_frame = self.plasma_client.get(object_id, timeout_ms=0)
if not current_frame is plasma.ObjectNotAvailable:
# draw the bounding boxes on the frame
for obj in tracked_objects.values():
thickness = 2
color = COLOR_MAP[obj['label']]
if obj['frame_time'] != frame_time:
thickness = 1
color = (255,0,0)
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
# draw the regions on the frame
region = obj['region']
cv2.rectangle(current_frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
if config['snapshots']['show_timestamp']:
time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
###
# Set the current frame as ready
###
self.camera_data[camera]['current_frame'] = current_frame
# store the object id, so you can delete it at the next loop
previous_object_id = self.camera_data[camera]['object_id']
if not previous_object_id is None:
self.plasma_client.delete([previous_object_id])
self.camera_data[camera]['object_id'] = object_id
###
# Maintain the highest scoring recent object and frame for each label
###
for obj in tracked_objects.values():
# if the object wasn't seen on the current frame, skip it
if obj['frame_time'] != frame_time:
continue
if obj['label'] in best_objects:
now = datetime.datetime.now().timestamp()
# if the object is a higher score than the current best score
# or the current object is more than 1 minute old, use the new object
if obj['score'] > best_objects[obj['label']]['score'] or (now - best_objects[obj['label']]['frame_time']) > 60:
obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
best_objects[obj['label']] = obj
else:
obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
best_objects[obj['label']] = obj
###
# Report over MQTT
###
# count objects with more than 2 entries in history by type
obj_counter = Counter()
for obj in tracked_objects.values():
if len(obj['history']) > 1:
obj_counter[obj['label']] += 1
# report on detected objects
for obj_name, count in obj_counter.items():
new_status = 'ON' if count > 0 else 'OFF'
if new_status != current_object_status[obj_name]:
current_object_status[obj_name] = new_status
self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}", new_status, retain=False)
# send the best snapshot over mqtt
best_frame = cv2.cvtColor(best_objects[obj_name]['frame'], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}/snapshot", jpg_bytes, retain=True)
# expire any objects that are ON and no longer detected
expired_objects = [obj_name for obj_name, status in current_object_status.items() if status == 'ON' and not obj_name in obj_counter]
for obj_name in expired_objects:
current_object_status[obj_name] = 'OFF'
self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}", 'OFF', retain=False)
# send updated snapshot over mqtt
best_frame = cv2.cvtColor(best_objects[obj_name]['frame'], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}/snapshot", jpg_bytes, retain=True)

frigate/objects.py

@@ -2,277 +2,34 @@ import time
 import datetime
 import threading
 import cv2
-import prctl
 import itertools
 import copy
 import numpy as np
 import multiprocessing as mp
 from collections import defaultdict
 from scipy.spatial import distance as dist
-from frigate.util import draw_box_with_label, LABELS, compute_intersection_rectangle, compute_intersection_over_union, calculate_region
+from frigate.util import draw_box_with_label, calculate_region

-class ObjectCleaner(threading.Thread):
-def __init__(self, camera):
+class ObjectTracker():
+def __init__(self, max_disappeared):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name("ObjectCleaner")
while True:
# wait a bit before checking for expired frames
time.sleep(0.2)
for frame_time in list(self.camera.detected_objects.keys()).copy():
if not frame_time in self.camera.frame_cache:
del self.camera.detected_objects[frame_time]
objects_deregistered = False
with self.camera.object_tracker.tracked_objects_lock:
now = datetime.datetime.now().timestamp()
for id, obj in list(self.camera.object_tracker.tracked_objects.items()):
# if the object is more than 10 seconds old
# and not in the most recent frame, deregister
if (now - obj['frame_time']) > 10 and self.camera.object_tracker.most_recent_frame_time > obj['frame_time']:
self.camera.object_tracker.deregister(id)
objects_deregistered = True
if objects_deregistered:
with self.camera.objects_tracked:
self.camera.objects_tracked.notify_all()
class DetectedObjectsProcessor(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame = self.camera.detected_objects_queue.get()
objects = frame['detected_objects']
for raw_obj in objects:
name = str(LABELS[raw_obj.label_id])
if not name in self.camera.objects_to_track:
continue
obj = {
'name': name,
'score': float(raw_obj.score),
'box': {
'xmin': int((raw_obj.bounding_box[0][0] * frame['size']) + frame['x_offset']),
'ymin': int((raw_obj.bounding_box[0][1] * frame['size']) + frame['y_offset']),
'xmax': int((raw_obj.bounding_box[1][0] * frame['size']) + frame['x_offset']),
'ymax': int((raw_obj.bounding_box[1][1] * frame['size']) + frame['y_offset'])
},
'region': {
'xmin': frame['x_offset'],
'ymin': frame['y_offset'],
'xmax': frame['x_offset']+frame['size'],
'ymax': frame['y_offset']+frame['size']
},
'frame_time': frame['frame_time'],
'region_id': frame['region_id']
}
# if the object is within 5 pixels of the region border, and the region is not on the edge
# consider the object to be clipped
obj['clipped'] = False
if ((obj['region']['xmin'] > 5 and obj['box']['xmin']-obj['region']['xmin'] <= 5) or
(obj['region']['ymin'] > 5 and obj['box']['ymin']-obj['region']['ymin'] <= 5) or
(self.camera.frame_shape[1]-obj['region']['xmax'] > 5 and obj['region']['xmax']-obj['box']['xmax'] <= 5) or
(self.camera.frame_shape[0]-obj['region']['ymax'] > 5 and obj['region']['ymax']-obj['box']['ymax'] <= 5)):
obj['clipped'] = True
# Compute the area
# TODO: +1 right?
obj['area'] = (obj['box']['xmax']-obj['box']['xmin'])*(obj['box']['ymax']-obj['box']['ymin'])
self.camera.detected_objects[frame['frame_time']].append(obj)
# TODO: use in_process and processed counts instead to avoid lock
with self.camera.regions_in_process_lock:
if frame['frame_time'] in self.camera.regions_in_process:
self.camera.regions_in_process[frame['frame_time']] -= 1
# print(f"{frame['frame_time']} remaining regions {self.camera.regions_in_process[frame['frame_time']]}")
if self.camera.regions_in_process[frame['frame_time']] == 0:
del self.camera.regions_in_process[frame['frame_time']]
# print(f"{frame['frame_time']} no remaining regions")
self.camera.finished_frame_queue.put(frame['frame_time'])
else:
self.camera.finished_frame_queue.put(frame['frame_time'])
# Thread that checks finished frames for clipped objects and sends back
# for processing if needed
# TODO: evaluate whether or not i really need separate threads/queues for each step
# given that only 1 thread will really be able to run at a time. you need a
# separate process to actually do things in parallel for when you are CPU bound.
# threads are good when you are waiting and could be processing while you wait
class RegionRefiner(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame_time = self.camera.finished_frame_queue.get()
detected_objects = self.camera.detected_objects[frame_time].copy()
# print(f"{frame_time} finished")
# group by name
detected_object_groups = defaultdict(lambda: [])
for obj in detected_objects:
detected_object_groups[obj['name']].append(obj)
look_again = False
selected_objects = []
for group in detected_object_groups.values():
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
boxes = [(o['box']['xmin'], o['box']['ymin'], o['box']['xmax']-o['box']['xmin'], o['box']['ymax']-o['box']['ymin'])
for o in group]
confidences = [o['score'] for o in group]
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
for index in idxs:
obj = group[index[0]]
selected_objects.append(obj)
if obj['clipped']:
box = obj['box']
# calculate a new region that will hopefully get the entire object
(size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
box['xmin'], box['ymin'],
box['xmax'], box['ymax'])
# print(f"{frame_time} new region: {size} {x_offset} {y_offset}")
with self.camera.regions_in_process_lock:
if not frame_time in self.camera.regions_in_process:
self.camera.regions_in_process[frame_time] = 1
else:
self.camera.regions_in_process[frame_time] += 1
# add it to the queue
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': -1,
'size': size,
'x_offset': x_offset,
'y_offset': y_offset
})
self.camera.dynamic_region_fps.update()
look_again = True
# if we are looking again, then this frame is not ready for processing
if look_again:
# remove the clipped objects
self.camera.detected_objects[frame_time] = [o for o in selected_objects if not o['clipped']]
continue
# filter objects based on camera settings
selected_objects = [o for o in selected_objects if not self.filtered(o)]
self.camera.detected_objects[frame_time] = selected_objects
# print(f"{frame_time} is actually finished")
# keep adding frames to the refined queue as long as they are finished
with self.camera.regions_in_process_lock:
while self.camera.frame_queue.qsize() > 0 and self.camera.frame_queue.queue[0] not in self.camera.regions_in_process:
self.camera.last_processed_frame = self.camera.frame_queue.get()
self.camera.refined_frame_queue.put(self.camera.last_processed_frame)
def filtered(self, obj):
object_name = obj['name']
if object_name in self.camera.object_filters:
obj_settings = self.camera.object_filters[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
if obj_settings.get('min_area',-1) > obj['area']:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.get('max_area', self.camera.frame_shape[0]*self.camera.frame_shape[1]) < obj['area']:
return True
# if the score is lower than the threshold, skip
if obj_settings.get('threshold', 0) > obj['score']:
return True
# compute the coordinates of the object and make sure
# the location isnt outside the bounds of the image (can happen from rounding)
y_location = min(int(obj['box']['ymax']), len(self.camera.mask)-1)
x_location = min(int((obj['box']['xmax']-obj['box']['xmin'])/2.0)+obj['box']['xmin'], len(self.camera.mask[0])-1)
# if the object is in a masked location, don't add it to detected objects
if self.camera.mask[y_location][x_location] == [0]:
return True
return False
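# Minimal standalone sketch of the mask check used in filtered() above: the mask
# is a single-channel image where 0 marks ignored areas, and the object's
# bottom-center pixel decides whether it is dropped (toy 4x4 mask, made-up box).
import numpy as np
demo_mask = np.full((4, 4, 1), 255, dtype=np.uint8)
demo_mask[0:2, 0:2] = 0  # top-left quadrant is masked off
demo_box = {'xmin': 0, 'ymin': 0, 'xmax': 1, 'ymax': 1}
demo_y = min(int(demo_box['ymax']), demo_mask.shape[0]-1)
demo_x = min(int((demo_box['xmax']-demo_box['xmin'])/2.0)+demo_box['xmin'], demo_mask.shape[1]-1)
print(demo_mask[demo_y][demo_x] == [0])  # [ True]: this detection would be filtered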
def has_overlap(self, new_obj, obj, overlap=.7):
# compute intersection rectangle with existing object and new objects region
existing_obj_current_region = compute_intersection_rectangle(obj['box'], new_obj['region'])
# compute intersection rectangle with new object and existing objects region
new_obj_existing_region = compute_intersection_rectangle(new_obj['box'], obj['region'])
# compute iou for the two intersection rectangles that were just computed
iou = compute_intersection_over_union(existing_obj_current_region, new_obj_existing_region)
# if intersection is greater than overlap
if iou > overlap:
return True
else:
return False
def find_group(self, new_obj, groups):
for index, group in enumerate(groups):
for obj in group:
if self.has_overlap(new_obj, obj):
return index
return None
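# Standalone sketch of the non-maxima suppression step above: cv2.dnn.NMSBoxes keeps
# the highest-scoring box among heavily overlapping ones. Boxes are (x, y, w, h);
# the values are illustrative only.
import cv2
import numpy as np
demo_boxes = [(100, 100, 80, 80), (105, 102, 80, 80), (400, 300, 60, 60)]
demo_confidences = [0.9, 0.6, 0.8]
demo_idxs = cv2.dnn.NMSBoxes(demo_boxes, demo_confidences, 0.5, 0.4)
for demo_index in np.array(demo_idxs).flatten():
    print(demo_boxes[demo_index], demo_confidences[demo_index])  # the two non-overlapping boxes survive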
class ObjectTracker(threading.Thread):
def __init__(self, camera, max_disappeared):
threading.Thread.__init__(self)
self.camera = camera
self.tracked_objects = {}
self.tracked_objects_lock = mp.Lock()
self.disappeared = {}
self.most_recent_frame_time = None
self.max_disappeared = max_disappeared
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame_time = self.camera.refined_frame_queue.get()
with self.tracked_objects_lock:
self.match_and_update(self.camera.detected_objects[frame_time])
self.most_recent_frame_time = frame_time
self.camera.frame_output_queue.put((frame_time, copy.deepcopy(self.tracked_objects)))
if len(self.tracked_objects) > 0:
with self.camera.objects_tracked:
self.camera.objects_tracked.notify_all()
def register(self, index, obj):
id = "{}-{}".format(str(obj['frame_time']), index)
id = f"{obj['frame_time']}-{index}"
obj['id'] = id
obj['top_score'] = obj['score']
self.add_history(obj)
self.tracked_objects[id] = obj
self.disappeared[id] = 0
def deregister(self, id):
del self.tracked_objects[id]
del self.disappeared[id]
def update(self, id, new_obj):
self.disappeared[id] = 0
self.tracked_objects[id].update(new_obj)
self.add_history(self.tracked_objects[id])
if self.tracked_objects[id]['score'] > self.tracked_objects[id]['top_score']:
@@ -291,25 +48,41 @@ class ObjectTracker(threading.Thread):
else:
obj['history'] = [entry]
def match_and_update(self, new_objects):
def match_and_update(self, frame_time, new_objects):
if len(new_objects) == 0:
return
# group by name
new_object_groups = defaultdict(lambda: [])
for obj in new_objects:
new_object_groups[obj['name']].append(obj)
new_object_groups[obj[0]].append({
'label': obj[0],
'score': obj[1],
'box': obj[2],
'area': obj[3],
'region': obj[4],
'frame_time': frame_time
})
# update any tracked objects with labels that are not
# seen in the current objects and deregister if needed
for obj in list(self.tracked_objects.values()):
if not obj['label'] in new_object_groups:
if self.disappeared[obj['id']] >= self.max_disappeared:
self.deregister(obj['id'])
else:
self.disappeared[obj['id']] += 1
if len(new_objects) == 0:
return
# track objects for each label type
for label, group in new_object_groups.items():
current_objects = [o for o in self.tracked_objects.values() if o['name'] == label]
current_objects = [o for o in self.tracked_objects.values() if o['label'] == label]
current_ids = [o['id'] for o in current_objects]
current_centroids = np.array([o['centroid'] for o in current_objects])
# compute centroids of new objects
for obj in group:
centroid_x = int((obj['box']['xmin']+obj['box']['xmax']) / 2.0)
centroid_x = int((obj['box'][0]+obj['box'][2]) / 2.0)
centroid_y = int((obj['box']['ymin']+obj['box']['ymax']) / 2.0)
centroid_y = int((obj['box'][1]+obj['box'][3]) / 2.0)
obj['centroid'] = (centroid_x, centroid_y)
if len(current_objects) == 0:
@@ -363,56 +136,24 @@ class ObjectTracker(threading.Thread):
usedCols.add(col)
# compute the column index we have NOT yet examined
unusedRows = set(range(0, D.shape[0])).difference(usedRows)
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
# in the event that the number of object centroids is
# equal or greater than the number of input centroids
# we need to check and see if some of these objects have
# potentially disappeared
if D.shape[0] >= D.shape[1]:
for row in unusedRows:
id = current_ids[row]
if self.disappeared[id] >= self.max_disappeared:
self.deregister(id)
else:
self.disappeared[id] += 1
# if the number of input centroids is greater
# than the number of existing object centroids we need to
# register each new input centroid as a trackable object
# if D.shape[0] < D.shape[1]:
# TODO: rather than assuming these are new objects, we could
# look to see if any of the remaining boxes have a large amount
# of overlap...
else:
for col in unusedCols:
self.register(col, group[col])
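# A toy sketch of the centroid matching whose middle section is elided by the hunk
# header above: build a distance matrix between existing and new centroids and pair
# the closest ones (numpy only; the values and greedy pairing are illustrative).
import numpy as np
existing_centroids = np.array([[100, 100], [400, 250]])
new_centroids = np.array([[105, 98], [600, 300]])
dist_matrix = np.linalg.norm(existing_centroids[:, None, :] - new_centroids[None, :, :], axis=2)
rows = dist_matrix.min(axis=1).argsort()   # existing objects ordered by their closest match
cols = dist_matrix.argmin(axis=1)[rows]    # the new centroid each one matches
print(list(zip(rows.tolist(), cols.tolist())))  # [(0, 0), (1, 1)]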
# Maintains the frame and object with the highest score
class BestFrames(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
self.best_objects = {}
self.best_frames = {}
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
# wait until objects have been tracked
with self.camera.objects_tracked:
self.camera.objects_tracked.wait()
# make a copy of tracked objects
tracked_objects = list(self.camera.object_tracker.tracked_objects.values())
for obj in tracked_objects:
if obj['name'] in self.best_objects:
now = datetime.datetime.now().timestamp()
# if the object is a higher score than the current best score
# or the current object is more than 1 minute old, use the new object
if obj['score'] > self.best_objects[obj['name']]['score'] or (now - self.best_objects[obj['name']]['frame_time']) > 60:
self.best_objects[obj['name']] = copy.deepcopy(obj)
else:
self.best_objects[obj['name']] = copy.deepcopy(obj)
for name, obj in self.best_objects.items():
if obj['frame_time'] in self.camera.frame_cache:
best_frame = self.camera.frame_cache[obj['frame_time']]
draw_box_with_label(best_frame, obj['box']['xmin'], obj['box']['ymin'],
obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']))
# print a timestamp
if self.camera.snapshot_config['show_timestamp']:
time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(best_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
self.best_frames[name] = best_frame

166
frigate/util.py Normal file → Executable file

@@ -5,83 +5,11 @@ import cv2
import threading
import matplotlib.pyplot as plt
# Function to read labels from text files.
def ReadLabelFile(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def calculate_region(frame_shape, xmin, ymin, xmax, ymax):
# size is larger than longest edge
size = int(max(xmax-xmin, ymax-ymin)*2)
# if the size is too big to fit in the frame
if size > min(frame_shape[0], frame_shape[1]):
size = min(frame_shape[0], frame_shape[1])
# x_offset is midpoint of bounding box minus half the size
x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
# if outside the image
if x_offset < 0:
x_offset = 0
elif x_offset > (frame_shape[1]-size):
x_offset = (frame_shape[1]-size)
# y_offset is midpoint of bounding box minus half the size
y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
# if outside the image
if y_offset < 0:
y_offset = 0
elif y_offset > (frame_shape[0]-size):
y_offset = (frame_shape[0]-size)
return (size, x_offset, y_offset)
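# Worked example of the region calculation above, assuming a 1080p frame and a
# 100x200 bounding box: the region is a square twice the longest edge, clamped
# to the frame (only the lower clamp is hit with these numbers).
frame_shape = (1080, 1920, 3)
xmin, ymin, xmax, ymax = 100, 100, 200, 300
size = int(max(xmax-xmin, ymax-ymin)*2)                    # 400
x_offset = max(0, int((xmax-xmin)/2.0+xmin-size/2.0))      # -50, clamped to 0
y_offset = max(0, int((ymax-ymin)/2.0+ymin-size/2.0))      # 0
print((size, x_offset, y_offset))                          # (400, 0, 0)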
def compute_intersection_rectangle(box_a, box_b):
return {
'xmin': max(box_a['xmin'], box_b['xmin']),
'ymin': max(box_a['ymin'], box_b['ymin']),
'xmax': min(box_a['xmax'], box_b['xmax']),
'ymax': min(box_a['ymax'], box_b['ymax'])
}
def compute_intersection_over_union(box_a, box_b):
# determine the (x, y)-coordinates of the intersection rectangle
intersect = compute_intersection_rectangle(box_a, box_b)
# compute the area of intersection rectangle
inter_area = max(0, intersect['xmax'] - intersect['xmin'] + 1) * max(0, intersect['ymax'] - intersect['ymin'] + 1)
if inter_area == 0:
return 0.0
# compute the area of both the prediction and ground-truth
# rectangles
box_a_area = (box_a['xmax'] - box_a['xmin'] + 1) * (box_a['ymax'] - box_a['ymin'] + 1)
box_b_area = (box_b['xmax'] - box_b['xmin'] + 1) * (box_b['ymax'] - box_b['ymin'] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = inter_area / float(box_a_area + box_b_area - inter_area)
# return the intersection over union value
return iou
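# Numeric check of the IOU above (inclusive-pixel convention, hence the +1 terms):
# two 11x11 boxes whose overlap is a 6x6 patch.
box_a = {'xmin': 0, 'ymin': 0, 'xmax': 10, 'ymax': 10}
box_b = {'xmin': 5, 'ymin': 5, 'xmax': 15, 'ymax': 15}
inter_area = (10 - 5 + 1) * (10 - 5 + 1)     # 36
union_area = 11 * 11 + 11 * 11 - inter_area  # 206
print(round(inter_area / union_area, 3))     # 0.175, matching compute_intersection_over_union(box_a, box_b)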
# convert shared memory array into numpy array
def tonumpyarray(mp_arr):
return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
if color is None:
color = COLOR_MAP[label]
color = (0,0,255)
display_text = "{}: {}".format(label, info)
cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
font_scale = 0.5
font = cv2.FONT_HERSHEY_SIMPLEX
# get the width and height of the text box
@@ -107,37 +35,77 @@ def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thicknes
cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/label_map.pbtext'
LABELS = ReadLabelFile(PATH_TO_LABELS)
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
COLOR_MAP = {}
for key, val in LABELS.items():
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
class QueueMerger():
def __init__(self, from_queues, to_queue):
self.from_queues = from_queues
self.to_queue = to_queue
self.merge_threads = []
def start(self):
for from_q in self.from_queues:
self.merge_threads.append(QueueTransfer(from_q,self.to_queue))
class QueueTransfer(threading.Thread):
def __init__(self, from_queue, to_queue):
threading.Thread.__init__(self)
self.from_queue = from_queue
self.to_queue = to_queue
def run(self):
while True:
self.to_queue.put(self.from_queue.get())
def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
# size is larger than longest edge
size = int(max(xmax-xmin, ymax-ymin)*multiplier)
# if the size is too big to fit in the frame
if size > min(frame_shape[0], frame_shape[1]):
size = min(frame_shape[0], frame_shape[1])
# x_offset is midpoint of bounding box minus half the size
x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
# if outside the image
if x_offset < 0:
x_offset = 0
elif x_offset > (frame_shape[1]-size):
x_offset = (frame_shape[1]-size)
# y_offset is midpoint of bounding box minus half the size
y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
# if outside the image
if y_offset < 0:
y_offset = 0
elif y_offset > (frame_shape[0]-size):
y_offset = (frame_shape[0]-size)
return (x_offset, y_offset, x_offset+size, y_offset+size)
def intersection(box_a, box_b):
return (
max(box_a[0], box_b[0]),
max(box_a[1], box_b[1]),
min(box_a[2], box_b[2]),
min(box_a[3], box_b[3])
)
def area(box):
return (box[2]-box[0] + 1)*(box[3]-box[1] + 1)
def intersection_over_union(box_a, box_b):
# determine the (x, y)-coordinates of the intersection rectangle
intersect = intersection(box_a, box_b)
# compute the area of intersection rectangle
inter_area = max(0, intersect[2] - intersect[0] + 1) * max(0, intersect[3] - intersect[1] + 1)
if inter_area == 0:
return 0.0
# compute the area of both the prediction and ground-truth
# rectangles
box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = inter_area / float(box_a_area + box_b_area - inter_area)
# return the intersection over union value
return iou
def clipped(obj, frame_shape):
# if the object is within 5 pixels of the region border, and the region is not on the edge
# consider the object to be clipped
box = obj[2]
region = obj[4]
if ((region[0] > 5 and box[0]-region[0] <= 5) or
(region[1] > 5 and box[1]-region[1] <= 5) or
(frame_shape[1]-region[2] > 5 and region[2]-box[2] <= 5) or
(frame_shape[0]-region[3] > 5 and region[3]-box[3] <= 5)):
return True
else:
return False
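# Standalone check of the clipping heuristic above, assuming the 5-tuple detection
# layout (label, score, box, area, region) used in this file; the values are made up.
def clipped_demo(box, region, frame_shape):
    # an object is "clipped" when it presses against a region edge that is not
    # also a frame edge, suggesting it extends beyond the region that was scanned
    return ((region[0] > 5 and box[0]-region[0] <= 5) or
            (region[1] > 5 and box[1]-region[1] <= 5) or
            (frame_shape[1]-region[2] > 5 and region[2]-box[2] <= 5) or
            (frame_shape[0]-region[3] > 5 and region[3]-box[3] <= 5))

demo_frame_shape = (1080, 1920, 3)
demo_region = (500, 300, 800, 600)                                        # a 300x300 crop inside the frame
print(clipped_demo((502, 400, 600, 500), demo_region, demo_frame_shape))  # True: hugs the region's left edge
print(clipped_demo((600, 400, 700, 500), demo_region, demo_frame_shape))  # False: comfortably inside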
class EventsPerSecond:
def __init__(self, max_events=1000):

598
frigate/video.py Normal file → Executable file

@@ -8,39 +8,17 @@ import ctypes
import multiprocessing as mp
import subprocess as sp
import numpy as np
import prctl
import hashlib
import pyarrow.plasma as plasma
import SharedArray as sa
import copy
import itertools
import json
from collections import defaultdict
from frigate.util import tonumpyarray, LABELS, draw_box_with_label, calculate_region, EventsPerSecond
from frigate.util import draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond
from frigate.object_detection import RegionPrepper, RegionRequester
from frigate.objects import ObjectCleaner, BestFrames, DetectedObjectsProcessor, RegionRefiner, ObjectTracker
from frigate.objects import ObjectTracker
from frigate.edgetpu import RemoteObjectDetector
from frigate.mqtt import MqttObjectPublisher
from frigate.motion import MotionDetector
# Stores 2 seconds worth of frames so they can be used for other threads
class FrameTracker(threading.Thread):
def __init__(self, frame_time, frame_ready, frame_lock, recent_frames):
threading.Thread.__init__(self)
self.frame_time = frame_time
self.frame_ready = frame_ready
self.frame_lock = frame_lock
self.recent_frames = recent_frames
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
# wait for a frame
with self.frame_ready:
self.frame_ready.wait()
# delete any old frames
stored_frame_times = list(self.recent_frames.keys())
stored_frame_times.sort(reverse=True)
if len(stored_frame_times) > 100:
frames_to_delete = stored_frame_times[50:]
for k in frames_to_delete:
del self.recent_frames[k]
def get_frame_shape(source):
ffprobe_cmd = " ".join([
@@ -76,330 +54,324 @@ def get_ffmpeg_input(ffmpeg_input):
frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
return ffmpeg_input.format(**frigate_vars)
class CameraWatchdog(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
# wait a bit before checking
time.sleep(10)
if self.camera.frame_time.value != 0.0 and (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > self.camera.watchdog_timeout:
print(self.camera.name + ": last frame is more than 5 minutes old, restarting camera capture...")
self.camera.start_or_restart_capture()
time.sleep(5)
# Thread to read the stdout of the ffmpeg process and update the current frame
class CameraCapture(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
frame_num = 0
while True:
if self.camera.ffmpeg_process.poll() != None:
print(self.camera.name + ": ffmpeg process is not running. exiting capture thread...")
break
raw_image = self.camera.ffmpeg_process.stdout.read(self.camera.frame_size)
if len(raw_image) == 0:
print(self.camera.name + ": ffmpeg didnt return a frame. something is wrong. exiting capture thread...")
break
frame_num += 1
if (frame_num % self.camera.take_frame) != 0:
continue
with self.camera.frame_lock:
# TODO: use frame_queue instead
self.camera.frame_time.value = datetime.datetime.now().timestamp()
self.camera.frame_cache[self.camera.frame_time.value] = (
np
.frombuffer(raw_image, np.uint8)
.reshape(self.camera.frame_shape)
)
self.camera.frame_queue.put(self.camera.frame_time.value)
# Notify with the condition that a new frame is ready
with self.camera.frame_ready:
self.camera.frame_ready.notify_all()
self.camera.fps.update()
class VideoWriter(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
(frame_time, tracked_objects) = self.camera.frame_output_queue.get()
# if len(tracked_objects) == 0:
# continue
# f = open(f"/debug/output/{self.camera.name}-{str(format(frame_time, '.8f'))}.jpg", 'wb')
# f.write(self.camera.frame_with_objects(frame_time, tracked_objects))
# f.close()
def filtered(obj, objects_to_track, object_filters, mask):
object_name = obj[0]
if not object_name in objects_to_track:
return True
if object_name in object_filters:
obj_settings = object_filters[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
if obj_settings.get('min_area',-1) > obj[3]:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.get('max_area', 24000000) < obj[3]:
return True
# if the score is lower than the threshold, skip
if obj_settings.get('threshold', 0) > obj[1]:
return True
# compute the coordinates of the object and make sure
# the location isnt outside the bounds of the image (can happen from rounding)
y_location = min(int(obj[2][3]), len(mask)-1)
x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(mask[0])-1)
# if the object is in a masked location, don't add it to detected objects
if mask[y_location][x_location] == [0]:
return True
return False
def create_tensor_input(frame, region):
cropped_frame = frame[region[1]:region[3], region[0]:region[2]]
# Resize to 300x300 if needed
if cropped_frame.shape != (300, 300, 3):
cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
# Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
return np.expand_dims(cropped_frame, axis=0)
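# Minimal sketch of preparing a detector input like create_tensor_input above,
# assuming a blank 1080p frame and a 300x300 region; shapes are illustrative.
import numpy as np
import cv2
demo_frame = np.zeros((1080, 1920, 3), dtype=np.uint8)
demo_region = (500, 300, 800, 600)                     # already 300x300, so no resize needed
demo_crop = demo_frame[demo_region[1]:demo_region[3], demo_region[0]:demo_region[2]]
if demo_crop.shape != (300, 300, 3):
    demo_crop = cv2.resize(demo_crop, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
print(np.expand_dims(demo_crop, axis=0).shape)         # (1, 300, 300, 3), the shape the SSD expects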
class Camera: def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
def __init__(self, name, ffmpeg_config, global_objects_config, config, prepped_frame_queue, mqtt_client, mqtt_prefix): if not ffmpeg_process is None:
self.name = name print("Terminating the existing ffmpeg process...")
self.config = config ffmpeg_process.terminate()
self.detected_objects = defaultdict(lambda: []) try:
self.frame_cache = {} print("Waiting for ffmpeg to exit gracefully...")
self.last_processed_frame = None ffmpeg_process.wait(timeout=30)
# queue for re-assembling frames in order except sp.TimeoutExpired:
self.frame_queue = queue.Queue() print("FFmpeg didnt exit. Force killing...")
# track how many regions have been requested for a frame so we know when a frame is complete ffmpeg_process.kill()
self.regions_in_process = {} ffmpeg_process.wait()
# Lock to control access
self.regions_in_process_lock = mp.Lock()
self.finished_frame_queue = queue.Queue()
self.refined_frame_queue = queue.Queue()
self.frame_output_queue = queue.Queue()
self.ffmpeg = config.get('ffmpeg', {}) print("Creating ffmpeg process...")
self.ffmpeg_input = get_ffmpeg_input(self.ffmpeg['input']) print(" ".join(ffmpeg_cmd))
self.ffmpeg_global_args = self.ffmpeg.get('global_args', ffmpeg_config['global_args']) return sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=frame_size*10)
self.ffmpeg_hwaccel_args = self.ffmpeg.get('hwaccel_args', ffmpeg_config['hwaccel_args'])
self.ffmpeg_input_args = self.ffmpeg.get('input_args', ffmpeg_config['input_args'])
self.ffmpeg_output_args = self.ffmpeg.get('output_args', ffmpeg_config['output_args'])
camera_objects_config = config.get('objects', {}) def track_camera(name, config, ffmpeg_global_config, global_objects_config, detection_queue, detected_objects_queue, fps, skipped_fps, detection_fps):
print(f"Starting process for {name}: {os.getpid()}")
self.take_frame = self.config.get('take_frame', 1) # Merge the ffmpeg config with the global config
self.watchdog_timeout = self.config.get('watchdog_timeout', 300) ffmpeg = config.get('ffmpeg', {})
self.snapshot_config = { ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
'show_timestamp': self.config.get('snapshots', {}).get('show_timestamp', True) ffmpeg_global_args = ffmpeg.get('global_args', ffmpeg_global_config['global_args'])
} ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', ffmpeg_global_config['hwaccel_args'])
self.regions = self.config['regions'] ffmpeg_input_args = ffmpeg.get('input_args', ffmpeg_global_config['input_args'])
if 'width' in self.config and 'height' in self.config: ffmpeg_output_args = ffmpeg.get('output_args', ffmpeg_global_config['output_args'])
self.frame_shape = (self.config['height'], self.config['width'], 3) ffmpeg_cmd = (['ffmpeg'] +
else: ffmpeg_global_args +
self.frame_shape = get_frame_shape(self.ffmpeg_input) ffmpeg_hwaccel_args +
self.frame_size = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2] ffmpeg_input_args +
self.mqtt_client = mqtt_client ['-i', ffmpeg_input] +
self.mqtt_topic_prefix = '{}/{}'.format(mqtt_prefix, self.name) ffmpeg_output_args +
# create shared value for storing the frame_time
self.frame_time = mp.Value('d', 0.0)
# Lock to control access to the frame
self.frame_lock = mp.Lock()
# Condition for notifying that a new frame is ready
self.frame_ready = mp.Condition()
# Condition for notifying that objects were tracked
self.objects_tracked = mp.Condition()
# Queue for prepped frames, max size set to (number of regions * 5)
self.resize_queue = queue.Queue()
# Queue for raw detected objects
self.detected_objects_queue = queue.Queue()
self.detected_objects_processor = DetectedObjectsProcessor(self)
self.detected_objects_processor.start()
# initialize the frame cache
self.cached_frame_with_objects = {
'frame_bytes': [],
'frame_time': 0
}
self.ffmpeg_process = None
self.capture_thread = None
self.fps = EventsPerSecond()
self.skipped_region_tracker = EventsPerSecond()
# combine tracked objects lists
self.objects_to_track = set().union(global_objects_config.get('track', ['person', 'car', 'truck']), camera_objects_config.get('track', []))
# merge object filters
global_object_filters = global_objects_config.get('filters', {})
camera_object_filters = camera_objects_config.get('filters', {})
objects_with_config = set().union(global_object_filters.keys(), camera_object_filters.keys())
self.object_filters = {}
for obj in objects_with_config:
self.object_filters[obj] = {**global_object_filters.get(obj, {}), **camera_object_filters.get(obj, {})}
# start a thread to track objects
self.object_tracker = ObjectTracker(self, 10)
self.object_tracker.start()
# start a thread to write tracked frames to disk
self.video_writer = VideoWriter(self)
self.video_writer.start()
# start a thread to queue resize requests for regions
self.region_requester = RegionRequester(self)
self.region_requester.start()
# start a thread to cache recent frames for processing
self.frame_tracker = FrameTracker(self.frame_time,
self.frame_ready, self.frame_lock, self.frame_cache)
self.frame_tracker.start()
# start a thread to resize regions
self.region_prepper = RegionPrepper(self, self.frame_cache, self.resize_queue, prepped_frame_queue)
self.region_prepper.start()
# start a thread to store the highest scoring recent frames for monitored object types
self.best_frames = BestFrames(self)
self.best_frames.start()
# start a thread to expire objects from the detected objects list
self.object_cleaner = ObjectCleaner(self)
self.object_cleaner.start()
# start a thread to refine regions when objects are clipped
self.dynamic_region_fps = EventsPerSecond()
self.region_refiner = RegionRefiner(self)
self.region_refiner.start()
self.dynamic_region_fps.start()
# start a thread to publish object scores
mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self)
mqtt_publisher.start()
# create a watchdog thread for capture process
self.watchdog = CameraWatchdog(self)
# load in the mask for object detection
if 'mask' in self.config:
self.mask = cv2.imread("/config/{}".format(self.config['mask']), cv2.IMREAD_GRAYSCALE)
else:
self.mask = None
if self.mask is None:
self.mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
self.mask[:] = 255
def start_or_restart_capture(self):
if not self.ffmpeg_process is None:
print("Terminating the existing ffmpeg process...")
self.ffmpeg_process.terminate()
try:
print("Waiting for ffmpeg to exit gracefully...")
self.ffmpeg_process.wait(timeout=30)
except sp.TimeoutExpired:
print("FFmpeg didnt exit. Force killing...")
self.ffmpeg_process.kill()
self.ffmpeg_process.wait()
print("Waiting for the capture thread to exit...")
self.capture_thread.join()
self.ffmpeg_process = None
self.capture_thread = None
# create the process to capture frames from the input stream and store in a shared array
print("Creating a new ffmpeg process...")
self.start_ffmpeg()
print("Creating a new capture thread...")
self.capture_thread = CameraCapture(self)
print("Starting a new capture thread...")
self.capture_thread.start()
self.fps.start()
self.skipped_region_tracker.start()
def start_ffmpeg(self):
ffmpeg_cmd = (['ffmpeg'] +
self.ffmpeg_global_args +
self.ffmpeg_hwaccel_args +
self.ffmpeg_input_args +
['-i', self.ffmpeg_input] +
self.ffmpeg_output_args +
['pipe:']) ['pipe:'])
print(" ".join(ffmpeg_cmd)) # Merge the tracked object config with the global config
camera_objects_config = config.get('objects', {})
# combine tracked objects lists
objects_to_track = set().union(global_objects_config.get('track', ['person', 'car', 'truck']), camera_objects_config.get('track', []))
# merge object filters
global_object_filters = global_objects_config.get('filters', {})
camera_object_filters = camera_objects_config.get('filters', {})
objects_with_config = set().union(global_object_filters.keys(), camera_object_filters.keys())
object_filters = {}
for obj in objects_with_config:
object_filters[obj] = {**global_object_filters.get(obj, {}), **camera_object_filters.get(obj, {})}
self.ffmpeg_process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=self.frame_size) expected_fps = config['fps']
take_frame = config.get('take_frame', 1)
def start(self): if 'width' in config and 'height' in config:
self.start_or_restart_capture() frame_shape = (config['height'], config['width'], 3)
self.watchdog.start() else:
frame_shape = get_frame_shape(ffmpeg_input)
def join(self): frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
self.capture_thread.join()
def get_capture_pid(self): try:
return self.ffmpeg_process.pid sa.delete(name)
except:
pass
def get_best(self, label): frame = sa.create(name, shape=frame_shape, dtype=np.uint8)
return self.best_frames.best_frames.get(label)
def stats(self): # load in the mask for object detection
return { if 'mask' in config:
'camera_fps': self.fps.eps(60), mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
'resize_queue': self.resize_queue.qsize(), else:
'frame_queue': self.frame_queue.qsize(), mask = None
'finished_frame_queue': self.finished_frame_queue.qsize(),
'refined_frame_queue': self.refined_frame_queue.qsize(),
'regions_in_process': self.regions_in_process,
'dynamic_regions_per_sec': self.dynamic_region_fps.eps(),
'skipped_regions_per_sec': self.skipped_region_tracker.eps(60)
}
def frame_with_objects(self, frame_time, tracked_objects=None): if mask is None:
if not frame_time in self.frame_cache: mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
frame = np.zeros(self.frame_shape, np.uint8) mask[:] = 255
else:
frame = self.frame_cache[frame_time].copy()
detected_objects = self.detected_objects[frame_time].copy() motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue)
for region in self.regions: object_tracker = ObjectTracker(10)
color = (255,255,255)
cv2.rectangle(frame, (region['x_offset'], region['y_offset']),
(region['x_offset']+region['size'], region['y_offset']+region['size']),
color, 2)
# draw the bounding boxes on the screen ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
if tracked_objects is None: plasma_client = plasma.connect("/tmp/plasma")
with self.object_tracker.tracked_objects_lock: frame_num = 0
tracked_objects = copy.deepcopy(self.object_tracker.tracked_objects) avg_wait = 0.0
fps_tracker = EventsPerSecond()
skipped_fps_tracker = EventsPerSecond()
fps_tracker.start()
skipped_fps_tracker.start()
object_detector.fps.start()
while True:
start = datetime.datetime.now().timestamp()
frame_bytes = ffmpeg_process.stdout.read(frame_size)
duration = datetime.datetime.now().timestamp()-start
avg_wait = (avg_wait*99+duration)/100
for obj in detected_objects: if not frame_bytes:
draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']), thickness=3) rc = ffmpeg_process.poll()
if rc is not None:
print(f"{name}: ffmpeg_process exited unexpectedly with {rc}")
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process)
time.sleep(10)
else:
print(f"{name}: ffmpeg_process is still running but didnt return any bytes")
continue
for id, obj in tracked_objects.items(): # limit frame rate
color = (0, 255,0) if obj['frame_time'] == frame_time else (255, 0, 0) frame_num += 1
draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], id, color=color, thickness=1, position='bl') if (frame_num % take_frame) != 0:
continue
# print a timestamp fps_tracker.update()
time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S") fps.value = fps_tracker.eps()
cv2.putText(frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2) detection_fps.value = object_detector.fps.eps()
# print fps frame_time = datetime.datetime.now().timestamp()
cv2.putText(frame, str(self.fps.eps())+'FPS', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
# convert to BGR # Store frame in numpy array
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) frame[:] = (np
.frombuffer(frame_bytes, np.uint8)
.reshape(frame_shape))
# encode the image into a jpg # look for motion
ret, jpg = cv2.imencode('.jpg', frame) motion_boxes = motion_detector.detect(frame)
return jpg.tobytes() # skip object detection if we are below the min_fps and wait time is less than half the average
if frame_num > 100 and fps.value < expected_fps-1 and duration < 0.5*avg_wait:
skipped_fps_tracker.update()
skipped_fps.value = skipped_fps_tracker.eps()
continue
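# Illustration of the running average used in the check above: avg_wait is a 1/100
# exponential moving average of the ffmpeg read time, so one slow read barely moves
# it (toy numbers, not measured values).
demo_avg_wait = 0.02
for demo_duration in [0.02, 0.02, 0.5, 0.02]:
    demo_avg_wait = (demo_avg_wait*99 + demo_duration)/100
print(round(demo_avg_wait, 4))  # 0.0248, still close to the typical 0.02s read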
def get_current_frame_with_objects(self): skipped_fps.value = skipped_fps_tracker.eps()
frame_time = self.last_processed_frame
if frame_time == self.cached_frame_with_objects['frame_time']:
return self.cached_frame_with_objects['frame_bytes']
frame_bytes = self.frame_with_objects(frame_time) tracked_objects = object_tracker.tracked_objects.values()
self.cached_frame_with_objects = { # merge areas of motion that intersect with a known tracked object into a single area to look at
'frame_bytes': frame_bytes, areas_of_interest = []
'frame_time': frame_time used_motion_boxes = []
} for obj in tracked_objects:
x_min, y_min, x_max, y_max = obj['box']
for m_index, motion_box in enumerate(motion_boxes):
if area(intersection(obj['box'], motion_box))/area(motion_box) > .5:
used_motion_boxes.append(m_index)
x_min = min(obj['box'][0], motion_box[0])
y_min = min(obj['box'][1], motion_box[1])
x_max = max(obj['box'][2], motion_box[2])
y_max = max(obj['box'][3], motion_box[3])
areas_of_interest.append((x_min, y_min, x_max, y_max))
unused_motion_boxes = set(range(0, len(motion_boxes))).difference(used_motion_boxes)
return frame_bytes # compute motion regions
motion_regions = [calculate_region(frame_shape, motion_boxes[i][0], motion_boxes[i][1], motion_boxes[i][2], motion_boxes[i][3], 1.2)
for i in unused_motion_boxes]
# compute tracked object regions
object_regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
for a in areas_of_interest]
# merge regions with high IOU
merged_regions = motion_regions+object_regions
while True:
max_iou = 0.0
max_indices = None
region_indices = range(len(merged_regions))
for a, b in itertools.combinations(region_indices, 2):
iou = intersection_over_union(merged_regions[a], merged_regions[b])
if iou > max_iou:
max_iou = iou
max_indices = (a, b)
if max_iou > 0.1:
a = merged_regions[max_indices[0]]
b = merged_regions[max_indices[1]]
merged_regions.append(calculate_region(frame_shape,
min(a[0], b[0]),
min(a[1], b[1]),
max(a[2], b[2]),
max(a[3], b[3]),
1
))
del merged_regions[max(max_indices[0], max_indices[1])]
del merged_regions[min(max_indices[0], max_indices[1])]
else:
break
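# Toy illustration of the merge loop above: two regions whose IOU is about 0.53
# (above the 0.1 cutoff) are combined, and calculate_region would then turn the
# combined extent back into a square region. The values are made up.
region_a = (100, 100, 400, 400)
region_b = (150, 150, 450, 450)
merged_extent = (min(region_a[0], region_b[0]), min(region_a[1], region_b[1]),
                 max(region_a[2], region_b[2]), max(region_a[3], region_b[3]))
print(merged_extent)  # (100, 100, 450, 450)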
# resize regions and detect
detections = []
for region in merged_regions:
tensor_input = create_tensor_input(frame, region)
region_detections = object_detector.detect(tensor_input)
for d in region_detections:
box = d[2]
size = region[2]-region[0]
x_min = int((box[1] * size) + region[0])
y_min = int((box[0] * size) + region[1])
x_max = int((box[3] * size) + region[0])
y_max = int((box[2] * size) + region[1])
det = (d[0],
d[1],
(x_min, y_min, x_max, y_max),
(x_max-x_min)*(y_max-y_min),
region)
if filtered(det, objects_to_track, object_filters, mask):
continue
detections.append(det)
#########
# merge objects, check for clipped objects and look again up to N times
#########
refining = True
refine_count = 0
while refining and refine_count < 4:
refining = False
# group by name
detected_object_groups = defaultdict(lambda: [])
for detection in detections:
detected_object_groups[detection[0]].append(detection)
selected_objects = []
for group in detected_object_groups.values():
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
boxes = [(o[2][0], o[2][1], o[2][2]-o[2][0], o[2][3]-o[2][1])
for o in group]
confidences = [o[1] for o in group]
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
for index in idxs:
obj = group[index[0]]
if clipped(obj, frame_shape): #obj['clipped']:
box = obj[2]
# calculate a new region that will hopefully get the entire object
region = calculate_region(frame_shape,
box[0], box[1],
box[2], box[3])
tensor_input = create_tensor_input(frame, region)
# run detection on new region
refined_detections = object_detector.detect(tensor_input)
for d in refined_detections:
box = d[2]
size = region[2]-region[0]
x_min = int((box[1] * size) + region[0])
y_min = int((box[0] * size) + region[1])
x_max = int((box[3] * size) + region[0])
y_max = int((box[2] * size) + region[1])
det = (d[0],
d[1],
(x_min, y_min, x_max, y_max),
(x_max-x_min)*(y_max-y_min),
region)
if filtered(det, objects_to_track, object_filters, mask):
continue
selected_objects.append(det)
refining = True
else:
selected_objects.append(obj)
# set the detections list to only include top, complete objects
# and new detections
detections = selected_objects
if refining:
refine_count += 1
# now that we have refined our detections, we need to track objects
object_tracker.match_and_update(frame_time, detections)
# put the frame in the plasma store
object_id = hashlib.sha1(str.encode(f"{name}{frame_time}")).digest()
plasma_client.put(frame, plasma.ObjectID(object_id))
# add to the queue
detected_objects_queue.put((name, frame_time, object_tracker.tracked_objects))
print(f"{name}: exiting subprocess")