mirror of https://github.com/blakeblackshear/frigate.git
docs rewrite
Binary file not shown (image removed, 1.8 MiB).
@@ -1,234 +0,0 @@
web_port: 5000

################
## List of detectors.
## Currently supported types: cpu, edgetpu
## EdgeTPU requires device as defined here: https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api
################
detectors:
  coral:
    type: edgetpu
    device: usb

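## A CPU-based detector (no Coral) might look like the commented sketch below; the
## detector name 'cpu1' is arbitrary, and a cpu detector is assumed not to need a
## 'device' entry.
# detectors:
#   cpu1:
#     type: cpu
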
mqtt:
  host: mqtt.server.com
  topic_prefix: frigate
  # client_id: frigate # Optional -- set to override default client id of 'frigate' if running multiple instances
  # user: username # Optional
  #################
  ## Environment variables that begin with 'FRIGATE_' may be referenced in {}.
  ##   password: '{FRIGATE_MQTT_PASSWORD}'
  #################
  # password: password # Optional

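# As a sketch of the 'FRIGATE_' substitution above (assuming frigate runs via
# docker-compose, which this file does not require), the variable could be provided as:
#
#   services:
#     frigate:
#       environment:
#         - FRIGATE_MQTT_PASSWORD=changeme
#
# With that in place, password: '{FRIGATE_MQTT_PASSWORD}' resolves at runtime.
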
################
# Global configuration for saving clips
################
save_clips:
  ###########
  # Maximum length of time to retain video during long events.
  # If an object is being tracked for longer than this amount of time, the cache
  # will begin to expire and the resulting clip will be the last x seconds of the event.
  ###########
  max_seconds: 300
  clips_dir: /clips
  cache_dir: /cache

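# For example, with max_seconds: 300 above, an object that stays tracked for ten
# minutes yields a clip covering roughly only the final five minutes of the event.
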
#################
# Default ffmpeg args. Optional and can be overwritten per camera.
# Should work with most RTSP cameras that send h264 video
# Built from the properties below with:
# "ffmpeg" + global_args + input_args + "-i" + input + output_args
#################
# ffmpeg:
#   global_args:
#     - -hide_banner
#     - -loglevel
#     - panic
#   hwaccel_args: []
#   input_args:
#     - -avoid_negative_ts
#     - make_zero
#     - -fflags
#     - nobuffer
#     - -flags
#     - low_delay
#     - -strict
#     - experimental
#     - -fflags
#     - +genpts+discardcorrupt
#     - -vsync
#     - drop
#     - -rtsp_transport
#     - tcp
#     - -stimeout
#     - '5000000'
#     - -use_wallclock_as_timestamps
#     - '1'
#   output_args:
#     - -f
#     - rawvideo
#     - -pix_fmt
#     - yuv420p

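# For reference, a rough sketch of the command those defaults assemble to, where
# <input> stands for a camera's input value (frigate builds the real invocation at
# runtime, so treat this as illustrative only):
#
#   ffmpeg -hide_banner -loglevel panic \
#     -avoid_negative_ts make_zero -fflags nobuffer -flags low_delay \
#     -strict experimental -fflags +genpts+discardcorrupt -vsync drop \
#     -rtsp_transport tcp -stimeout 5000000 -use_wallclock_as_timestamps 1 \
#     -i <input> -f rawvideo -pix_fmt yuv420p
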
####################
# Global object configuration. Applies to all cameras
# unless overridden at the camera level.
# Keys must be valid labels. By default, the model uses coco (https://dl.google.com/coral/canned_models/coco_labels.txt).
# All labels from the model are reported over MQTT. These values are used to filter out false positives.
# min_area (optional): minimum width*height of the bounding box for the detected object
# max_area (optional): maximum width*height of the bounding box for the detected object
# min_score (optional): minimum score for the object to initiate tracking
# threshold (optional): the minimum decimal percentage a tracked object's computed score must reach to be considered a true positive
####################
objects:
  track:
    - person
  filters:
    person:
      min_area: 5000
      max_area: 100000
      min_score: 0.5
      threshold: 0.85

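# Worked example for the person filter above: a detection with a 100x80 px bounding
# box has an area of 8000, which passes min_area: 5000 and max_area: 100000. A score
# of 0.6 is enough to start tracking (min_score: 0.5), but the object only counts as
# a true positive once its computed score reaches threshold: 0.85.
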
cameras:
  back:
    ffmpeg:
      ################
      # Source passed to ffmpeg after the -i parameter. Supports anything compatible with OpenCV and FFmpeg.
      # Environment variables that begin with 'FRIGATE_' may be referenced in {}
      ################
      input: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
      #################
      # These values will override default values for just this camera
      #################
      # global_args: []
      # hwaccel_args: []
      # input_args: []
      # output_args: []

    ################
    ## Optionally specify the resolution of the video feed. Frigate will try to auto detect if not specified
    ################
    # height: 1280
    # width: 720

    ################
    ## Specify the framerate of your camera
    ##
    ## NOTE: This should only be set in the event ffmpeg is unable to determine your camera's framerate
    ## on its own and the reported framerate for your camera in frigate is well over what is expected.
    ################
    # fps: 5

    ################
    ## Optional mask. Must be the same aspect ratio as your video feed. Value is any of the following:
    ## - name of a file in the config directory
    ## - base64 encoded image prefixed with 'base64,' eg. 'base64,asfasdfasdf....'
    ## - polygon of x,y coordinates prefixed with 'poly,' eg. 'poly,0,900,1080,900,1080,1920,0,1920'
    ##
    ## The mask works by looking at the bottom center of the bounding box for the detected
    ## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
    ## false positive. In my mask, the grass and driveway visible from my backdoor camera
    ## are white. The garage doors, sky, and trees (anywhere it would be impossible for a
    ## person to stand) are black.
    ##
    ## Masked areas are also ignored for motion detection.
    ################
    # mask: back-mask.bmp

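    # For illustration, the three accepted mask forms written out, using the placeholder
    # values from the notes above (they are not working masks):
    #   mask: back-mask.bmp
    #   mask: 'base64,asfasdfasdf....'
    #   mask: 'poly,0,900,1080,900,1080,1920,0,1920'
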
    ################
    # Allows you to limit the framerate within frigate for cameras that do not support
    # custom framerates. A value of 1 tells frigate to look at every frame, 2 every 2nd frame,
    # 3 every 3rd frame, etc.
    ################
    take_frame: 1

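    # For example, on a camera delivering 10 frames per second, take_frame: 2 would have
    # frigate evaluate roughly 5 frames per second.
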
    ################
    # The number of seconds to retain the highest scoring image for the best.jpg endpoint before allowing it
    # to be replaced by a newer image. Defaults to 60 seconds.
    ################
    best_image_timeout: 60

    ################
    # MQTT settings
    ################
    # mqtt:
    #   crop_to_region: True
    #   snapshot_height: 300

    ################
    # Zones
    ################
    zones:
      ################
      # Name of the zone
      ################
      front_steps:
        ####################
        # A list of x,y coordinates to define the polygon of the zone. The top
        # left corner is 0,0. Can also be a comma separated string of all x,y coordinates combined.
        # The same zone name can exist across multiple cameras if they have overlapping FOVs.
        # An object is determined to be in the zone based on whether or not the bottom center
        # of its bounding box is within the polygon. The polygon must have at least 3 points.
        # Coordinates can be generated at https://www.image-map.net/
        ####################
        coordinates:
          - 545,1077
          - 747,939
          - 788,805
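        # Equivalently, per the note above, the same polygon can be written as a single
        # comma separated string: coordinates: 545,1077,747,939,788,805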
        ################
        # Zone level object filters. These are applied in addition to the global and camera filters
        # and should be more restrictive than the global and camera filters. The global and camera
        # filters are applied upstream.
        ################
        filters:
          person:
            min_area: 5000
            max_area: 100000
            threshold: 0.8

    ################
    # This will save a clip for each object tracked by frigate along with a json file that contains
    # data related to the tracked object. This works by telling ffmpeg to write video segments to /cache
    # from the video stream without re-encoding. Clips are then created by using ffmpeg to merge segments
    # without re-encoding. The segments saved are unaltered from what frigate receives to avoid re-encoding.
    # They do not contain bounding boxes. These are optimized to capture "false_positive" examples for improving frigate.
    #
    # NOTE: This feature does not work if you have "-vsync drop" configured in your input params.
    #       This will only work for camera feeds that can be copied into the mp4 container format without
    #       encoding, such as h264. It may not work for some types of streams.
    ################
    save_clips:
      enabled: False
      #########
      # Number of seconds before the event to include in the clips
      #########
      pre_capture: 30
      #########
      # Objects to save clips for. Defaults to all tracked object types.
      #########
      # objects:
      #   - person

    ################
    # Configuration for the snapshots in the debug view and mqtt
    ################
    snapshots:
      show_timestamp: True
      draw_zones: False
      draw_bounding_boxes: True

    ################
    # Camera level object config. If defined, this is used instead of the global config.
    ################
    objects:
      track:
        - person
        - car
      filters:
        person:
          min_area: 5000
          max_area: 100000
          min_score: 0.5
          threshold: 0.85