mirror of
				https://github.com/blakeblackshear/frigate.git
				synced 2025-10-31 11:06:35 +08:00 
			
		
		
		
	Compare commits
	
		
			1 Commits
		
	
	
		
			v0.16.0
			...
			dependabot
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 0b2bfe655c | 
| @@ -108,8 +108,8 @@ imagestream | ||||
| imdecode | ||||
| imencode | ||||
| imread | ||||
| imutils | ||||
| imwrite | ||||
| inpoint | ||||
| interp | ||||
| iostat | ||||
| iotop | ||||
| @@ -265,7 +265,6 @@ tensorrt | ||||
| tflite | ||||
| thresholded | ||||
| timelapse | ||||
| titlecase | ||||
| tmpfs | ||||
| tobytes | ||||
| toggleable | ||||
|   | ||||
| @@ -73,7 +73,7 @@ body: | ||||
|     attributes: | ||||
|       label: Operating system | ||||
|       options: | ||||
|         - Home Assistant OS | ||||
|         - HassOS | ||||
|         - Debian | ||||
|         - Other Linux | ||||
|         - Proxmox | ||||
| @@ -87,7 +87,7 @@ body: | ||||
|     attributes: | ||||
|       label: Install method | ||||
|       options: | ||||
|         - Home Assistant Add-on | ||||
|         - HassOS Addon | ||||
|         - Docker Compose | ||||
|         - Docker CLI | ||||
|         - Proxmox via Docker | ||||
|   | ||||
| @@ -59,7 +59,7 @@ body: | ||||
|     attributes: | ||||
|       label: Operating system | ||||
|       options: | ||||
|         - Home Assistant OS | ||||
|         - HassOS | ||||
|         - Debian | ||||
|         - Other Linux | ||||
|         - Proxmox | ||||
| @@ -73,7 +73,7 @@ body: | ||||
|     attributes: | ||||
|       label: Install method | ||||
|       options: | ||||
|         - Home Assistant Add-on | ||||
|         - HassOS Addon | ||||
|         - Docker Compose | ||||
|         - Docker CLI | ||||
|         - Proxmox via Docker | ||||
|   | ||||
| @@ -53,7 +53,7 @@ body: | ||||
|     attributes: | ||||
|       label: Install method | ||||
|       options: | ||||
|         - Home Assistant Add-on | ||||
|         - HassOS Addon | ||||
|         - Docker Compose | ||||
|         - Docker CLI | ||||
|         - Proxmox via Docker | ||||
|   | ||||
| @@ -73,7 +73,7 @@ body: | ||||
|     attributes: | ||||
|       label: Install method | ||||
|       options: | ||||
|         - Home Assistant Add-on | ||||
|         - HassOS Addon | ||||
|         - Docker Compose | ||||
|         - Docker CLI | ||||
|         - Proxmox via Docker | ||||
|   | ||||
| @@ -69,7 +69,7 @@ body: | ||||
|     attributes: | ||||
|       label: Install method | ||||
|       options: | ||||
|         - Home Assistant Add-on | ||||
|         - HassOS Addon | ||||
|         - Docker Compose | ||||
|         - Docker CLI | ||||
|         - Proxmox via Docker | ||||
|   | ||||
							
								
								
									
										4
									
								
								.github/DISCUSSION_TEMPLATE/report-a-bug.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								.github/DISCUSSION_TEMPLATE/report-a-bug.yml
									
									
									
									
										vendored
									
									
								
							| @@ -97,7 +97,7 @@ body: | ||||
|     attributes: | ||||
|       label: Operating system | ||||
|       options: | ||||
|         - Home Assistant OS | ||||
|         - HassOS | ||||
|         - Debian | ||||
|         - Other Linux | ||||
|         - Proxmox | ||||
| @@ -111,7 +111,7 @@ body: | ||||
|     attributes: | ||||
|       label: Install method | ||||
|       options: | ||||
|         - Home Assistant Add-on | ||||
|         - HassOS Addon | ||||
|         - Docker Compose | ||||
|         - Docker CLI | ||||
|     validations: | ||||
|   | ||||
							
								
								
									
										9
									
								
								.github/pull_request_template.md
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										9
									
								
								.github/pull_request_template.md
									
									
									
									
										vendored
									
									
								
							| @@ -2,12 +2,12 @@ | ||||
| <!-- | ||||
|   Thank you! | ||||
|  | ||||
|   If you're introducing a new feature or significantly refactoring existing functionality, | ||||
|   we encourage you to start a discussion first. This helps ensure your idea aligns with | ||||
|   If you're introducing a new feature or significantly refactoring existing functionality,  | ||||
|   we encourage you to start a discussion first. This helps ensure your idea aligns with  | ||||
|   Frigate's development goals. | ||||
|  | ||||
|   Describe what this pull request does and how it will benefit users of Frigate. | ||||
|   Please describe in detail any considerations, breaking changes, etc. that are | ||||
|   Please describe in detail any considerations, breaking changes, etc. that are  | ||||
|   made in this pull request. | ||||
| --> | ||||
|  | ||||
| @@ -24,7 +24,7 @@ | ||||
| ## Additional information | ||||
|  | ||||
| - This PR fixes or closes issue: fixes # | ||||
| - This PR is related to issue: | ||||
| - This PR is related to issue:  | ||||
|  | ||||
| ## Checklist | ||||
|  | ||||
| @@ -35,5 +35,4 @@ | ||||
| - [ ] The code change is tested and works locally. | ||||
| - [ ] Local tests pass. **Your PR cannot be merged unless tests pass** | ||||
| - [ ] There is no commented out code in this PR. | ||||
| - [ ] UI changes including text have used i18n keys and have been added to the `en` locale. | ||||
| - [ ] The code has been formatted using Ruff (`ruff format frigate`) | ||||
|   | ||||
							
								
								
									
										7
									
								
								.github/workflows/ci.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										7
									
								
								.github/workflows/ci.yml
									
									
									
									
										vendored
									
									
								
							| @@ -41,7 +41,6 @@ jobs: | ||||
|           target: frigate | ||||
|           tags: ${{ steps.setup.outputs.image-name }}-amd64 | ||||
|           cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 | ||||
|           cache-to: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max | ||||
|   arm64_build: | ||||
|     runs-on: ubuntu-22.04-arm | ||||
|     name: ARM Build | ||||
| @@ -162,8 +161,8 @@ jobs: | ||||
|           files: docker/tensorrt/trt.hcl | ||||
|           set: | | ||||
|             tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt | ||||
|             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-tensorrt | ||||
|             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-tensorrt,mode=max | ||||
|             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 | ||||
|             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max | ||||
|       - name: AMD/ROCm general build | ||||
|         env: | ||||
|           AMDGPU: gfx | ||||
| @@ -177,7 +176,7 @@ jobs: | ||||
|           set: | | ||||
|             rocm.tags=${{ steps.setup.outputs.image-name }}-rocm | ||||
|             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-rocm,mode=max | ||||
|             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-rocm | ||||
|             *.cache-from=type=gha | ||||
|   arm64_extra_builds: | ||||
|     runs-on: ubuntu-22.04-arm | ||||
|     name: ARM Extra Build | ||||
|   | ||||
							
								
								
									
										4
									
								
								.github/workflows/release.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								.github/workflows/release.yml
									
									
									
									
										vendored
									
									
								
							| @@ -39,14 +39,14 @@ jobs: | ||||
|           STABLE_TAG=${BASE}:stable | ||||
|           PULL_TAG=${BASE}:${BUILD_TAG} | ||||
|           docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG} | ||||
|           for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do | ||||
|           for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do | ||||
|             docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant} | ||||
|           done | ||||
|  | ||||
|           # stable tag | ||||
|           if [[ "${BUILD_TYPE}" == "stable" ]]; then | ||||
|             docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG} | ||||
|             for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do | ||||
|             for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do | ||||
|               docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant} | ||||
|             done | ||||
|           fi | ||||
|   | ||||
							
								
								
									
										20
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										20
									
								
								README.md
									
									
									
									
									
								
							| @@ -4,15 +4,9 @@ | ||||
|  | ||||
| # Frigate - NVR With Realtime Object Detection for IP Cameras | ||||
|  | ||||
| <a href="https://hosted.weblate.org/engage/frigate-nvr/"> | ||||
| <img src="https://hosted.weblate.org/widget/frigate-nvr/language-badge.svg" alt="Translation status" /> | ||||
| </a> | ||||
|  | ||||
| \[English\] | [简体中文](https://github.com/blakeblackshear/frigate/blob/dev/README_CN.md) | ||||
|  | ||||
| A complete and local NVR designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras. | ||||
|  | ||||
| Use of a GPU or AI accelerator such as a [Google Coral](https://coral.ai/products/) or [Hailo](https://hailo.ai/) is highly recommended. AI accelerators will outperform even the best CPUs with very little overhead. | ||||
| Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but highly recommended. The Coral will outperform even the best CPUs and can process 100+ FPS with very little overhead. | ||||
|  | ||||
| - Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration) | ||||
| - Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary | ||||
| @@ -36,33 +30,21 @@ If you would like to make a donation to support development, please use [Github | ||||
| ## Screenshots | ||||
|  | ||||
| ### Live dashboard | ||||
|  | ||||
| <div> | ||||
| <img width="800" alt="Live dashboard" src="https://github.com/blakeblackshear/frigate/assets/569905/5e713cb9-9db5-41dc-947a-6937c3bc376e"> | ||||
| </div> | ||||
|  | ||||
| ### Streamlined review workflow | ||||
|  | ||||
| <div> | ||||
| <img width="800" alt="Streamlined review workflow" src="https://github.com/blakeblackshear/frigate/assets/569905/6fed96e8-3b18-40e5-9ddc-31e6f3c9f2ff"> | ||||
| </div> | ||||
|  | ||||
| ### Multi-camera scrubbing | ||||
|  | ||||
| <div> | ||||
| <img width="800" alt="Multi-camera scrubbing" src="https://github.com/blakeblackshear/frigate/assets/569905/d6788a15-0eeb-4427-a8d4-80b93cae3d74"> | ||||
| </div> | ||||
|  | ||||
| ### Built-in mask and zone editor | ||||
|  | ||||
| <div> | ||||
| <img width="800" alt="Multi-camera scrubbing" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5"> | ||||
| </div> | ||||
|  | ||||
| ## Translations | ||||
|  | ||||
| We use [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) to support language translations. Contributions are always welcome. | ||||
|  | ||||
| <a href="https://hosted.weblate.org/engage/frigate-nvr/"> | ||||
| <img src="https://hosted.weblate.org/widget/frigate-nvr/multi-auto.svg" alt="Translation status" /> | ||||
| </a> | ||||
|   | ||||
							
								
								
									
										70
									
								
								README_CN.md
									
									
									
									
									
								
							
							
						
						
									
										70
									
								
								README_CN.md
									
									
									
									
									
								
							| @@ -1,70 +0,0 @@ | ||||
| <p align="center"> | ||||
|   <img align="center" alt="logo" src="docs/static/img/frigate.png"> | ||||
| </p> | ||||
|  | ||||
| # Frigate - 一个具有实时目标检测的本地NVR | ||||
|  | ||||
| [English](https://github.com/blakeblackshear/frigate) | \[简体中文\]  | ||||
|  | ||||
| <a href="https://hosted.weblate.org/engage/frigate-nvr/-/zh_Hans/"> | ||||
| <img src="https://hosted.weblate.org/widget/frigate-nvr/-/zh_Hans/svg-badge.svg" alt="翻译状态" /> | ||||
| </a> | ||||
|  | ||||
| 一个完整的本地网络视频录像机(NVR),专为[Home Assistant](https://www.home-assistant.io)设计,具备AI物体检测功能。使用OpenCV和TensorFlow在本地为IP摄像头执行实时物体检测。 | ||||
|  | ||||
| 强烈推荐使用GPU或者AI加速器(例如[Google Coral加速器](https://coral.ai/products/) 或者 [Hailo](https://hailo.ai/))。它们的性能甚至超过目前的顶级CPU,并且可以以极低的耗电实现更优的性能。 | ||||
| - 通过[自定义组件](https://github.com/blakeblackshear/frigate-hass-integration)与Home Assistant紧密集成 | ||||
| - 设计上通过仅在必要时和必要地点寻找物体,最大限度地减少资源使用并最大化性能 | ||||
| - 大量利用多进程处理,强调实时性而非处理每一帧 | ||||
| - 使用非常低开销的运动检测来确定运行物体检测的位置 | ||||
| - 使用TensorFlow进行物体检测,运行在单独的进程中以达到最大FPS | ||||
| - 通过MQTT进行通信,便于集成到其他系统中 | ||||
| - 根据检测到的物体设置保留时间进行视频录制 | ||||
| - 24/7全天候录制 | ||||
| - 通过RTSP重新流传输以减少摄像头的连接数 | ||||
| - 支持WebRTC和MSE,实现低延迟的实时观看 | ||||
|  | ||||
| ## 社区中文翻译文档 | ||||
|  | ||||
| 你可以在这里查看文档 https://docs.frigate-cn.video | ||||
|  | ||||
| ## 赞助 | ||||
|  | ||||
| 如果您想通过捐赠支持开发,请使用 [Github Sponsors](https://github.com/sponsors/blakeblackshear)。 | ||||
|  | ||||
| ## 截图 | ||||
|  | ||||
| ### 实时监控面板 | ||||
| <div> | ||||
| <img width="800" alt="实时监控面板" src="https://github.com/blakeblackshear/frigate/assets/569905/5e713cb9-9db5-41dc-947a-6937c3bc376e"> | ||||
| </div> | ||||
|  | ||||
| ### 简单的核查工作流程 | ||||
| <div> | ||||
| <img width="800" alt="简单的审查工作流程" src="https://github.com/blakeblackshear/frigate/assets/569905/6fed96e8-3b18-40e5-9ddc-31e6f3c9f2ff"> | ||||
| </div> | ||||
|  | ||||
| ### 多摄像头可按时间轴查看 | ||||
| <div> | ||||
| <img width="800" alt="多摄像头可按时间轴查看" src="https://github.com/blakeblackshear/frigate/assets/569905/d6788a15-0eeb-4427-a8d4-80b93cae3d74"> | ||||
| </div> | ||||
|  | ||||
| ### 内置遮罩和区域编辑器 | ||||
| <div> | ||||
| <img width="800" alt="内置遮罩和区域编辑器" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5"> | ||||
| </div> | ||||
|  | ||||
|  | ||||
| ## 翻译 | ||||
| 我们使用 [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) 平台提供翻译支持,欢迎参与进来一起完善。 | ||||
|  | ||||
|  | ||||
| ## 非官方中文讨论社区 | ||||
| 欢迎加入中文讨论QQ群:[1043861059](https://qm.qq.com/q/7vQKsTmSz) | ||||
|  | ||||
| Bilibili:https://space.bilibili.com/3546894915602564 | ||||
|  | ||||
|  | ||||
| ## 中文社区赞助商 | ||||
| [](https://edgeone.ai/zh?from=github) | ||||
| 本项目 CDN 加速及安全防护由 Tencent EdgeOne 赞助 | ||||
| @@ -6,7 +6,7 @@ import numpy as np | ||||
|  | ||||
| import frigate.util as util | ||||
| from frigate.config import DetectorTypeEnum | ||||
| from frigate.object_detection.base import ( | ||||
| from frigate.object_detection import ( | ||||
|     ObjectDetectProcess, | ||||
|     RemoteObjectDetector, | ||||
|     load_labels, | ||||
|   | ||||
| @@ -1,8 +1,8 @@ | ||||
| version: "3" | ||||
| services: | ||||
|   devcontainer: | ||||
|     container_name: frigate-devcontainer | ||||
|     # Check host system's actual render/video/plugdev group IDs with 'getent group render', 'getent group video', and 'getent group plugdev' | ||||
|     # Must add these exact IDs in container's group_add section or OpenVINO GPU acceleration will fail | ||||
|     # add groups from host for render, plugdev, video | ||||
|     group_add: | ||||
|       - "109" # render | ||||
|       - "110" # render | ||||
| @@ -24,8 +24,8 @@ services: | ||||
|     #                     capabilities: [gpu] | ||||
|     environment: | ||||
|       YOLO_MODELS: "" | ||||
|     # devices: | ||||
|       # - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB | ||||
|     devices: | ||||
|       - /dev/bus/usb:/dev/bus/usb | ||||
|       # - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware | ||||
|     volumes: | ||||
|       - .:/workspace/frigate:cached | ||||
| @@ -33,10 +33,9 @@ services: | ||||
|       - /etc/localtime:/etc/localtime:ro | ||||
|       - ./config:/config | ||||
|       - ./debug:/media/frigate | ||||
|      # - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB | ||||
|       - /dev/bus/usb:/dev/bus/usb | ||||
|   mqtt: | ||||
|     container_name: mqtt | ||||
|     image: eclipse-mosquitto:2.0 | ||||
|     command: mosquitto -c /mosquitto-no-auth.conf # enable no-auth mode | ||||
|     image: eclipse-mosquitto:1.6 | ||||
|     ports: | ||||
|       - "1883:1883" | ||||
|       - "1883:1883" | ||||
| @@ -4,7 +4,7 @@ | ||||
| sudo apt-get update | ||||
| sudo apt-get install -y build-essential cmake git wget | ||||
|  | ||||
| hailo_version="4.21.0" | ||||
| hailo_version="4.20.0" | ||||
| arch=$(uname -m) | ||||
|  | ||||
| if [[ $arch == "x86_64" ]]; then | ||||
|   | ||||
| @@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ | ||||
| FROM scratch AS go2rtc | ||||
| ARG TARGETARCH | ||||
| WORKDIR /rootfs/usr/local/go2rtc/bin | ||||
| ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.9/go2rtc_linux_${TARGETARCH}" go2rtc | ||||
| ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.2/go2rtc_linux_${TARGETARCH}" go2rtc | ||||
|  | ||||
| FROM wget AS tempio | ||||
| ARG TARGETARCH | ||||
| @@ -78,9 +78,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt | ||||
| RUN apt-get -qq update \ | ||||
|     && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \ | ||||
|     && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ | ||||
|     && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ | ||||
|     && python3 get-pip.py "pip" \ | ||||
|     && pip3 install -r /requirements-ov.txt | ||||
|     && pip install -r /requirements-ov.txt | ||||
|  | ||||
| # Get OpenVino Model | ||||
| RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \ | ||||
| @@ -173,7 +172,6 @@ RUN apt-get -qq update \ | ||||
| RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 | ||||
|  | ||||
| RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ | ||||
|     && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ | ||||
|     && python3 get-pip.py "pip" | ||||
|  | ||||
| COPY docker/main/requirements.txt /requirements.txt | ||||
| @@ -237,7 +235,6 @@ ENV DEFAULT_FFMPEG_VERSION="7.0" | ||||
| ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0" | ||||
|  | ||||
| RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ | ||||
|     && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ | ||||
|     && python3 get-pip.py "pip" | ||||
|  | ||||
| RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \ | ||||
| @@ -260,12 +257,12 @@ ENTRYPOINT ["/init"] | ||||
| CMD [] | ||||
|  | ||||
| HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ | ||||
|     CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 | ||||
|     CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 | ||||
|  | ||||
| # Frigate deps with Node.js and NPM for devcontainer | ||||
| FROM deps AS devcontainer | ||||
|  | ||||
| # Do not start the actual Frigate service on devcontainer as it will be started by VS Code | ||||
| # Do not start the actual Frigate service on devcontainer as it will be started by VSCode | ||||
| # But start a fake service for simulating the logs | ||||
| COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run | ||||
|  | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
|  | ||||
| set -euxo pipefail | ||||
|  | ||||
| NGINX_VERSION="1.27.4" | ||||
| NGINX_VERSION="1.25.3" | ||||
| VOD_MODULE_VERSION="1.31" | ||||
| SECURE_TOKEN_MODULE_VERSION="1.5" | ||||
| SET_MISC_MODULE_VERSION="v0.33" | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
|  | ||||
| set -euxo pipefail | ||||
|  | ||||
| hailo_version="4.21.0" | ||||
| hailo_version="4.20.0" | ||||
|  | ||||
| if [[ "${TARGETARCH}" == "amd64" ]]; then | ||||
|     arch="x86_64" | ||||
| @@ -10,5 +10,5 @@ elif [[ "${TARGETARCH}" == "arm64" ]]; then | ||||
|     arch="aarch64" | ||||
| fi | ||||
|  | ||||
| wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf - | ||||
| wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" | tar -C / -xzf - | ||||
| wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl" | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
|  | ||||
| set -euxo pipefail | ||||
|  | ||||
| s6_version="3.2.1.0" | ||||
| s6_version="3.1.5.0" | ||||
|  | ||||
| if [[ "${TARGETARCH}" == "amd64" ]]; then | ||||
|     s6_arch="x86_64" | ||||
|   | ||||
| @@ -7,10 +7,11 @@ starlette-context == 0.3.6 | ||||
| fastapi == 0.115.* | ||||
| uvicorn == 0.30.* | ||||
| slowapi == 0.1.* | ||||
| imutils == 0.5.* | ||||
| joserfc == 1.0.* | ||||
| pathvalidate == 3.2.* | ||||
| markupsafe == 3.0.* | ||||
| python-multipart == 0.0.12 | ||||
| python-multipart == 0.0.20 | ||||
| # General | ||||
| mypy == 1.6.1 | ||||
| onvif-zeep-async == 3.1.* | ||||
| @@ -31,7 +32,6 @@ norfair == 2.2.* | ||||
| setproctitle == 1.3.* | ||||
| ws4py == 0.5.* | ||||
| unidecode == 1.3.* | ||||
| titlecase == 2.4.* | ||||
| # Image Manipulation | ||||
| numpy == 1.26.* | ||||
| opencv-python-headless == 4.11.0.* | ||||
|   | ||||
| @@ -4,16 +4,44 @@ | ||||
|  | ||||
| set -o errexit -o nounset -o pipefail | ||||
|  | ||||
| # opt out of openvino telemetry | ||||
| if [ -e /usr/local/bin/opt_in_out ]; then | ||||
|   /usr/local/bin/opt_in_out --opt_out > /dev/null 2>&1 | ||||
| fi | ||||
|  | ||||
| # Logs should be sent to stdout so that s6 can collect them | ||||
|  | ||||
| # Tell S6-Overlay not to restart this service | ||||
| s6-svc -O . | ||||
|  | ||||
| function migrate_db_path() { | ||||
|     # Find config file in yaml or yml, but prefer yaml | ||||
|     local config_file="${CONFIG_FILE:-"/config/config.yml"}" | ||||
|     local config_file_yaml="${config_file//.yml/.yaml}" | ||||
|     if [[ -f "${config_file_yaml}" ]]; then | ||||
|         config_file="${config_file_yaml}" | ||||
|     elif [[ ! -f "${config_file}" ]]; then | ||||
|         # Frigate will create the config file on startup | ||||
|         return 0 | ||||
|     fi | ||||
|     unset config_file_yaml | ||||
|  | ||||
|     # Use yq to check if database.path is set | ||||
|     local user_db_path | ||||
|     user_db_path=$(yq eval '.database.path' "${config_file}") | ||||
|  | ||||
|     if [[ "${user_db_path}" == "null" ]]; then | ||||
|         local previous_db_path="/media/frigate/frigate.db" | ||||
|         local new_db_dir="/config" | ||||
|         if [[ -f "${previous_db_path}" ]]; then | ||||
|             if mountpoint --quiet "${new_db_dir}"; then | ||||
|                 # /config is a mount point, move the db | ||||
|                 echo "[INFO] Moving db from '${previous_db_path}' to the '${new_db_dir}' dir..." | ||||
|                 # Move all files that starts with frigate.db to the new directory | ||||
|                 mv -vf "${previous_db_path}"* "${new_db_dir}" | ||||
|             else | ||||
|                 echo "[ERROR] Trying to migrate the db path from '${previous_db_path}' to the '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" | ||||
|                 return 1 | ||||
|             fi | ||||
|         fi | ||||
|     fi | ||||
| } | ||||
|  | ||||
| function set_libva_version() { | ||||
|     local ffmpeg_path | ||||
|     ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py) | ||||
| @@ -22,8 +50,8 @@ function set_libva_version() { | ||||
| } | ||||
|  | ||||
| echo "[INFO] Preparing Frigate..." | ||||
| migrate_db_path | ||||
| set_libva_version | ||||
|  | ||||
| echo "[INFO] Starting Frigate..." | ||||
|  | ||||
| cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" | ||||
|   | ||||
| @@ -61,7 +61,7 @@ if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then | ||||
|     echo "[INFO] Preparing new go2rtc config..." | ||||
|  | ||||
|     if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then | ||||
|         # Running as a Home Assistant Add-on, infer the IP address and port | ||||
|         # Running as a Home Assistant add-on, infer the IP address and port | ||||
|         get_ip_and_port_from_supervisor | ||||
|     fi | ||||
|  | ||||
|   | ||||
| @@ -79,11 +79,6 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain. | ||||
|         -keyout "$letsencrypt_path/privkey.pem" -out "$letsencrypt_path/fullchain.pem" 2>/dev/null | ||||
| fi | ||||
|  | ||||
| # build templates for optional FRIGATE_BASE_PATH environment variable | ||||
| python3 /usr/local/nginx/get_base_path.py | \ | ||||
|     tempio -template /usr/local/nginx/templates/base_path.gotmpl \ | ||||
|        -out /usr/local/nginx/conf/base_path.conf | ||||
|  | ||||
| # build templates for optional TLS support | ||||
| python3 /usr/local/nginx/get_tls_settings.py | \ | ||||
|     tempio  -template /usr/local/nginx/templates/listen.gotmpl \ | ||||
|   | ||||
| @@ -1,146 +0,0 @@ | ||||
| #!/command/with-contenv bash | ||||
| # shellcheck shell=bash | ||||
| # Do preparation tasks before starting the main services | ||||
|  | ||||
| set -o errexit -o nounset -o pipefail | ||||
|  | ||||
| function migrate_addon_config_dir() { | ||||
|     local home_assistant_config_dir="/homeassistant" | ||||
|  | ||||
|     if ! mountpoint --quiet "${home_assistant_config_dir}"; then | ||||
|         # Not running as a Home Assistant Add-on | ||||
|         return 0 | ||||
|     fi | ||||
|  | ||||
|     local config_dir="/config" | ||||
|     local new_config_file="${config_dir}/config.yml" | ||||
|     local new_config_file_yaml="${new_config_file//.yml/.yaml}" | ||||
|     if [[ -f "${new_config_file_yaml}" || -f "${new_config_file}" ]]; then | ||||
|         # Already migrated | ||||
|         return 0 | ||||
|     fi | ||||
|  | ||||
|     local old_config_file="${home_assistant_config_dir}/frigate.yml" | ||||
|     local old_config_file_yaml="${old_config_file//.yml/.yaml}" | ||||
|     if [[ -f "${old_config_file}" ]]; then | ||||
|         : | ||||
|     elif [[ -f "${old_config_file_yaml}" ]]; then | ||||
|         old_config_file="${old_config_file_yaml}" | ||||
|         new_config_file="${new_config_file_yaml}" | ||||
|     else | ||||
|         # Nothing to migrate | ||||
|         return 0 | ||||
|     fi | ||||
|     unset old_config_file_yaml new_config_file_yaml | ||||
|  | ||||
|     echo "[INFO] Starting migration from Home Assistant config dir to Add-on config dir..." >&2 | ||||
|  | ||||
|     local db_path | ||||
|     db_path=$(yq -r '.database.path' "${old_config_file}") | ||||
|     if [[ "${db_path}" == "null" ]]; then | ||||
|         db_path="${config_dir}/frigate.db" | ||||
|     fi | ||||
|     if [[ "${db_path}" == "${config_dir}/"* ]]; then | ||||
|         # replace /config/ prefix with /homeassistant/ | ||||
|         local old_db_path="${home_assistant_config_dir}/${db_path:8}" | ||||
|  | ||||
|         if [[ -f "${old_db_path}" ]]; then | ||||
|             local new_db_dir | ||||
|             new_db_dir="$(dirname "${db_path}")" | ||||
|             echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2 | ||||
|             mkdir -vp "${new_db_dir}" | ||||
|             mv -vf "${old_db_path}" "${new_db_dir}" | ||||
|             local db_file | ||||
|             for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do | ||||
|                 if [[ -f "${db_file}" ]]; then | ||||
|                     mv -vf "${db_file}" "${new_db_dir}" | ||||
|                 fi | ||||
|             done | ||||
|             unset db_file | ||||
|         fi | ||||
|     fi | ||||
|  | ||||
|     local config_entry | ||||
|     for config_entry in .model.path .model.labelmap_path .ffmpeg.path .mqtt.tls_ca_certs .mqtt.tls_client_cert .mqtt.tls_client_key; do | ||||
|         local config_entry_path | ||||
|         config_entry_path=$(yq -r "${config_entry}" "${old_config_file}") | ||||
|         if [[ "${config_entry_path}" == "${config_dir}/"* ]]; then | ||||
|             # replace /config/ prefix with /homeassistant/ | ||||
|             local old_config_entry_path="${home_assistant_config_dir}/${config_entry_path:8}" | ||||
|  | ||||
|             if [[ -f "${old_config_entry_path}" ]]; then | ||||
|                 local new_config_entry_entry | ||||
|                 new_config_entry_entry="$(dirname "${config_entry_path}")" | ||||
|                 echo "[INFO] Migrating ${config_entry} from '${old_config_entry_path}' to '${config_entry_path}'..." >&2 | ||||
|                 mkdir -vp "${new_config_entry_entry}" | ||||
|                 mv -vf "${old_config_entry_path}" "${config_entry_path}" | ||||
|             fi | ||||
|         fi | ||||
|     done | ||||
|  | ||||
|     local old_model_cache_path="${home_assistant_config_dir}/model_cache" | ||||
|     if [[ -d "${old_model_cache_path}" ]]; then | ||||
|         echo "[INFO] Migrating '${old_model_cache_path}' to '${config_dir}'..." >&2 | ||||
|         mv -f "${old_model_cache_path}" "${config_dir}" | ||||
|     fi | ||||
|  | ||||
|     echo "[INFO] Migrating other files from '${home_assistant_config_dir}' to '${config_dir}'..." >&2 | ||||
|     local file | ||||
|     for file in .exports .jwt_secret .timeline .vacuum go2rtc; do | ||||
|         file="${home_assistant_config_dir}/${file}" | ||||
|         if [[ -f "${file}" ]]; then | ||||
|             mv -vf "${file}" "${config_dir}" | ||||
|         fi | ||||
|     done | ||||
|  | ||||
|     echo "[INFO] Migrating config file from '${old_config_file}' to '${new_config_file}'..." >&2 | ||||
|     mv -vf "${old_config_file}" "${new_config_file}" | ||||
|  | ||||
|     echo "[INFO] Migration from Home Assistant config dir to Add-on config dir completed." >&2 | ||||
| } | ||||
|  | ||||
| function migrate_db_from_media_to_config() { | ||||
|     # Find config file in yml or yaml, but prefer yml | ||||
|     local config_file="${CONFIG_FILE:-"/config/config.yml"}" | ||||
|     local config_file_yaml="${config_file//.yml/.yaml}" | ||||
|     if [[ -f "${config_file}" ]]; then | ||||
|         : | ||||
|     elif [[ -f "${config_file_yaml}" ]]; then | ||||
|         config_file="${config_file_yaml}" | ||||
|     else | ||||
|         # Frigate will create the config file on startup | ||||
|         return 0 | ||||
|     fi | ||||
|     unset config_file_yaml | ||||
|  | ||||
|     local user_db_path | ||||
|     user_db_path=$(yq -r '.database.path' "${config_file}") | ||||
|     if [[ "${user_db_path}" == "null" ]]; then | ||||
|         local old_db_path="/media/frigate/frigate.db" | ||||
|         local new_db_dir="/config" | ||||
|         if [[ -f "${old_db_path}" ]]; then | ||||
|             echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2 | ||||
|             if mountpoint --quiet "${new_db_dir}"; then | ||||
|                 # /config is a mount point, move the db | ||||
|                 mv -vf "${old_db_path}" "${new_db_dir}" | ||||
|                 local db_file | ||||
|                 for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do | ||||
|                     if [[ -f "${db_file}" ]]; then | ||||
|                         mv -vf "${db_file}" "${new_db_dir}" | ||||
|                     fi | ||||
|                 done | ||||
|                 unset db_file | ||||
|             else | ||||
|                 echo "[ERROR] Trying to migrate the database path from '${old_db_path}' to '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" >&2 | ||||
|                 return 1 | ||||
|             fi | ||||
|         fi | ||||
|     fi | ||||
| } | ||||
|  | ||||
| # remove leftover from last run, not normally needed, but just in case | ||||
| # used by the docker healthcheck | ||||
| rm -f /dev/shm/.frigate-is-stopping | ||||
|  | ||||
| migrate_addon_config_dir | ||||
| migrate_db_from_media_to_config | ||||
| @@ -1 +0,0 @@ | ||||
| oneshot | ||||
| @@ -1 +0,0 @@ | ||||
| /etc/s6-overlay/s6-rc.d/prepare/run | ||||
| @@ -1,6 +1,6 @@ | ||||
| import json | ||||
| import os | ||||
| import sys | ||||
| from typing import Any | ||||
|  | ||||
| from ruamel.yaml import YAML | ||||
|  | ||||
| @@ -9,24 +9,28 @@ from frigate.const import ( | ||||
|     DEFAULT_FFMPEG_VERSION, | ||||
|     INCLUDED_FFMPEG_VERSIONS, | ||||
| ) | ||||
| from frigate.util.config import find_config_file | ||||
|  | ||||
| sys.path.remove("/opt/frigate") | ||||
|  | ||||
| yaml = YAML() | ||||
|  | ||||
| config_file = find_config_file() | ||||
| config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") | ||||
|  | ||||
| # Check if we can use .yaml instead of .yml | ||||
| config_file_yaml = config_file.replace(".yml", ".yaml") | ||||
| if os.path.isfile(config_file_yaml): | ||||
|     config_file = config_file_yaml | ||||
|  | ||||
| try: | ||||
|     with open(config_file) as f: | ||||
|         raw_config = f.read() | ||||
|  | ||||
|     if config_file.endswith((".yaml", ".yml")): | ||||
|         config: dict[str, Any] = yaml.load(raw_config) | ||||
|         config: dict[str, any] = yaml.load(raw_config) | ||||
|     elif config_file.endswith(".json"): | ||||
|         config: dict[str, Any] = json.loads(raw_config) | ||||
|         config: dict[str, any] = json.loads(raw_config) | ||||
| except FileNotFoundError: | ||||
|     config: dict[str, Any] = {} | ||||
|     config: dict[str, any] = {} | ||||
|  | ||||
| path = config.get("ffmpeg", {}).get("path", "default") | ||||
| if path == "default": | ||||
|   | ||||
| @@ -4,7 +4,6 @@ import json | ||||
| import os | ||||
| import sys | ||||
| from pathlib import Path | ||||
| from typing import Any | ||||
|  | ||||
| from ruamel.yaml import YAML | ||||
|  | ||||
| @@ -16,7 +15,6 @@ from frigate.const import ( | ||||
|     LIBAVFORMAT_VERSION_MAJOR, | ||||
| ) | ||||
| from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode | ||||
| from frigate.util.config import find_config_file | ||||
|  | ||||
| sys.path.remove("/opt/frigate") | ||||
|  | ||||
| @@ -31,20 +29,25 @@ if os.path.isdir("/run/secrets"): | ||||
|                 Path(os.path.join("/run/secrets", secret_file)).read_text().strip() | ||||
|             ) | ||||
|  | ||||
| config_file = find_config_file() | ||||
| config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") | ||||
|  | ||||
| # Check if we can use .yaml instead of .yml | ||||
| config_file_yaml = config_file.replace(".yml", ".yaml") | ||||
| if os.path.isfile(config_file_yaml): | ||||
|     config_file = config_file_yaml | ||||
|  | ||||
| try: | ||||
|     with open(config_file) as f: | ||||
|         raw_config = f.read() | ||||
|  | ||||
|     if config_file.endswith((".yaml", ".yml")): | ||||
|         config: dict[str, Any] = yaml.load(raw_config) | ||||
|         config: dict[str, any] = yaml.load(raw_config) | ||||
|     elif config_file.endswith(".json"): | ||||
|         config: dict[str, Any] = json.loads(raw_config) | ||||
|         config: dict[str, any] = json.loads(raw_config) | ||||
| except FileNotFoundError: | ||||
|     config: dict[str, Any] = {} | ||||
|     config: dict[str, any] = {} | ||||
|  | ||||
| go2rtc_config: dict[str, Any] = config.get("go2rtc", {}) | ||||
| go2rtc_config: dict[str, any] = config.get("go2rtc", {}) | ||||
|  | ||||
| # Need to enable CORS for go2rtc so the frigate integration / card work automatically | ||||
| if go2rtc_config.get("api") is None: | ||||
| @@ -54,7 +57,7 @@ elif go2rtc_config["api"].get("origin") is None: | ||||
|  | ||||
| # Need to set default location for HA config | ||||
| if go2rtc_config.get("hass") is None: | ||||
|     go2rtc_config["hass"] = {"config": "/homeassistant"} | ||||
|     go2rtc_config["hass"] = {"config": "/config"} | ||||
|  | ||||
| # we want to ensure that logs are easy to read | ||||
| if go2rtc_config.get("log") is None: | ||||
| @@ -66,6 +69,10 @@ elif go2rtc_config["log"].get("format") is None: | ||||
| if go2rtc_config.get("webrtc") is None: | ||||
|     go2rtc_config["webrtc"] = {} | ||||
|  | ||||
| # go2rtc should listen on 8555 tcp & udp by default | ||||
| if go2rtc_config["webrtc"].get("listen") is None: | ||||
|     go2rtc_config["webrtc"]["listen"] = ":8555" | ||||
|  | ||||
| if go2rtc_config["webrtc"].get("candidates") is None: | ||||
|     default_candidates = [] | ||||
|     # use internal candidate if it was discovered when running through the add-on | ||||
| @@ -77,15 +84,33 @@ if go2rtc_config["webrtc"].get("candidates") is None: | ||||
|  | ||||
|     go2rtc_config["webrtc"]["candidates"] = default_candidates | ||||
|  | ||||
| if go2rtc_config.get("rtsp", {}).get("username") is not None: | ||||
|     go2rtc_config["rtsp"]["username"] = go2rtc_config["rtsp"]["username"].format( | ||||
|         **FRIGATE_ENV_VARS | ||||
|     ) | ||||
| # This prevents WebRTC from attempting to establish a connection to the internal | ||||
| # docker IPs which are not accessible from outside the container itself and just | ||||
| # wastes time during negotiation. Note that this is only necessary because | ||||
| # Frigate container doesn't run in host network mode. | ||||
| if go2rtc_config["webrtc"].get("filter") is None: | ||||
|     go2rtc_config["webrtc"]["filter"] = {"candidates": []} | ||||
| elif go2rtc_config["webrtc"]["filter"].get("candidates") is None: | ||||
|     go2rtc_config["webrtc"]["filter"]["candidates"] = [] | ||||
|  | ||||
| if go2rtc_config.get("rtsp", {}).get("password") is not None: | ||||
|     go2rtc_config["rtsp"]["password"] = go2rtc_config["rtsp"]["password"].format( | ||||
|         **FRIGATE_ENV_VARS | ||||
|     ) | ||||
| # sets default RTSP response to be equivalent to ?video=h264,h265&audio=aac | ||||
| # this means user does not need to specify audio codec when using restream | ||||
| # as source for frigate and the integration supports HLS playback | ||||
| if go2rtc_config.get("rtsp") is None: | ||||
|     go2rtc_config["rtsp"] = {"default_query": "mp4"} | ||||
| else: | ||||
|     if go2rtc_config["rtsp"].get("default_query") is None: | ||||
|         go2rtc_config["rtsp"]["default_query"] = "mp4" | ||||
|  | ||||
|     if go2rtc_config["rtsp"].get("username") is not None: | ||||
|         go2rtc_config["rtsp"]["username"] = go2rtc_config["rtsp"]["username"].format( | ||||
|             **FRIGATE_ENV_VARS | ||||
|         ) | ||||
|  | ||||
|     if go2rtc_config["rtsp"].get("password") is not None: | ||||
|         go2rtc_config["rtsp"]["password"] = go2rtc_config["rtsp"]["password"].format( | ||||
|             **FRIGATE_ENV_VARS | ||||
|         ) | ||||
|  | ||||
| # ensure ffmpeg path is set correctly | ||||
| path = config.get("ffmpeg", {}).get("path", "default") | ||||
| @@ -103,7 +128,7 @@ elif go2rtc_config["ffmpeg"].get("bin") is None: | ||||
|  | ||||
| # need to replace ffmpeg command when using ffmpeg4 | ||||
| if LIBAVFORMAT_VERSION_MAJOR < 59: | ||||
|     rtsp_args = "-fflags nobuffer -flags low_delay -stimeout 10000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" | ||||
|     rtsp_args = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" | ||||
|     if go2rtc_config.get("ffmpeg") is None: | ||||
|         go2rtc_config["ffmpeg"] = {"rtsp": rtsp_args} | ||||
|     elif go2rtc_config["ffmpeg"].get("rtsp") is None: | ||||
| @@ -135,7 +160,7 @@ for name in go2rtc_config.get("streams", {}): | ||||
|  | ||||
| # add birdseye restream stream if enabled | ||||
| if config.get("birdseye", {}).get("restream", False): | ||||
|     birdseye: dict[str, Any] = config.get("birdseye") | ||||
|     birdseye: dict[str, any] = config.get("birdseye") | ||||
|  | ||||
|     input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}" | ||||
|     ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args', ''), input, '-rtsp_transport tcp -f rtsp {output}')}" | ||||
|   | ||||
| @@ -30,7 +30,7 @@ http { | ||||
|  | ||||
|     gzip on; | ||||
|     gzip_comp_level 6; | ||||
|     gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp; | ||||
|     gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp image/png image/gif image/jpeg image/jpg; | ||||
|     gzip_proxied no-cache no-store private expired auth; | ||||
|     gzip_vary on; | ||||
|  | ||||
| @@ -82,7 +82,7 @@ http { | ||||
|         aio on; | ||||
|  | ||||
|         # file upload size | ||||
|         client_max_body_size 20M; | ||||
|         client_max_body_size 10M; | ||||
|  | ||||
|         # https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool | ||||
|         vod_open_file_thread_pool default; | ||||
| @@ -96,7 +96,6 @@ http { | ||||
|         gzip_types application/vnd.apple.mpegurl; | ||||
|  | ||||
|         include auth_location.conf; | ||||
|         include base_path.conf; | ||||
|  | ||||
|         location /vod/ { | ||||
|             include auth_request.conf; | ||||
| @@ -300,29 +299,11 @@ http { | ||||
|                 add_header Cache-Control "public"; | ||||
|             } | ||||
|  | ||||
|             location /locales/ { | ||||
|                 access_log off; | ||||
|                 add_header Cache-Control "public"; | ||||
|             } | ||||
|  | ||||
|             location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ { | ||||
|                 access_log off; | ||||
|                 expires 1y; | ||||
|                 add_header Cache-Control "public"; | ||||
|                 default_type application/json; | ||||
|                 proxy_set_header Accept-Encoding ""; | ||||
|                 sub_filter_once off; | ||||
|                 sub_filter_types application/json; | ||||
|                 sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"'; | ||||
|                 sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/'; | ||||
|             } | ||||
|  | ||||
|             sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/'; | ||||
|             sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/'; | ||||
|             sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/'; | ||||
|             sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/'; | ||||
|             sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/'; | ||||
|             sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/'; | ||||
|             sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/'; | ||||
|             sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl'; | ||||
|             sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>'; | ||||
|   | ||||
| @@ -1,11 +0,0 @@ | ||||
| """Prints the base path as json to stdout.""" | ||||
|  | ||||
| import json | ||||
| import os | ||||
| from typing import Any | ||||
|  | ||||
| base_path = os.environ.get("FRIGATE_BASE_PATH", "") | ||||
|  | ||||
| result: dict[str, Any] = {"base_path": base_path} | ||||
|  | ||||
| print(json.dumps(result)) | ||||
| @@ -1,31 +1,30 @@ | ||||
| """Prints the tls config as json to stdout.""" | ||||
|  | ||||
| import json | ||||
| import sys | ||||
| from typing import Any | ||||
| import os | ||||
|  | ||||
| from ruamel.yaml import YAML | ||||
|  | ||||
| sys.path.insert(0, "/opt/frigate") | ||||
| from frigate.util.config import find_config_file | ||||
|  | ||||
| sys.path.remove("/opt/frigate") | ||||
|  | ||||
| yaml = YAML() | ||||
|  | ||||
| config_file = find_config_file() | ||||
| config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") | ||||
|  | ||||
| # Check if we can use .yaml instead of .yml | ||||
| config_file_yaml = config_file.replace(".yml", ".yaml") | ||||
| if os.path.isfile(config_file_yaml): | ||||
|     config_file = config_file_yaml | ||||
|  | ||||
| try: | ||||
|     with open(config_file) as f: | ||||
|         raw_config = f.read() | ||||
|  | ||||
|     if config_file.endswith((".yaml", ".yml")): | ||||
|         config: dict[str, Any] = yaml.load(raw_config) | ||||
|         config: dict[str, any] = yaml.load(raw_config) | ||||
|     elif config_file.endswith(".json"): | ||||
|         config: dict[str, Any] = json.loads(raw_config) | ||||
|         config: dict[str, any] = json.loads(raw_config) | ||||
| except FileNotFoundError: | ||||
|     config: dict[str, Any] = {} | ||||
|     config: dict[str, any] = {} | ||||
|  | ||||
| tls_config: dict[str, Any] = config.get("tls", {"enabled": True}) | ||||
| tls_config: dict[str, any] = config.get("tls", {"enabled": True}) | ||||
|  | ||||
| print(json.dumps(tls_config)) | ||||
|   | ||||
| @@ -1,19 +0,0 @@ | ||||
| {{ if .base_path }} | ||||
| location = {{ .base_path }} { | ||||
|     return 302 {{ .base_path }}/; | ||||
| } | ||||
|  | ||||
| location ^~ {{ .base_path }}/ { | ||||
|     # remove base_url from the path before passing upstream | ||||
|     rewrite ^{{ .base_path }}/(.*) /$1 break; | ||||
|  | ||||
|     proxy_pass $scheme://127.0.0.1:8971; | ||||
|     proxy_http_version 1.1; | ||||
|     proxy_set_header Upgrade $http_upgrade; | ||||
|     proxy_set_header Connection "upgrade"; | ||||
|     proxy_set_header Host $host; | ||||
|     proxy_set_header X-Ingress-Path {{ .base_path }}; | ||||
|  | ||||
|     access_log off; | ||||
| } | ||||
| {{ end }} | ||||
| @@ -13,7 +13,6 @@ RUN sed -i "/https:\/\//d" /requirements-wheels.txt | ||||
| RUN sed -i "/onnxruntime/d" /requirements-wheels.txt | ||||
| RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt | ||||
| RUN rm -rf /rk-wheels/opencv_python-* | ||||
| RUN rm -rf /rk-wheels/torch-* | ||||
|  | ||||
| FROM deps AS rk-frigate | ||||
| ARG TARGETARCH | ||||
| @@ -27,11 +26,9 @@ COPY --from=rootfs / / | ||||
| COPY docker/rockchip/COCO /COCO | ||||
| COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py | ||||
|  | ||||
| ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.2/librknnrt.so /usr/lib/ | ||||
| ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/ | ||||
|  | ||||
| ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-11/ffmpeg /usr/lib/ffmpeg/6.0/bin/ | ||||
| ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-11/ffprobe /usr/lib/ffmpeg/6.0/bin/ | ||||
| ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/7.1-1/ffmpeg /usr/lib/ffmpeg/7.0/bin/ | ||||
| ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/7.1-1/ffprobe /usr/lib/ffmpeg/7.0/bin/ | ||||
| ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/ | ||||
| ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/ | ||||
| ENV DEFAULT_FFMPEG_VERSION="6.0" | ||||
| ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}" | ||||
|   | ||||
| @@ -14,7 +14,7 @@ try: | ||||
|     with open("/config/conv2rknn.yaml", "r") as config_file: | ||||
|         configuration = yaml.safe_load(config_file) | ||||
| except FileNotFoundError: | ||||
|     raise Exception("Please place a config file at /config/conv2rknn.yaml") | ||||
|     raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml") | ||||
|  | ||||
| if configuration["config"] != None: | ||||
|     rknn_config = configuration["config"] | ||||
|   | ||||
| @@ -1,2 +1,2 @@ | ||||
| rknn-toolkit2 == 2.3.2 | ||||
| rknn-toolkit-lite2 == 2.3.2 | ||||
| rknn-toolkit2 == 2.3.0 | ||||
| rknn-toolkit-lite2 == 2.3.0 | ||||
| @@ -22,7 +22,7 @@ RUN apt update && \ | ||||
|  | ||||
| RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib | ||||
| RUN cd /opt/rocm-$ROCM/lib && \ | ||||
|     cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \ | ||||
|     cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \ | ||||
|     mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \ | ||||
|     cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib | ||||
| RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm | ||||
| @@ -39,7 +39,6 @@ WORKDIR /opt/frigate | ||||
| COPY --from=rootfs / / | ||||
|  | ||||
| RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ | ||||
|     && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ | ||||
|     && python3 get-pip.py "pip" --break-system-packages | ||||
| RUN python3 -m pip config set global.break-system-packages true | ||||
|  | ||||
| @@ -63,7 +62,6 @@ COPY --from=rocm /opt/rocm-dist/ / | ||||
| FROM deps-prelim AS rocm-prelim-hsa-override0 | ||||
| ENV HSA_ENABLE_SDMA=0 | ||||
| ENV MIGRAPHX_ENABLE_NHWC=1 | ||||
| ENV TF_ROCM_USE_IMMEDIATE_MODE=1 | ||||
|  | ||||
| COPY --from=rocm-dist / / | ||||
|  | ||||
|   | ||||
| @@ -6,29 +6,24 @@ ARG DEBIAN_FRONTEND=noninteractive | ||||
| # Globally set pip break-system-packages option to avoid having to specify it every time | ||||
| ARG PIP_BREAK_SYSTEM_PACKAGES=1 | ||||
|  | ||||
| FROM wheels AS trt-wheels | ||||
| FROM tensorrt-base AS frigate-tensorrt | ||||
| ARG PIP_BREAK_SYSTEM_PACKAGES | ||||
| ENV TRT_VER=8.6.1 | ||||
|  | ||||
| # Install TensorRT wheels | ||||
| COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt | ||||
| COPY docker/main/requirements-wheels.txt /requirements-wheels.txt | ||||
| RUN  pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt | ||||
|  | ||||
| FROM deps AS frigate-tensorrt | ||||
| ARG PIP_BREAK_SYSTEM_PACKAGES | ||||
|  | ||||
| RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ | ||||
|     pip3 uninstall -y onnxruntime-openvino tensorflow-cpu \ | ||||
|     && pip3 install -U /deps/trt-wheels/*.whl | ||||
|  | ||||
| COPY --from=rootfs / / | ||||
| COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d | ||||
| RUN ldconfig | ||||
| RUN pip3 install -U -r /requirements-tensorrt.txt && ldconfig | ||||
|  | ||||
| WORKDIR /opt/frigate/ | ||||
| COPY --from=rootfs / / | ||||
|  | ||||
| # Dev Container w/ TRT | ||||
| FROM devcontainer AS devcontainer-trt | ||||
|  | ||||
| COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so | ||||
| COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos | ||||
| COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda | ||||
| COPY docker/tensorrt/detector/rootfs/ / | ||||
| COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so | ||||
| RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ | ||||
|     pip3 install -U /deps/trt-wheels/*.whl | ||||
|   | ||||
| @@ -1,69 +1,17 @@ | ||||
| # syntax=docker/dockerfile:1.6 | ||||
| # syntax=docker/dockerfile:1.4 | ||||
|  | ||||
| # https://askubuntu.com/questions/972516/debian-frontend-environment-variable | ||||
| ARG DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| ARG BASE_IMAGE | ||||
| ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3 | ||||
|  | ||||
| # Build TensorRT-specific library | ||||
| FROM ${TRT_BASE} AS trt-deps | ||||
|  | ||||
| ARG TARGETARCH | ||||
| ARG COMPUTE_LEVEL | ||||
|  | ||||
| RUN apt-get update \ | ||||
|     && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \ | ||||
|     && rm -rf /var/lib/apt/lists/* | ||||
| RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \ | ||||
|     /tensorrt_libyolo.sh | ||||
|  | ||||
| # COPY required individual CUDA deps | ||||
| RUN mkdir -p /usr/local/cuda-deps | ||||
| RUN if [ "$TARGETARCH" = "amd64" ]; then \ | ||||
|     cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \ | ||||
|     cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ && \ | ||||
|     cd /usr/local/cuda-deps/ && \ | ||||
|     for lib in libnvrtc.so.*; do \ | ||||
|     if [[ "$lib" =~ libnvrtc.so\.([0-9]+\.[0-9]+\.[0-9]+) ]]; then \ | ||||
|     version="${BASH_REMATCH[1]}"; \ | ||||
|     ln -sf "libnvrtc.so.$version" libnvrtc.so; \ | ||||
|     fi; \ | ||||
|     done && \ | ||||
|     for lib in libcurand.so.*; do \ | ||||
|     if [[ "$lib" =~ libcurand.so\.([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+) ]]; then \ | ||||
|     version="${BASH_REMATCH[1]}"; \ | ||||
|     ln -sf "libcurand.so.$version" libcurand.so; \ | ||||
|     fi; \ | ||||
|     done; \ | ||||
|     fi | ||||
|  | ||||
| # Frigate w/ TensorRT Support as separate image | ||||
| FROM deps AS tensorrt-base | ||||
|  | ||||
| #Disable S6 Global timeout | ||||
| ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 | ||||
|  | ||||
| # COPY TensorRT Model Generation Deps | ||||
| COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so | ||||
| COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos | ||||
|  | ||||
| # COPY Individual CUDA deps folder | ||||
| COPY --from=trt-deps /usr/local/cuda-deps /usr/local/cuda | ||||
|  | ||||
| COPY docker/tensorrt/detector/rootfs/ / | ||||
| ENV YOLO_MODELS="" | ||||
|  | ||||
| HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ | ||||
|     CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 | ||||
|  | ||||
| FROM ${BASE_IMAGE} AS build-wheels | ||||
| ARG DEBIAN_FRONTEND | ||||
|  | ||||
| # Add deadsnakes PPA for python3.11 | ||||
| RUN apt-get -qq update && \ | ||||
|     apt-get -qq install -y --no-install-recommends \ | ||||
|     software-properties-common \ | ||||
|     && add-apt-repository ppa:deadsnakes/ppa | ||||
|         apt-get -qq install -y --no-install-recommends \ | ||||
|         software-properties-common \ | ||||
|         && add-apt-repository ppa:deadsnakes/ppa | ||||
|  | ||||
| # Use a separate container to build wheels to prevent build dependencies in final image | ||||
| RUN apt-get -qq update \ | ||||
| @@ -76,7 +24,6 @@ RUN apt-get -qq update \ | ||||
| RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 | ||||
|  | ||||
| RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ | ||||
|     && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \ | ||||
|     && python3 get-pip.py "pip" | ||||
|  | ||||
| FROM build-wheels AS trt-wheels | ||||
| @@ -99,11 +46,12 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t | ||||
|     && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh | ||||
|  | ||||
| COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt | ||||
|  | ||||
| RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt | ||||
|  | ||||
| # See https://elinux.org/Jetson_Zoo#ONNX_Runtime | ||||
| ADD https://nvidia.box.com/shared/static/9yvw05k6u343qfnkhdv2x6xhygze0aq1.whl /trt-wheels/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl | ||||
| ADD https://nvidia.box.com/shared/static/9yvw05k6u343qfnkhdv2x6xhygze0aq1.whl /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl | ||||
|  | ||||
| RUN pip3 uninstall -y onnxruntime-openvino \ | ||||
|     && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \ | ||||
|     && pip3 install --no-deps /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl | ||||
|  | ||||
| FROM build-wheels AS trt-model-wheels | ||||
| ARG DEBIAN_FRONTEND | ||||
| @@ -144,12 +92,11 @@ RUN mkdir -p /etc/ld.so.conf.d && echo /usr/lib/ffmpeg/jetson/lib/ > /etc/ld.so. | ||||
| COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER | ||||
| RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ | ||||
|     --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \ | ||||
|     pip3 uninstall -y onnxruntime \ | ||||
|     && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \ | ||||
|     pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \ | ||||
|     && ldconfig | ||||
|  | ||||
| WORKDIR /opt/frigate/ | ||||
| COPY --from=rootfs / / | ||||
|  | ||||
| # Fixes "Error importing detector runtime: /usr/lib/aarch64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block" | ||||
| ENV LD_PRELOAD /usr/lib/aarch64-linux-gnu/libstdc++.so.6 | ||||
| ENV LD_PRELOAD /usr/lib/aarch64-linux-gnu/libstdc++.so.6 | ||||
|   | ||||
							
								
								
									
										44
									
								
								docker/tensorrt/Dockerfile.base
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										44
									
								
								docker/tensorrt/Dockerfile.base
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,44 @@ | ||||
| # syntax=docker/dockerfile:1.6 | ||||
|  | ||||
| # https://askubuntu.com/questions/972516/debian-frontend-environment-variable | ||||
| ARG DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3 | ||||
|  | ||||
| # Build TensorRT-specific library | ||||
| FROM ${TRT_BASE} AS trt-deps | ||||
|  | ||||
| ARG TARGETARCH | ||||
| ARG COMPUTE_LEVEL | ||||
|  | ||||
| RUN apt-get update \ | ||||
|     && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \ | ||||
|     && rm -rf /var/lib/apt/lists/* | ||||
| RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \ | ||||
|     /tensorrt_libyolo.sh | ||||
|  | ||||
| # COPY required individual CUDA deps | ||||
| RUN mkdir -p /usr/local/cuda-deps | ||||
| RUN if [ "$TARGETARCH" = "amd64" ]; then \ | ||||
|       cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \ | ||||
|       cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ ; \ | ||||
|     fi | ||||
|  | ||||
| # Frigate w/ TensorRT Support as separate image | ||||
| FROM deps AS tensorrt-base | ||||
|  | ||||
| #Disable S6 Global timeout | ||||
| ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 | ||||
|  | ||||
| # COPY TensorRT Model Generation Deps | ||||
| COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so | ||||
| COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos | ||||
|  | ||||
| # COPY Individual CUDA deps folder | ||||
| COPY --from=trt-deps /usr/local/cuda-deps /usr/local/cuda | ||||
|  | ||||
| COPY docker/tensorrt/detector/rootfs/ / | ||||
| ENV YOLO_MODELS="" | ||||
|  | ||||
| HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ | ||||
|     CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 | ||||
| @@ -1,6 +1,8 @@ | ||||
| /usr/local/lib | ||||
| /usr/local/cuda | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/curand/lib/ | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib/ | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib | ||||
| /usr/local/lib/python3.11/dist-packages/tensorrt | ||||
| /usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib | ||||
| @@ -1,18 +1,17 @@ | ||||
| # NVidia TensorRT Support (amd64 only) | ||||
| --extra-index-url 'https://pypi.nvidia.com' | ||||
| cython==3.0.*; platform_machine == 'x86_64' | ||||
| nvidia_cuda_cupti_cu12==12.5.82; platform_machine == 'x86_64' | ||||
| nvidia-cublas-cu12==12.5.3.*; platform_machine == 'x86_64' | ||||
| nvidia-cudnn-cu12==9.3.0.*; platform_machine == 'x86_64' | ||||
| nvidia-cufft-cu12==11.2.3.*; platform_machine == 'x86_64' | ||||
| nvidia-curand-cu12==10.3.6.*; platform_machine == 'x86_64' | ||||
| nvidia_cuda_nvcc_cu12==12.5.82; platform_machine == 'x86_64' | ||||
| nvidia-cuda-nvrtc-cu12==12.5.82; platform_machine == 'x86_64' | ||||
| nvidia_cuda_runtime_cu12==12.5.82; platform_machine == 'x86_64' | ||||
| nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64' | ||||
| nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64' | ||||
| nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64' | ||||
| nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64' | ||||
| numpy < 1.24; platform_machine == 'x86_64' | ||||
| tensorrt == 8.6.1; platform_machine == 'x86_64' | ||||
| tensorrt_bindings == 8.6.1; platform_machine == 'x86_64' | ||||
| cuda-python == 11.8.*; platform_machine == 'x86_64' | ||||
| cython == 3.0.*; platform_machine == 'x86_64' | ||||
| nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64' | ||||
| nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64' | ||||
| nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64' | ||||
| nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64' | ||||
| nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64' | ||||
| nvidia-cufft-cu11==10.*; platform_machine == 'x86_64' | ||||
| nvidia-cufft-cu12==11.*; platform_machine == 'x86_64' | ||||
| onnx==1.16.*; platform_machine == 'x86_64' | ||||
| onnxruntime-gpu==1.20.*; platform_machine == 'x86_64' | ||||
| protobuf==3.20.3; platform_machine == 'x86_64' | ||||
|   | ||||
| @@ -79,13 +79,21 @@ target "trt-deps" { | ||||
|   inherits = ["_build_args"] | ||||
| } | ||||
|  | ||||
| target "tensorrt-base" { | ||||
|   dockerfile = "docker/tensorrt/Dockerfile.base" | ||||
|   context = "." | ||||
|   contexts = { | ||||
|     deps = "target:deps", | ||||
|   } | ||||
|   inherits = ["_build_args"] | ||||
| } | ||||
|  | ||||
| target "tensorrt" { | ||||
|   dockerfile = "docker/tensorrt/Dockerfile.${ARCH}" | ||||
|   context = "." | ||||
|   contexts = { | ||||
|     wget = "target:wget", | ||||
|     wheels = "target:wheels", | ||||
|     deps = "target:deps", | ||||
|     tensorrt-base = "target:tensorrt-base", | ||||
|     rootfs = "target:rootfs" | ||||
|   } | ||||
|   target = "frigate-tensorrt" | ||||
|   | ||||
| @@ -44,7 +44,7 @@ go2rtc: | ||||
|  | ||||
| ### `environment_vars` | ||||
|  | ||||
| This section can be used to set environment variables for those unable to modify the environment of the container, like within Home Assistant OS. | ||||
| This section can be used to set environment variables for those unable to modify the environment of the container (ie. within HassOS) | ||||
|  | ||||
| Example: | ||||
|  | ||||
| @@ -172,38 +172,6 @@ listen [::]:8971 ipv6only=off ssl; | ||||
| listen [::]:5000 ipv6only=off; | ||||
| ``` | ||||
|  | ||||
| ## Base path | ||||
|  | ||||
| By default, Frigate runs at the root path (`/`). However some setups require to run Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing. | ||||
|  | ||||
| ### Set Base Path via HTTP Header | ||||
| The preferred way to configure the base path is through the `X-Ingress-Path` HTTP header, which needs to be set to the desired base path in an upstream reverse proxy. | ||||
|  | ||||
| For example, in Nginx: | ||||
| ``` | ||||
| location /frigate { | ||||
|     proxy_set_header X-Ingress-Path /frigate; | ||||
|     proxy_pass http://frigate_backend; | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Set Base Path via Environment Variable | ||||
| When it is not feasible to set the base path via a HTTP header, it can also be set via the `FRIGATE_BASE_PATH` environment variable in the Docker Compose file. | ||||
|  | ||||
| For example: | ||||
| ``` | ||||
| services: | ||||
|   frigate: | ||||
|     image: blakeblackshear/frigate:latest | ||||
|     environment: | ||||
|       - FRIGATE_BASE_PATH=/frigate | ||||
| ``` | ||||
|  | ||||
| This can be used for example to access Frigate via a Tailscale agent (https), by simply forwarding all requests to the base path (http): | ||||
| ``` | ||||
| tailscale serve --https=443 --bg --set-path /frigate http://localhost:5000/frigate | ||||
| ``` | ||||
|  | ||||
| ## Custom Dependencies | ||||
|  | ||||
| ### Custom ffmpeg build | ||||
| @@ -218,7 +186,7 @@ To do this: | ||||
|  | ||||
| ### Custom go2rtc version | ||||
|  | ||||
| Frigate currently includes go2rtc v1.9.9, there may be certain cases where you want to run a different version of go2rtc. | ||||
| Frigate currently includes go2rtc v1.9.2, there may be certain cases where you want to run a different version of go2rtc. | ||||
|  | ||||
| To do this: | ||||
|  | ||||
|   | ||||
| @@ -43,28 +43,13 @@ Restarting Frigate will reset the rate limits. | ||||
|  | ||||
| If you are running Frigate behind a proxy, you will want to set `trusted_proxies` or these rate limits will apply to the upstream proxy IP address. This means that a brute force attack will rate limit login attempts from other devices and could temporarily lock you out of your instance. In order to ensure rate limits only apply to the actual IP address where the requests are coming from, you will need to list the upstream networks that you want to trust. These trusted proxies are checked against the `X-Forwarded-For` header when looking for the IP address where the request originated. | ||||
|  | ||||
| If you are running a reverse proxy in the same Docker Compose file as Frigate, here is an example of how your auth config might look: | ||||
| If you are running a reverse proxy in the same docker compose file as Frigate, here is an example of how your auth config might look: | ||||
|  | ||||
| ```yaml | ||||
| auth: | ||||
|   failed_login_rate_limit: "1/second;5/minute;20/hour" | ||||
|   trusted_proxies: | ||||
|     - 172.18.0.0/16 # <---- this is the subnet for the internal Docker Compose network | ||||
| ``` | ||||
|  | ||||
| ## Session Length | ||||
|  | ||||
| The default session length for user authentication in Frigate is 24 hours. This setting determines how long a user's authenticated session remains active before a token refresh is required — otherwise, the user will need to log in again. | ||||
|  | ||||
| While the default provides a balance of security and convenience, you can customize this duration to suit your specific security requirements and user experience preferences. The session length is configured in seconds. | ||||
|  | ||||
| The default value of `86400` will expire the authentication session after 24 hours. Some other examples: | ||||
| - `0`: Setting the session length to 0 will require a user to log in every time they access the application or after a very short, immediate timeout. | ||||
| - `604800`: Setting the session length to 604800 will require a user to log in if the token is not refreshed for 7 days. | ||||
|  | ||||
| ```yaml | ||||
| auth: | ||||
|   session_length: 86400 | ||||
|     - 172.18.0.0/16 # <---- this is the subnet for the internal docker compose network | ||||
| ``` | ||||
|  | ||||
| ## JWT Token Secret | ||||
| @@ -81,7 +66,7 @@ Frigate looks for a JWT token secret in the following order: | ||||
|  | ||||
| 1. An environment variable named `FRIGATE_JWT_SECRET` | ||||
| 2. A docker secret named `FRIGATE_JWT_SECRET` in `/run/secrets/` | ||||
| 3. A `jwt_secret` option from the Home Assistant Add-on options | ||||
| 3. A `jwt_secret` option from the Home Assistant Addon options | ||||
| 4. A `.jwt_secret` file in the config directory | ||||
|  | ||||
| If no secret is found on startup, Frigate generates one and stores it in a `.jwt_secret` file in the config directory. | ||||
| @@ -92,7 +77,7 @@ Changing the secret will invalidate current tokens. | ||||
|  | ||||
| Frigate can be configured to leverage features of common upstream authentication proxies such as Authelia, Authentik, oauth2_proxy, or traefik-forward-auth. | ||||
|  | ||||
| If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication as there is no correspondence between users in Frigate's database and users authenticated via the proxy. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret. | ||||
| If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret. | ||||
|  | ||||
| Here is an example of how to disable Frigate's authentication and also ensure the requests come only from your known proxy. | ||||
|  | ||||
| @@ -112,27 +97,18 @@ python3 -c 'import secrets; print(secrets.token_hex(64))' | ||||
|  | ||||
| ### Header mapping | ||||
|  | ||||
| If you have disabled Frigate's authentication and your proxy supports passing a header with authenticated usernames and/or roles, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` and `X-Forwarded-Groups` values. Header names are not case sensitive. Multiple values can be included in the role header. Frigate expects that the character separating the roles is a comma, but this can be specified using the `separator` config entry. | ||||
| If you have disabled Frigate's authentication and your proxy supports passing a header with authenticated usernames and/or roles, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` and `X-Forwarded-Role` values. Header names are not case sensitive. | ||||
|  | ||||
| ```yaml | ||||
| proxy: | ||||
|   ... | ||||
|   separator: "|" # This value defaults to a comma, but Authentik uses a pipe, for example. | ||||
|   header_map: | ||||
|     user: x-forwarded-user | ||||
|     role: x-forwarded-groups | ||||
|     role: x-forwarded-role | ||||
| ``` | ||||
|  | ||||
| Frigate supports both `admin` and `viewer` roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization. | ||||
|  | ||||
| A default role can be provided. Any value in the mapped `role` header will override the default. | ||||
|  | ||||
| ```yaml | ||||
| proxy: | ||||
|   ... | ||||
|   default_role: viewer | ||||
| ``` | ||||
|  | ||||
| #### Port Considerations | ||||
|  | ||||
| **Authenticated Port (8971)** | ||||
|   | ||||
| @@ -1,31 +0,0 @@ | ||||
| --- | ||||
| id: bird_classification | ||||
| title: Bird Classification | ||||
| --- | ||||
|  | ||||
| Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. | ||||
|  | ||||
| ## Minimum System Requirements | ||||
|  | ||||
| Bird classification runs a lightweight tflite model on the CPU, there are no significantly different system requirements than running Frigate itself. | ||||
|  | ||||
| ## Model | ||||
|  | ||||
| The classification model used is the MobileNet INat Bird Classification, [available identifiers can be found here.](https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt) | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| Bird classification is disabled by default, it must be enabled in your config file before it can be used. Bird classification is a global configuration setting. | ||||
|  | ||||
| ```yaml | ||||
| classification: | ||||
|   bird: | ||||
|     enabled: true | ||||
| ``` | ||||
|  | ||||
| ## Advanced Configuration | ||||
|  | ||||
| Fine-tune bird classification with these optional parameters: | ||||
|  | ||||
| - `threshold`: Classification confidence score required to set the sub label on the object. | ||||
|   - Default: `0.9`. | ||||
| @@ -4,7 +4,7 @@ In addition to Frigate's Live camera dashboard, Birdseye allows a portable heads | ||||
|  | ||||
| Birdseye can be viewed by adding the "Birdseye" camera to a Camera Group in the Web UI. Add a Camera Group by pressing the "+" icon on the Live page, and choose "Birdseye" as one of the cameras. | ||||
|  | ||||
| Birdseye can also be used in Home Assistant dashboards, cast to media devices, etc. | ||||
| Birdseye can also be used in HomeAssistant dashboards, cast to media devices, etc. | ||||
|  | ||||
| ## Birdseye Behavior | ||||
|  | ||||
|   | ||||
| @@ -15,17 +15,6 @@ Many cameras support encoding options which greatly affect the live view experie | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ## H.265 Cameras via Safari | ||||
|  | ||||
| Some cameras support h265 with different formats, but Safari only supports the annexb format. When using h265 camera streams for recording with devices that use the Safari browser, the `apple_compatibility` option should be used. | ||||
|  | ||||
| ```yaml | ||||
| cameras: | ||||
|   h265_cam: # <------ Doesn't matter what the camera is called | ||||
|     ffmpeg: | ||||
|       apple_compatibility: true # <- Adds compatibility with MacOS and iPhone | ||||
| ``` | ||||
|  | ||||
| ## MJPEG Cameras | ||||
|  | ||||
| Note that mjpeg cameras require encoding the video into h264 for recording, and restream roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. It is recommended to use the restream role to create an h264 restream and then use that as the source for ffmpeg. | ||||
| @@ -230,7 +219,7 @@ go2rtc: | ||||
|       - rtspx://192.168.1.1:7441/abcdefghijk | ||||
| ``` | ||||
|  | ||||
| [See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-rtsp) | ||||
| [See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-rtsp) | ||||
|  | ||||
| In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. | ||||
|  | ||||
| @@ -243,38 +232,3 @@ ffmpeg: | ||||
| ### TP-Link VIGI Cameras | ||||
|  | ||||
| TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`. | ||||
|  | ||||
| ## USB Cameras (aka Webcams) | ||||
|  | ||||
| To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support: | ||||
|  | ||||
| - Preparation outside of Frigate: | ||||
|   - Get USB camera path. Run `v4l2-ctl --list-devices` to get a listing of locally-connected cameras available. (You may need to install `v4l-utils` in a way appropriate for your Linux distribution). In the sample configuration below, we use `video=0` to correlate with a detected device path of `/dev/video0` | ||||
|   - Get USB camera formats & resolutions. Run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to get an idea of what formats and resolutions the USB Camera supports. In the sample configuration below, we use a width of 1024 and height of 576 in the stream and detection settings based on what was reported back. | ||||
|   - If using Frigate in a container (e.g. Docker on TrueNAS), ensure you have USB Passthrough support enabled, along with a specific Host Device (`/dev/video0`) + Container Device (`/dev/video0`) listed. | ||||
|  | ||||
| - In your Frigate Configuration File, add the go2rtc stream and roles as appropriate: | ||||
|  | ||||
| ``` | ||||
| go2rtc: | ||||
|   streams: | ||||
|     usb_camera: | ||||
|       - "ffmpeg:device?video=0&video_size=1024x576#video=h264"  | ||||
|  | ||||
| cameras: | ||||
|   usb_camera: | ||||
|     enabled: true | ||||
|     ffmpeg: | ||||
|       inputs: | ||||
|         - path: rtsp://127.0.0.1:8554/usb_camera | ||||
|           input_args: preset-rtsp-restream | ||||
|           roles: | ||||
|             - detect | ||||
|             - record | ||||
|     detect: | ||||
|       enabled: false # <---- disable detection until you have a working camera feed | ||||
|       width: 1024 | ||||
|       height: 576 | ||||
| ``` | ||||
|  | ||||
|  | ||||
|   | ||||
| @@ -97,12 +97,9 @@ This list of working and non-working PTZ cameras is based on user feedback. | ||||
| | Amcrest ASH21                |      ✅      |      ❌      | ONVIF service port: 80                                                                                                                          | | ||||
| | Amcrest IP4M-S2112EW-AI      |      ✅      |      ❌      | FOV relative movement not supported.                                                                                                            | | ||||
| | Amcrest IP5M-1190EW          |      ✅      |      ❌      | ONVIF Port: 80. FOV relative movement not supported.                                                                                            | | ||||
| | Annke CZ504                  |      ✅      |      ✅      | Annke support provide specific firmware ([V5.7.1 build 250227](https://github.com/pierrepinon/annke_cz504/raw/refs/heads/main/digicap_V5-7-1_build_250227.dav)) to fix issue with ONVIF "TranslationSpaceFov" | | ||||
| | Ctronics PTZ                 |      ✅      |      ❌      |                                                                                                                                                 | | ||||
| | Dahua                        |      ✅      |      ✅      | Some low-end Dahuas (lite series, among others) have been reported to not support autotracking                                                  | | ||||
| | Dahua                        |      ✅      |      ✅      |                                                                                                                                                 | | ||||
| | Dahua DH-SD2A500HB           |      ✅      |      ❌      |                                                                                                                                                 | | ||||
| | Dahua DH-SD49825GB-HNR       |      ✅      |      ✅      |                                                                                                                                                 | | ||||
| | Dahua DH-P5AE-PV             |      ❌      |      ❌      |                                                                                                                                                 | | ||||
| | Foscam R5                    |      ✅      |      ❌      |                                                                                                                                                 | | ||||
| | Hanwha XNP-6550RH            |      ✅      |      ❌      |                                                                                                                                                 | | ||||
| | Hikvision                    |      ✅      |      ❌      | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others | | ||||
|   | ||||
| @@ -3,87 +3,20 @@ id: face_recognition | ||||
| title: Face Recognition | ||||
| --- | ||||
|  | ||||
| Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. | ||||
| Face recognition allows people to be assigned names and when their face is recognized Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications. | ||||
|  | ||||
| ## Model Requirements | ||||
|  | ||||
| ### Face Detection | ||||
|  | ||||
| When running a Frigate+ model (or any custom model that natively detects faces) should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient. | ||||
|  | ||||
| When running a default COCO model or another model that does not include `face` as a detectable label, face detection will run via CV2 using a lightweight DNN model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| Frigate needs to first detect a `person` before it can detect and recognize a face. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ### Face Recognition | ||||
|  | ||||
| Frigate has support for two face recognition model types: | ||||
|  | ||||
| - **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate. | ||||
| - **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available. | ||||
|  | ||||
| In both cases, a lightweight face landmark detection model is also used to align faces before running recognition. | ||||
|  | ||||
| All of these features run locally on your system. | ||||
|  | ||||
| ## Minimum System Requirements | ||||
|  | ||||
| The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently. | ||||
|  | ||||
| The `large` model is optimized for accuracy, an integrated or discrete GPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. | ||||
| Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally. A lightweight face landmark detection model is also used to align faces before running them through the face recognizer. | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| Face recognition is disabled by default, face recognition must be enabled in the UI or in your config file before it can be used. Face recognition is a global configuration setting. | ||||
| Face recognition is disabled by default, face recognition must be enabled in your config file before it can be used. Face recognition is a global configuration setting. | ||||
|  | ||||
| ```yaml | ||||
| face_recognition: | ||||
|   enabled: true | ||||
| ``` | ||||
|  | ||||
| Like the other real-time processors in Frigate, face recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements. | ||||
|  | ||||
| ## Advanced Configuration | ||||
|  | ||||
| Fine-tune face recognition with these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled` and `min_area`. | ||||
|  | ||||
| ### Detection | ||||
|  | ||||
| - `detection_threshold`: Face detection confidence score required before recognition runs: | ||||
|   - Default: `0.7` | ||||
|   - Note: This is field only applies to the standalone face detection model, `min_score` should be used to filter for models that have face detection built in. | ||||
| - `min_area`: Defines the minimum size (in pixels) a face must be before recognition runs. | ||||
|   - Default: `500` pixels. | ||||
|   - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant faces. | ||||
|  | ||||
| ### Recognition | ||||
|  | ||||
| - `model_size`: Which model size to use, options are `small` or `large` | ||||
| - `unknown_score`: Min score to mark a person as a potential match, matches at or below this will be marked as unknown. | ||||
|   - Default: `0.8`. | ||||
| - `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label. | ||||
|   - Default: `0.9`. | ||||
| - `min_faces`: Min face recognitions for the sub label to be applied to the person object. | ||||
|   - Default: `1` | ||||
| - `save_attempts`: Number of images of recognized faces to save for training. | ||||
|   - Default: `100`. | ||||
| - `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this. | ||||
|   - Default: `True`. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| Follow these steps to begin: | ||||
|  | ||||
| 1. **Enable face recognition** in your configuration file and restart Frigate. | ||||
| 2. **Upload one face** using the **Add Face** button's wizard in the Face Library section of the Frigate UI. Read below for the best practices on expanding your training set. | ||||
| 3. When Frigate detects and attempts to recognize a face, it will appear in the **Train** tab of the Face Library, along with its associated recognition confidence. | ||||
| 4. From the **Train** tab, you can **assign the face** to a new or existing person to improve recognition accuracy for the future. | ||||
|  | ||||
| ## Creating a Robust Training Set | ||||
| ## Dataset | ||||
|  | ||||
| The number of images needed for a sufficient training set for face recognition varies depending on several factors: | ||||
|  | ||||
| @@ -92,9 +25,11 @@ The number of images needed for a sufficient training set for face recognition v | ||||
|  | ||||
| However, here are some general guidelines: | ||||
|  | ||||
| - Minimum: For basic face recognition tasks, a minimum of 5-10 images per person is often recommended. | ||||
| - Recommended: For more robust and accurate systems, 20-30 images per person is a good starting point. | ||||
| - Ideal: For optimal performance, especially in challenging conditions, 50-100 images per person can be beneficial. | ||||
| - Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended. | ||||
| - Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point. | ||||
| - Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial. | ||||
|  | ||||
| ## Creating a Robust Training Set | ||||
|  | ||||
| The accuracy of face recognition is heavily dependent on the quality of data given to it for training. It is recommended to build the face training library in phases. | ||||
|  | ||||
| @@ -103,106 +38,19 @@ The accuracy of face recognition is heavily dependent on the quality of data giv | ||||
| When choosing images to include in the face training set it is recommended to always follow these recommendations: | ||||
|  | ||||
| - If it is difficult to make out details in a persons face it will not be helpful in training. | ||||
| - Avoid images with extreme under/over-exposure. | ||||
| - Avoid images with under/over-exposure. | ||||
| - Avoid blurry / pixelated images. | ||||
| - Avoid training on infrared (gray-scale). The models are trained on color images and will be able to extract features from gray-scale images. | ||||
| - Using images of people wearing hats / sunglasses may confuse the model. | ||||
| - Do not upload too many similar images at the same time, it is recommended to train no more than 4-6 similar images for each person to avoid over-fitting. | ||||
| - Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the training. | ||||
| - Do not upload too many images at the same time, it is recommended to train 4-6 images for each person each day so it is easier to know if the previously added images helped or hurt performance. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ### Understanding the Train Tab | ||||
|  | ||||
| The Train tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching. | ||||
|  | ||||
| Each face image is labeled with a name (or `Unknown`) along with the confidence score of the recognition attempt. While each image can be used to train the system for a specific person, not all images are suitable for training. | ||||
|  | ||||
| Refer to the guidelines below for best practices on selecting images for training. | ||||
|  | ||||
| ### Step 1 - Building a Strong Foundation | ||||
|  | ||||
| When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-5 photos containing just this person's face. It is important that the person's face in the photo is front-facing and not turned, this will ensure a good starting point. | ||||
| When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-2 photos taken by a smartphone for each person. It is important that the person's face in the photo is straight-on and not turned which will ensure a good starting point. | ||||
|  | ||||
| Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are front-facing. Ignore images from cameras that recognize faces from an angle. Aim to strike a balance between the quality of images while also having a range of conditions (day / night, different weather conditions, different times of day, etc.) in order to have diversity in the images used for each person and not have over-fitting. | ||||
|  | ||||
| You do not want to train images that are 90%+ as these are already being confidently recognized. In this step the goal is to train on clear, lower scoring front-facing images until the majority of front-facing images for a given person are consistently recognized correctly. Then it is time to move on to step 2. | ||||
| Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step. | ||||
|  | ||||
| ### Step 2 - Expanding The Dataset | ||||
|  | ||||
| Once front-facing images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone, and you still only want to train on images that score lower. | ||||
|  | ||||
| ## FAQ | ||||
|  | ||||
| ### How do I debug Face Recognition issues? | ||||
|  | ||||
| Start with the [Usage](#usage) section and re-read the [Model Requirements](#model-requirements) above. | ||||
|  | ||||
| 1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Train tab in the Frigate UI's Face Library. | ||||
|  | ||||
|    If you are using a Frigate+ or `face` detecting model: | ||||
|  | ||||
|    - Watch the debug view (Settings --> Debug) to ensure that `face` is being detected along with `person`. | ||||
|    - You may need to adjust the `min_score` for the `face` object if faces are not being detected. | ||||
|  | ||||
|    If you are **not** using a Frigate+ or `face` detecting model: | ||||
|  | ||||
|    - Check your `detect` stream resolution and ensure it is sufficiently high enough to capture face details on `person` objects. | ||||
|    - You may need to lower your `detection_threshold` if faces are not being detected. | ||||
|  | ||||
| 2. Any detected faces will then be _recognized_. | ||||
|  | ||||
|    - Make sure you have trained at least one face per the recommendations above. | ||||
|    - Adjust `recognition_threshold` settings per the suggestions [above](#advanced-configuration). | ||||
|  | ||||
| ### Detection does not work well with blurry images? | ||||
|  | ||||
| Accuracy is definitely a going to be improved with higher quality cameras / streams. It is important to look at the DORI (Detection Observation Recognition Identification) range of your camera, if that specification is posted. This specification explains the distance from the camera that a person can be detected, observed, recognized, and identified. The identification range is the most relevant here, and the distance listed by the camera is the furthest that face recognition will realistically work. | ||||
|  | ||||
| ### Why can't I bulk upload photos? | ||||
|  | ||||
| It is important to methodically add photos to the library; bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance. | ||||
|  | ||||
| ### Why can't I bulk reprocess faces? | ||||
|  | ||||
| Face embedding models work by breaking apart faces into different features. This means that when reprocessing an image, only images from a similar angle will have their scores affected. | ||||
|  | ||||
| ### Why do unknown people score similarly to known people? | ||||
|  | ||||
| This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to over-fitting: | ||||
|  | ||||
| - If you train with only a few images per person, especially if those images are very similar, the recognition model becomes overly specialized to those specific images. | ||||
| - When you provide images with different poses, lighting, and expressions, the algorithm extracts features that are consistent across those variations. | ||||
| - By training on a diverse set of images, the algorithm becomes less sensitive to minor variations and noise in the input image. | ||||
|  | ||||
| Review your face collections and remove most of the unclear or low-quality images. Then, use the **Reprocess** button on each face in the **Train** tab to evaluate how the changes affect recognition scores. | ||||
|  | ||||
| Avoid training on images that already score highly, as this can lead to over-fitting. Instead, focus on relatively clear images that score lower—ideally with different lighting, angles, and conditions—to help the model generalize more effectively. | ||||
|  | ||||
| ### Frigate misidentified a face. Can I tell it that a face is "not" a specific person? | ||||
|  | ||||
| No, face recognition does not support negative training (i.e., explicitly telling it who someone is _not_). Instead, the best approach is to improve the training data by using a more diverse and representative set of images for each person. | ||||
| For more guidance, refer to the section above on improving recognition accuracy. | ||||
|  | ||||
| ### I see scores above the threshold in the train tab, but a sub label wasn't assigned? | ||||
|  | ||||
| Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person if they are confidently recognized consistently. This avoids cases where a single high confidence recognition would throw off the results. | ||||
|  | ||||
| ### Can I use other face recognition software like Double Take at the same time as the built-in face recognition? | ||||
|  | ||||
| No, using another face recognition service will interfere with Frigate's built-in face recognition. When using Double Take, the sub_label feature must be disabled if the built-in face recognition is also desired. | ||||
|  | ||||
| ### Does face recognition run on the recording stream? | ||||
|  | ||||
| Face recognition does not run on the recording stream; this would be suboptimal for many reasons: | ||||
|  | ||||
| 1. The latency of accessing the recordings means the notifications would not include the names of recognized people, because recognition would not complete until after the notification is sent. | ||||
| 2. The embedding models used run on a set image size, so larger images will be scaled down to match this anyway. | ||||
| 3. Motion clarity is much more important than extra pixels; over-compression and motion blur are much more detrimental to results than resolution. | ||||
|  | ||||
| ### I get an unknown error when taking a photo directly with my iPhone | ||||
|  | ||||
| By default iOS devices will use HEIC (High Efficiency Image Container) for images, but this format is not supported for uploads. Choosing `large` as the format instead of `original` will use JPG which will work correctly. | ||||
|  | ||||
| ### How can I delete the face database and start over? | ||||
|  | ||||
| Frigate does not store anything in its database related to face recognition. You can simply delete all of your faces through the Frigate UI or remove the contents of the `/media/frigate/clips/faces` directory. | ||||
| Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone. | ||||
|   | ||||
| @@ -9,7 +9,7 @@ Some presets of FFmpeg args are provided by default to make the configuration ea | ||||
|  | ||||
| It is highly recommended to use hwaccel presets in the config. These presets not only replace the longer args, but they also give Frigate hints of what hardware is available and allows Frigate to make other optimizations using the GPU such as when encoding the birdseye restream or when scaling a stream that has a size different than the native stream size. | ||||
|  | ||||
| See [the hwaccel docs](/configuration/hardware_acceleration_video.md) for more info on how to setup hwaccel for your GPU / iGPU. | ||||
| See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on how to setup hwaccel for your GPU / iGPU. | ||||
|  | ||||
| | Preset                | Usage                          | Other Notes                                           | | ||||
| | --------------------- | ------------------------------ | ----------------------------------------------------- | | ||||
| @@ -71,11 +71,11 @@ cameras: | ||||
|  | ||||
| Output args presets help make the config more readable and handle use cases for different types of streams to ensure consistent recordings. | ||||
|  | ||||
| | Preset                           | Usage                             | Other Notes                                                                                                                                                                                              | | ||||
| | -------------------------------- | --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ||||
| | preset-record-generic            | Record WITHOUT audio              | If your camera doesn’t have audio, or if you don’t want to record audio, use this option                                                                                                                 | | ||||
| | preset-record-generic-audio-copy | Record WITH original audio        | Use this to enable audio in recordings                                                                                                                                                                   | | ||||
| | preset-record-generic-audio-aac  | Record WITH transcoded aac audio  | This is the default when no option is specified. Use it to transcode audio to AAC. If the source is already in AAC format, use preset-record-generic-audio-copy instead to avoid unnecessary re-encoding | | ||||
| | preset-record-mjpeg              | Record an mjpeg stream            | Recommend restreaming mjpeg stream instead                                                                                                                                                               | | ||||
| | preset-record-jpeg               | Record live jpeg                  | Recommend restreaming live jpeg instead                                                                                                                                                                  | | ||||
| | preset-record-ubiquiti           | Record ubiquiti stream with audio | Recordings with ubiquiti non-standard audio                                                                                                                                                              | | ||||
| | Preset                           | Usage                             | Other Notes                                                                                                                          | | ||||
| | -------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | | ||||
| | preset-record-generic            | Record WITHOUT audio              | This is the default when nothing is specified                                                                                        | | ||||
| | preset-record-generic-audio-copy | Record WITH original audio        | Use this to enable audio in recordings                                                                                               | | ||||
| | preset-record-generic-audio-aac  | Record WITH transcoded aac audio  | Use this to transcode to aac audio. If your source is already aac, use preset-record-generic-audio-copy instead to avoid re-encoding | | ||||
| | preset-record-mjpeg              | Record an mjpeg stream            | Recommend restreaming mjpeg stream instead                                                                                           | | ||||
| | preset-record-jpeg               | Record live jpeg                  | Recommend restreaming live jpeg instead                                                                                              | | ||||
| | preset-record-ubiquiti           | Record ubiquiti stream with audio | Recordings with ubiquiti non-standard audio                                                                                          | | ||||
|   | ||||
| @@ -21,23 +21,12 @@ genai: | ||||
|   model: gemini-1.5-flash | ||||
|  | ||||
| cameras: | ||||
|   front_camera:  | ||||
|     genai: | ||||
|       enabled: True # <- enable GenAI for your front camera | ||||
|       use_snapshot: True | ||||
|       objects: | ||||
|         - person | ||||
|       required_zones: | ||||
|         - steps | ||||
|   front_camera: ... | ||||
|   indoor_camera: | ||||
|     genai:  | ||||
|       enabled: False # <- disable GenAI for your indoor camera | ||||
|     genai: # <- disable GenAI for your indoor camera | ||||
|       enabled: False | ||||
| ``` | ||||
|  | ||||
| By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones. | ||||
|  | ||||
| Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction. | ||||
|  | ||||
| ## Ollama | ||||
|  | ||||
| :::warning | ||||
| @@ -178,7 +167,7 @@ Analyze the sequence of images containing the {label}. Focus on the likely inten | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt. | ||||
| Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -196,7 +185,9 @@ genai: | ||||
|     car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company." | ||||
| ``` | ||||
|  | ||||
| Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.  | ||||
| Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones. | ||||
|  | ||||
| Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction. | ||||
|  | ||||
| ```yaml | ||||
| cameras: | ||||
|   | ||||
| @@ -1,20 +1,20 @@ | ||||
| --- | ||||
| id: hardware_acceleration_video | ||||
| title: Video Decoding | ||||
| id: hardware_acceleration | ||||
| title: Hardware Acceleration | ||||
| --- | ||||
| 
 | ||||
| # Video Decoding | ||||
| # Hardware Acceleration | ||||
| 
 | ||||
| It is highly recommended to use a GPU for hardware acceleration video decoding in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg. | ||||
| It is highly recommended to use a GPU for hardware acceleration in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg. | ||||
| 
 | ||||
| Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro | ||||
| 
 | ||||
| # Object Detection | ||||
| # Officially Supported | ||||
| 
 | ||||
| ## Raspberry Pi 3/4 | ||||
| 
 | ||||
| Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory). | ||||
| If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration. | ||||
| If you are using the HA addon, you may need to use the full access variant and turn off `Protection mode` for hardware acceleration. | ||||
| 
 | ||||
| ```yaml | ||||
| # if you want to decode a h264 stream | ||||
| @@ -28,8 +28,8 @@ ffmpeg: | ||||
| 
 | ||||
| :::note | ||||
| 
 | ||||
| If running Frigate through Docker, you either need to run in privileged mode or | ||||
| map the `/dev/video*` devices to Frigate. With Docker Compose add: | ||||
| If running Frigate in Docker, you either need to run in privileged mode or | ||||
| map the `/dev/video*` devices to Frigate. With Docker compose add: | ||||
| 
 | ||||
| ```yaml | ||||
| services: | ||||
| @@ -69,19 +69,18 @@ Or map in all the `/dev/video*` devices. | ||||
| 
 | ||||
| **Recommended hwaccel Preset** | ||||
| 
 | ||||
| | CPU Generation | Intel Driver | Recommended Preset  | Notes                                | | ||||
| | -------------- | ------------ | ------------------- | ------------------------------------ | | ||||
| | gen1 - gen5    | i965         | preset-vaapi        | qsv is not supported                 | | ||||
| | gen6 - gen7    | iHD          | preset-vaapi        | qsv is not supported                 | | ||||
| | gen8 - gen12   | iHD          | preset-vaapi        | preset-intel-qsv-\* can also be used | | ||||
| | gen13+         | iHD / Xe     | preset-intel-qsv-\* |                                      | | ||||
| | Intel Arc GPU  | iHD / Xe     | preset-intel-qsv-\* |                                      | | ||||
| | CPU Generation | Intel Driver | Recommended Preset | Notes                               | | ||||
| | -------------- | ------------ | ------------------ | ----------------------------------- | | ||||
| | gen1 - gen7    | i965         | preset-vaapi       | qsv is not supported                | | ||||
| | gen8 - gen12   | iHD          | preset-vaapi       | preset-intel-qsv-* can also be used | | ||||
| | gen13+         | iHD / Xe     | preset-intel-qsv-* |                                     | | ||||
| | Intel Arc GPU  | iHD / Xe     | preset-intel-qsv-* |                                     | | ||||
| 
 | ||||
| ::: | ||||
| 
 | ||||
| :::note | ||||
| 
 | ||||
| The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars). | ||||
| The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). | ||||
| 
 | ||||
| See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is. | ||||
| 
 | ||||
| @@ -176,33 +175,23 @@ For more information on the various values across different distributions, see h | ||||
| 
 | ||||
| Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'` | ||||
| 
 | ||||
| #### Stats for SR-IOV or other devices | ||||
| #### Stats for SR-IOV devices | ||||
| 
 | ||||
| When using virtualized GPUs via SR-IOV, you need to specify the device path to use to gather stats from `intel_gpu_top`. This example may work for some systems using SR-IOV: | ||||
| When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config: | ||||
| 
 | ||||
| ```yaml | ||||
| telemetry: | ||||
|   stats: | ||||
|     intel_gpu_device: "sriov" | ||||
|     sriov: True | ||||
| ``` | ||||
| 
 | ||||
| For other virtualized GPUs, try specifying the direct path to the device instead: | ||||
| 
 | ||||
| ```yaml | ||||
| telemetry: | ||||
|   stats: | ||||
|     intel_gpu_device: "drm:/dev/dri/card0" | ||||
| ``` | ||||
| 
 | ||||
| If you are passing in a device path, make sure you've passed the device through to the container. | ||||
| 
 | ||||
| ## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver | ||||
| 
 | ||||
| VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. | ||||
| 
 | ||||
| :::note | ||||
| 
 | ||||
| You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars). | ||||
| You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). | ||||
| 
 | ||||
| ::: | ||||
| 
 | ||||
| @@ -306,7 +295,8 @@ These instructions were originally based on the [Jellyfin documentation](https:/ | ||||
| ## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano) | ||||
| 
 | ||||
| A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build | ||||
| with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection. | ||||
| with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 5.0+ use the `stable-tensorrt-jp5` | ||||
| tagged image, or if your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection. | ||||
| 
 | ||||
| You will need to use the image with the nvidia container runtime: | ||||
| 
 | ||||
| @@ -316,16 +306,17 @@ You will need to use the image with the nvidia container runtime: | ||||
| docker run -d \ | ||||
|   ... | ||||
|   --runtime nvidia | ||||
|   ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6 | ||||
|   ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp5 | ||||
| ``` | ||||
| 
 | ||||
| ### Docker Compose - Jetson | ||||
| 
 | ||||
| ```yaml | ||||
| version: '2.4' | ||||
| services: | ||||
|   frigate: | ||||
|     ... | ||||
|     image: ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6 | ||||
|     image: ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp5 | ||||
|     runtime: nvidia   # Add this | ||||
| ``` | ||||
| 
 | ||||
| @@ -386,8 +377,13 @@ Make sure to follow the [Rockchip specific installation instructions](/frigate/i | ||||
| Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing: | ||||
| 
 | ||||
| ```yaml | ||||
| # if you try to decode a h264 encoded stream | ||||
| ffmpeg: | ||||
|   hwaccel_args: preset-rkmpp | ||||
|   hwaccel_args: preset-rk-h264 | ||||
| 
 | ||||
| # if you try to decode a h265 (hevc) encoded stream | ||||
| ffmpeg: | ||||
|   hwaccel_args: preset-rk-h265 | ||||
| ``` | ||||
| 
 | ||||
| :::note | ||||
| @@ -395,36 +391,3 @@ ffmpeg: | ||||
| Make sure that your SoC supports hardware acceleration for your input stream. For example, if your camera streams with h265 encoding and a 4k resolution, your SoC must be able to de- and encode h265 with a 4k resolution or higher. If you are unsure whether your SoC meets the requirements, take a look at the datasheet. | ||||
| 
 | ||||
| ::: | ||||
| 
 | ||||
| :::warning | ||||
| 
 | ||||
| If one or more of your cameras are not properly processed and this error is shown in the logs: | ||||
| 
 | ||||
| ``` | ||||
| [segment @ 0xaaaaff694790] Timestamps are unset in a packet for stream 0. This is deprecated and will stop working in the future. Fix your code to set the timestamps properly | ||||
| [Parsed_scale_rkrga_0 @ 0xaaaaff819070] No hw context provided on input | ||||
| [Parsed_scale_rkrga_0 @ 0xaaaaff819070] Failed to configure output pad on Parsed_scale_rkrga_0 | ||||
| Error initializing filters! | ||||
| Error marking filters as finished | ||||
| [out#1/rawvideo @ 0xaaaaff3d8730] Nothing was written into output file, because at least one of its streams received no packets. | ||||
| Restarting ffmpeg... | ||||
| ``` | ||||
| 
 | ||||
| you should try to upgrade to FFmpeg 7. This can be done using this config option: | ||||
| 
 | ||||
| ``` | ||||
| ffmpeg: | ||||
|   path: "7.0" | ||||
| ``` | ||||
| 
 | ||||
| You can set this option globally to use FFmpeg 7 for all cameras or on camera level to use it only for specific cameras. Do not confuse this option with: | ||||
| 
 | ||||
| ``` | ||||
| cameras: | ||||
|   name: | ||||
|     ffmpeg: | ||||
|       inputs: | ||||
|         - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
| ``` | ||||
| 
 | ||||
| ::: | ||||
| @@ -1,32 +0,0 @@ | ||||
| --- | ||||
| id: hardware_acceleration_enrichments | ||||
| title: Enrichments | ||||
| --- | ||||
|  | ||||
| # Enrichments | ||||
|  | ||||
| Some of Frigate's enrichments can use a discrete GPU for accelerated processing. | ||||
|  | ||||
| ## Requirements | ||||
|  | ||||
| Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU and configure the enrichment according to its specific documentation. | ||||
|  | ||||
| - **AMD** | ||||
|  | ||||
|   - ROCm will automatically be detected and used for enrichments in the `-rocm` Frigate image. | ||||
|  | ||||
| - **Intel** | ||||
|  | ||||
|   - OpenVINO will automatically be detected and used for enrichments in the default Frigate image. | ||||
|  | ||||
| - **Nvidia** | ||||
|   - Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image. | ||||
|   - Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image. | ||||
|  | ||||
| Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| A Google Coral is a TPU (Tensor Processing Unit), not a dedicated GPU (Graphics Processing Unit) and therefore does not provide any kind of acceleration for Frigate's enrichments. | ||||
|  | ||||
| ::: | ||||
| @@ -3,12 +3,10 @@ id: index | ||||
| title: Frigate Configuration | ||||
| --- | ||||
|  | ||||
| For Home Assistant Add-on installations, the config file should be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](#accessing-add-on-config-dir). | ||||
| For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored. | ||||
|  | ||||
| For all other installation types, the config file should be mapped to `/config/config.yml` inside the container. | ||||
|  | ||||
| It can be named `config.yml` or `config.yaml`, but if both files exist `config.yml` will be preferred and `config.yaml` will be ignored. | ||||
|  | ||||
| It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md) and use the built in configuration editor in Frigate's UI which supports validation. | ||||
|  | ||||
| ```yaml | ||||
| @@ -25,24 +23,9 @@ cameras: | ||||
|             - detect | ||||
| ``` | ||||
|  | ||||
| ## Accessing the Home Assistant Add-on configuration directory {#accessing-add-on-config-dir} | ||||
| ## VSCode Configuration Schema | ||||
|  | ||||
| When running Frigate through the HA Add-on, the Frigate `/config` directory is mapped to `/addon_configs/<addon_directory>` in the host, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. | ||||
|  | ||||
| | Add-on Variant             | Configuration directory                      | | ||||
| | -------------------------- | -------------------------------------------- | | ||||
| | Frigate                    | `/addon_configs/ccab4aaf_frigate`            | | ||||
| | Frigate (Full Access)      | `/addon_configs/ccab4aaf_frigate-fa`         | | ||||
| | Frigate Beta               | `/addon_configs/ccab4aaf_frigate-beta`       | | ||||
| | Frigate Beta (Full Access) | `/addon_configs/ccab4aaf_frigate-fa-beta`    | | ||||
|  | ||||
| **Whenever you see `/config` in the documentation, it refers to this directory.** | ||||
|  | ||||
| If for example you are running the standard Add-on variant and use the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file. | ||||
|  | ||||
| ## VS Code Configuration Schema | ||||
|  | ||||
| VS Code supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VS Code and Frigate as an Add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VS Code on another machine. | ||||
| VSCode supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VSCode and Frigate as an add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VSCode on another machine. | ||||
|  | ||||
| ## Environment Variable Substitution | ||||
|  | ||||
| @@ -82,10 +65,10 @@ genai: | ||||
|  | ||||
| Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values. | ||||
|  | ||||
| ### Raspberry Pi Home Assistant Add-on with USB Coral | ||||
| ### Raspberry Pi Home Assistant Addon with USB Coral | ||||
|  | ||||
| - Single camera with 720p, 5fps stream for detect | ||||
| - MQTT connected to the Home Assistant Mosquitto Add-on | ||||
| - MQTT connected to home assistant mosquitto addon | ||||
| - Hardware acceleration for decoding video | ||||
| - USB Coral detector | ||||
| - Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not | ||||
|   | ||||
| @@ -3,34 +3,32 @@ id: license_plate_recognition | ||||
| title: License Plate Recognition (LPR) | ||||
| --- | ||||
|  | ||||
| Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street. | ||||
| Frigate can recognize license plates on vehicles and automatically add the detected characters or recognized name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street. | ||||
|  | ||||
| LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles. | ||||
|  | ||||
| When a plate is recognized, the details are: | ||||
| When a plate is recognized, the detected characters or recognized name is: | ||||
|  | ||||
| - Added as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) to a tracked object. | ||||
| - Viewable in the Review Item Details pane in Review (sub labels). | ||||
| - Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates). | ||||
| - Added as a `sub_label` to the `car` tracked object. | ||||
| - Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore. | ||||
| - Filterable through the More Filters menu in Explore. | ||||
| - Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object. | ||||
| - Published via the `frigate/tracked_object_update` MQTT topic with `name` (if known) and `plate`. | ||||
| - Published via the `frigate/events` MQTT topic as a `sub_label` for the tracked object. | ||||
|  | ||||
| ## Model Requirements | ||||
|  | ||||
| Users running a Frigate+ model (or any custom model that natively detects license plates) should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model. | ||||
|  | ||||
| Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that can be configured to run on your CPU or GPU. In this case, you should _not_ define `license_plate` in your list of objects to track. | ||||
| Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that runs on your CPU. In this case, you should _not_ define `license_plate` in your list of objects to track. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle` before it can recognize a license plate. If you're using a dedicated LPR camera and have a zoomed-in view where a `car` or `motorcycle` will not be detected, you can still run LPR, but the configuration parameters will differ from the default mode. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section below. | ||||
| Frigate needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera or have a zoomed-in view, make sure the camera captures enough of the `car` for Frigate to detect it reliably. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ## Minimum System Requirements | ||||
|  | ||||
| License plate recognition works by running AI models locally on your system. The models are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required. | ||||
| License plate recognition works by running AI models locally on your system. The models are relatively lightweight and run on your CPU. At least 4GB of RAM is required. | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| @@ -41,44 +39,28 @@ lpr: | ||||
|   enabled: True | ||||
| ``` | ||||
|  | ||||
| Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You should disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras: | ||||
|  | ||||
| ```yaml | ||||
| cameras: | ||||
|   garage: | ||||
|     ... | ||||
|     lpr: | ||||
|       enabled: False | ||||
| ``` | ||||
|  | ||||
| For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car` or `motorcycle`, and that a car or motorcycle is actually being detected by Frigate. Otherwise, LPR will not run. | ||||
| Ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run. | ||||
|  | ||||
| Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements. | ||||
|  | ||||
| ## Advanced Configuration | ||||
|  | ||||
| Fine-tune the LPR feature using these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled`, `min_area`, and `enhancement`. | ||||
| Fine-tune the LPR feature using these optional parameters: | ||||
|  | ||||
| ### Detection | ||||
|  | ||||
| - **`detection_threshold`**: License plate object detection confidence score required before recognition runs. | ||||
|   - Default: `0.7` | ||||
|   - Note: This field only applies to the standalone license plate detection model; `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in. | ||||
| - **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs. | ||||
|   - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image. | ||||
|   - Note: If you are using a Frigate+ model and you set the `threshold` in your objects config for `license_plate` higher than this value, recognition will never run. It's best to ensure these values match, or this `detection_threshold` is lower than your object config `threshold`. | ||||
| - **`min_area`**: Defines the minimum size (in pixels) a license plate must be before recognition runs. | ||||
|   - Default: `1000` pixels. | ||||
|   - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates. | ||||
| - **`device`**: Device to use to run license plate recognition models. | ||||
|   - Default: `CPU` | ||||
|   - This can be `CPU` or `GPU`. For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. | ||||
| - **`model_size`**: The size of the model used to detect text on plates. | ||||
|   - Default: `small` | ||||
|   - This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large model does not improve _text recognition_, but it may improve _text detection_. | ||||
|  | ||||
| ### Recognition | ||||
|  | ||||
| - **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a `recognized_license_plate` and/or `sub_label`. | ||||
| - **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a sub label. | ||||
|   - Default: `0.9`. | ||||
| - **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a `recognized_license_plate` and/or `sub_label` to an object. | ||||
| - **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub label to an object. | ||||
|   - Use this to filter out short, incomplete, or incorrect detections. | ||||
| - **`format`**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded. | ||||
|   - `"^[A-Z]{1,3} [A-Z]{1,2} [0-9]{1,4}$"` matches plates like "B AB 1234" or "M X 7" | ||||
| @@ -87,35 +69,18 @@ Fine-tune the LPR feature using these optional parameters at the global level of | ||||
|  | ||||
| ### Matching | ||||
|  | ||||
| - **`known_plates`**: List of strings or regular expressions that assign a custom `sub_label` to `car` and `motorcycle` objects when a recognized plate matches a known value. | ||||
| - **`known_plates`**: List of strings or regular expressions that assign a custom `sub_label` to `car` objects when a recognized plate matches a known value. | ||||
|   - These labels appear in the UI, filters, and notifications. | ||||
|   - Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`. | ||||
| - **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate. | ||||
|   - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`. | ||||
|   - This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`. | ||||
|  | ||||
| ### Image Enhancement | ||||
|  | ||||
| - **`enhancement`**: A value between 0 and 10 that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. This preprocessing step can sometimes improve accuracy but may also have the opposite effect. | ||||
|   - Default: `0` (no enhancement) | ||||
|   - Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters, actually making them much harder for Frigate to recognize. | ||||
|   - This setting is best adjusted at the camera level if running LPR on multiple cameras. | ||||
|   - If Frigate is already recognizing plates correctly, leave this setting at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at 5 and adjusting as needed. You should see how different enhancement levels affect your plates. Use the `debug_save_plates` configuration option (see below). | ||||
|  | ||||
| ### Debugging | ||||
|  | ||||
| - **`debug_save_plates`**: Set to `True` to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `<camera>/<event_id>`, and named based on the capture timestamp. | ||||
|   - These saved images are not full plates but rather the specific areas of text detected on the plates. It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection. | ||||
|   - **Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage. | ||||
|  | ||||
| ## Configuration Examples | ||||
|  | ||||
| These configuration parameters are available at the global level of your config. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`. | ||||
|  | ||||
| ```yaml | ||||
| lpr: | ||||
|   enabled: True | ||||
|   min_area: 1500 # Ignore plates with an area (length x width) smaller than 1500 pixels | ||||
|   min_area: 1500 # Ignore plates smaller than 1500 pixels | ||||
|   min_plate_length: 4 # Only recognize plates with 4 or more characters | ||||
|   known_plates: | ||||
|     Wife's Car: | ||||
| @@ -132,7 +97,7 @@ lpr: | ||||
| ```yaml | ||||
| lpr: | ||||
|   enabled: True | ||||
|   min_area: 4000 # Run recognition on larger plates only (4000 pixels represents a 63x63 pixel square in your image) | ||||
|   min_area: 4000 # Run recognition on larger plates only | ||||
|   recognition_threshold: 0.85 | ||||
|   format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers | ||||
|   match_distance: 1 # Allow one character variation in plate matching | ||||
| @@ -144,181 +109,22 @@ lpr: | ||||
|       - "MN D3163" | ||||
| ``` | ||||
|  | ||||
| :::note | ||||
|  | ||||
| If you want to detect cars on cameras but don't want to use resources to run LPR on those cars, you should disable LPR for those specific cameras. | ||||
|  | ||||
| ```yaml | ||||
| cameras: | ||||
|   side_yard: | ||||
|     lpr: | ||||
|       enabled: False | ||||
|     ... | ||||
| ``` | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ## Dedicated LPR Cameras | ||||
|  | ||||
| Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles, often with fine-tuned settings to capture plates at night. | ||||
|  | ||||
| To mark a camera as a dedicated LPR camera, add `type: "lpr"` the camera configuration. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| Frigate's dedicated LPR mode is optimized for cameras with a narrow field of view, specifically positioned and zoomed to capture license plates exclusively. If your camera provides a general overview of a scene rather than a tightly focused view, this mode is not recommended. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| Users can configure Frigate's dedicated LPR mode in two different ways depending on whether a Frigate+ (or native `license_plate` detecting) model is used: | ||||
|  | ||||
| ### Using a Frigate+ (or Native `license_plate` Detecting) Model | ||||
|  | ||||
| Users running a Frigate+ model (or any model that natively detects `license_plate`) can take advantage of `license_plate` detection. This allows license plates to be treated as standard objects in dedicated LPR mode, meaning that alerts, detections, snapshots, and other Frigate features work as usual, and plates are detected efficiently through your configured object detector. | ||||
|  | ||||
| An example configuration for a dedicated LPR camera using a `license_plate`-detecting model: | ||||
|  | ||||
| ```yaml | ||||
| # LPR global configuration | ||||
| lpr: | ||||
|   enabled: True | ||||
|   device: CPU # can also be GPU if available | ||||
|  | ||||
| # Dedicated LPR camera configuration | ||||
| cameras: | ||||
|   dedicated_lpr_camera: | ||||
|     type: "lpr" # required to use dedicated LPR camera mode | ||||
|     ffmpeg: ... # add your streams | ||||
|     detect: | ||||
|       enabled: True | ||||
|       fps: 5 # increase to 10 if vehicles move quickly across your frame. Higher than 10 is unnecessary and is not recommended. | ||||
|       min_initialized: 2 | ||||
|       width: 1920 | ||||
|       height: 1080 | ||||
|     objects: | ||||
|       track: | ||||
|         - license_plate | ||||
|       filters: | ||||
|         license_plate: | ||||
|           threshold: 0.7 | ||||
|     motion: | ||||
|       threshold: 30 | ||||
|       contour_area: 60 # use an increased value to tune out small motion changes | ||||
|       improve_contrast: false | ||||
|       mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked | ||||
|     record: | ||||
|       enabled: True # disable recording if you only want snapshots | ||||
|     snapshots: | ||||
|       enabled: True | ||||
|     review: | ||||
|       detections: | ||||
|         labels: | ||||
|           - license_plate | ||||
| ``` | ||||
|  | ||||
| With this setup: | ||||
|  | ||||
| - License plates are treated as normal objects in Frigate. | ||||
| - Scores, alerts, detections, and snapshots work as expected. | ||||
| - Snapshots will have license plate bounding boxes on them. | ||||
| - The `frigate/events` MQTT topic will publish tracked object updates. | ||||
| - Debug view will display `license_plate` bounding boxes. | ||||
| - If you are using a Frigate+ model and want to submit images from your dedicated LPR camera for model training and fine-tuning, annotate both the `car` / `motorcycle` and the `license_plate` in the snapshots on the Frigate+ website, even if the car is barely visible. | ||||
|  | ||||
| ### Using the Secondary LPR Pipeline (Without Frigate+) | ||||
|  | ||||
| If you are not running a Frigate+ model, you can use Frigate’s built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs. | ||||
|  | ||||
| An example configuration for a dedicated LPR camera using the secondary pipeline: | ||||
|  | ||||
| ```yaml | ||||
| # LPR global configuration | ||||
| lpr: | ||||
|   enabled: True | ||||
|   device: CPU # can also be GPU if available and correct Docker image is used | ||||
|   detection_threshold: 0.7 # change if necessary | ||||
|  | ||||
| # Dedicated LPR camera configuration | ||||
| cameras: | ||||
|   dedicated_lpr_camera: | ||||
|     type: "lpr" # required to use dedicated LPR camera mode | ||||
|     lpr: | ||||
|       enabled: True | ||||
|       enhancement: 3 # optional, enhance the image before trying to recognize characters | ||||
|     ffmpeg: ... # add your streams | ||||
|     detect: | ||||
|       enabled: False # disable Frigate's standard object detection pipeline | ||||
|       fps: 5 # increase if necessary, though high values may slow down Frigate's enrichments pipeline and use considerable CPU | ||||
|       width: 1920 | ||||
|       height: 1080 | ||||
|     objects: | ||||
|       track: [] # required when not using a Frigate+ model for dedicated LPR mode | ||||
|     motion: | ||||
|       threshold: 30 | ||||
|       contour_area: 60 # use an increased value here to tune out small motion changes | ||||
|       improve_contrast: false | ||||
|       mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked | ||||
|     record: | ||||
|       enabled: True # disable recording if you only want snapshots | ||||
|     review: | ||||
|       detections: | ||||
|         enabled: True | ||||
|         retain: | ||||
|           default: 7 | ||||
| ``` | ||||
|  | ||||
| With this setup: | ||||
|  | ||||
| - The standard object detection pipeline is bypassed. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. You must **not** specify `license_plate` as an object to track. | ||||
| - The license plate detector runs on the full frame whenever motion is detected and processes frames according to your detect `fps` setting. | ||||
| - Review items will always be classified as a `detection`. | ||||
| - Snapshots will always be saved. | ||||
| - Zones and object masks are **not** used. | ||||
| - The `frigate/events` MQTT topic will **not** publish tracked object updates with the license plate bounding box and score, though `frigate/reviews` will publish if recordings are enabled. If a plate is recognized as a known plate, publishing will occur with an updated `sub_label` field. If characters are recognized, publishing will occur with an updated `recognized_license_plate` field. | ||||
| - License plate snapshots are saved at the highest-scoring moment and appear in Explore. | ||||
| - Debug view will not show `license_plate` bounding boxes. | ||||
|  | ||||
| ### Summary | ||||
|  | ||||
| | Feature                 | Native `license_plate` detecting Model (like Frigate+) | Secondary Pipeline (without native model or Frigate+)           | | ||||
| | ----------------------- | ------------------------------------------------------ | --------------------------------------------------------------- | | ||||
| | License Plate Detection | Uses `license_plate` as a tracked object               | Runs a dedicated LPR pipeline                                   | | ||||
| | FPS Setting             | 5 (increase for fast-moving cars)                      | 5 (increase for fast-moving cars, but it may use much more CPU) | | ||||
| | Object Detection        | Standard Frigate+ detection applies                    | Bypasses standard object detection                              | | ||||
| | Debug View              | May show `license_plate` bounding boxes                | May **not** show `license_plate` bounding boxes                 | | ||||
| | MQTT `frigate/events`   | Publishes tracked object updates                       | Publishes limited updates                                       | | ||||
| | Explore                 | Recognized plates available in More Filters            | Recognized plates available in More Filters                     | | ||||
|  | ||||
| By selecting the appropriate configuration, users can optimize their dedicated LPR cameras based on whether they are using a Frigate+ model or the secondary LPR pipeline. | ||||
|  | ||||
| ### Best practices for using Dedicated LPR camera mode | ||||
|  | ||||
| - Tune your motion detection and increase the `contour_area` until you see only larger motion boxes being created as cars pass through the frame (likely somewhere between 50-90 for a 1920x1080 detect stream). Increasing the `contour_area` filters out small areas of motion and will prevent excessive resource use from looking for license plates in frames that don't even have a car passing through it. | ||||
| - Disable the `improve_contrast` motion setting, especially if you are running LPR at night and the frame is mostly dark. This will prevent small pixel changes and smaller areas of motion from triggering license plate detection. | ||||
| - Ensure your camera's timestamp is covered with a motion mask so that it's not incorrectly detected as a license plate. | ||||
| - For non-Frigate+ users, you may need to change your camera settings for a clearer image or decrease your global `recognition_threshold` config if your plates are not being accurately recognized at night. | ||||
| - The secondary pipeline mode runs a local AI model on your CPU or GPU (depending on how `device` is configured) to detect plates. Increasing detect `fps` will increase resource usage proportionally. | ||||
|  | ||||
| ## FAQ | ||||
|  | ||||
| ### Why isn't my license plate being detected and recognized? | ||||
|  | ||||
| Ensure that: | ||||
|  | ||||
| - Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate's characters, Frigate certainly won't be able to, even if the model is recognizing a `license_plate`. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling. | ||||
| - Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate, Frigate certainly won't be able to. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling. | ||||
| - The plate is large enough in the image (try adjusting `min_area`) or increasing the resolution of your camera's stream. | ||||
| - Your `enhancement` level (if you've changed it from the default of `0`) is not too high. Too much enhancement will run too much denoising and cause the plate characters to become blurry and unreadable. | ||||
| - A `car` is detected first, as LPR only runs on recognized vehicles. | ||||
|  | ||||
| If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track. | ||||
| If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track. | ||||
|  | ||||
| Recognized plates will show as object labels in the debug view and will appear in the "Recognized License Plates" select box in the More Filters popout in Explore. | ||||
| ### Can I run LPR without detecting `car` objects? | ||||
|  | ||||
| If you are still having issues detecting plates, start with a basic configuration and see the debugging tips below. | ||||
|  | ||||
| ### Can I run LPR without detecting `car` or `motorcycle` objects? | ||||
|  | ||||
| In normal LPR mode, Frigate requires a `car` or `motorcycle` to be detected first before recognizing a license plate. If you have a dedicated LPR camera, you can change the camera `type` to `"lpr"` to use the Dedicated LPR Camera algorithm. This comes with important caveats, though. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section above. | ||||
| No, Frigate requires a `car` to be detected first before recognizing a license plate. | ||||
|  | ||||
| ### How can I improve detection accuracy? | ||||
|  | ||||
| @@ -330,66 +136,17 @@ In normal LPR mode, Frigate requires a `car` or `motorcycle` to be detected firs | ||||
|  | ||||
| Yes, but performance depends on camera quality, lighting, and infrared capabilities. Make sure your camera can capture clear images of plates at night. | ||||
|  | ||||
| ### Can I limit LPR to specific zones? | ||||
|  | ||||
| LPR, like other Frigate enrichments, runs at the camera level rather than the zone level. While you can't restrict LPR to specific zones directly, you can control when recognition runs by setting a `min_area` value to filter out smaller detections. | ||||
|  | ||||
| ### How can I match known plates with minor variations? | ||||
|  | ||||
| Use `match_distance` to allow small character mismatches. Alternatively, define multiple variations in `known_plates`. | ||||
|  | ||||
| ### How do I debug LPR issues? | ||||
|  | ||||
| Start with ["Why isn't my license plate being detected and recognized?"](#why-isnt-my-license-plate-being-detected-and-recognized). If you are still having issues, work through these steps. | ||||
|  | ||||
| 1. Enable debug logs to see exactly what Frigate is doing. | ||||
|  | ||||
|    - Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only keep this enabled when necessary. | ||||
|  | ||||
|      ```yaml | ||||
|      logger: | ||||
|        default: info | ||||
|        logs: | ||||
|          frigate.data_processing.common.license_plate: debug | ||||
|      ``` | ||||
|  | ||||
| 2. Ensure your plates are being _detected_. | ||||
|  | ||||
|    If you are using a Frigate+ or `license_plate` detecting model: | ||||
|  | ||||
|    - Watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected. | ||||
|    - View MQTT messages for `frigate/events` to verify detected plates. | ||||
|    - You may need to adjust your `min_score` and/or `threshold` for the `license_plate` object if your plates are not being detected. | ||||
|  | ||||
|    If you are **not** using a Frigate+ or `license_plate` detecting model: | ||||
|  | ||||
|    - Watch the debug logs for messages from the YOLOv9 plate detector. | ||||
|    - You may need to adjust your `detection_threshold` if your plates are not being detected. | ||||
|  | ||||
| 3. Ensure the characters on detected plates are being _recognized_. | ||||
|  | ||||
|    - Enable `debug_save_plates` to save images of detected text on plates to the clips directory (`/media/frigate/clips/lpr`). Ensure these images are readable and the text is clear. | ||||
|    - Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` or `motorcycle` label will change to the recognized plate when LPR is enabled and working. | ||||
|    - Adjust `recognition_threshold` settings per the suggestions [above](#advanced-configuration). | ||||
| - View MQTT messages for `frigate/events` to verify detected plates. | ||||
| - Adjust `detection_threshold` and `recognition_threshold` settings. | ||||
| - If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`. | ||||
| - Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary. | ||||
|  | ||||
| ### Will LPR slow down my system? | ||||
|  | ||||
| LPR's performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU or GPU for optimal results. If you are running the Dedicated LPR Camera mode, resource usage will be higher compared to users who run a model that natively detects license plates. Tune your motion detection settings for your dedicated LPR camera so that the license plate detection model runs only when necessary. | ||||
|  | ||||
| ### I am seeing a YOLOv9 plate detection metric in Enrichment Metrics, but I have a Frigate+ or custom model that detects `license_plate`. Why is the YOLOv9 model running? | ||||
|  | ||||
| The YOLOv9 license plate detector model will run (and the metric will appear) if you've enabled LPR but haven't defined `license_plate` as an object to track, either at the global or camera level. | ||||
|  | ||||
| If you are detecting `car` or `motorcycle` on cameras where you don't want to run LPR, make sure you disable LPR at the camera level. And if you do want to run LPR on those cameras, make sure you define `license_plate` as an object to track. | ||||
|  | ||||
| ### It looks like Frigate picked up my camera's timestamp or overlay text as the license plate. How can I prevent this? | ||||
|  | ||||
| This could happen if cars or motorcycles travel close to your camera's timestamp or overlay text. You could either move the text through your camera's firmware, or apply a mask to it in Frigate. | ||||
|  | ||||
| If you are using a model that natively detects `license_plate`, add an _object mask_ of type `license_plate` and a _motion mask_ over your text. | ||||
|  | ||||
| If you are not using a model that natively detects `license_plate` or you are using dedicated LPR camera mode, only a _motion mask_ over your text is required. | ||||
|  | ||||
| ### I see "Error running ... model" in my logs. How can I fix this? | ||||
|  | ||||
| This usually happens when your GPU is unable to compile or use one of the LPR models. Set your `device` to `CPU` and try again. GPU acceleration only provides a slight performance increase, and the models are lightweight enough to run without issue on most CPUs. | ||||
| LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results. | ||||
|   | ||||
| @@ -23,7 +23,7 @@ If you are using go2rtc, you should adjust the following settings in your camera | ||||
|  | ||||
| - Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_. as these non-standard codecs remove keyframes (see below). | ||||
| - Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio. | ||||
| - I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well. | ||||
| - I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well. | ||||
|  | ||||
| The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information. | ||||
|  | ||||
| @@ -42,16 +42,6 @@ go2rtc: | ||||
|       - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) | ||||
| ``` | ||||
|  | ||||
| If your camera does not support AAC audio or you are having problems with Live view, try transcoding to AAC audio directly: | ||||
|  | ||||
| ```yaml | ||||
| go2rtc: | ||||
|   streams: | ||||
|     rtsp_cam: # <- for RTSP streams | ||||
|       - "ffmpeg:rtsp://192.168.1.5:554/live0#video=copy#audio=aac" # <- copies video stream and transcodes to aac audio | ||||
|       - "ffmpeg:rtsp_cam#audio=opus" # <- provides support for WebRTC | ||||
| ``` | ||||
|  | ||||
| If your camera does not have audio and you are having problems with Live view, you should have go2rtc send video only: | ||||
|  | ||||
| ```yaml | ||||
| @@ -114,9 +104,9 @@ cameras: | ||||
| WebRTC works by creating a TCP or UDP connection on port `8555`. However, it requires additional configuration: | ||||
|  | ||||
| - For external access, over the internet, setup your router to forward port `8555` to port `8555` on the Frigate device, for both TCP and UDP. | ||||
| - For internal/local access, unless you are running through the HA Add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate: | ||||
| - For internal/local access, unless you are running through the add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate: | ||||
|  | ||||
|   ```yaml title="config.yml" | ||||
|   ```yaml title="/config/frigate.yaml" | ||||
|   go2rtc: | ||||
|     streams: | ||||
|       test_cam: ... | ||||
| @@ -131,9 +121,9 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| This extra configuration may not be required if Frigate has been installed as a Home Assistant Add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate. | ||||
| This extra configuration may not be required if Frigate has been installed as a Home Assistant add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate. | ||||
|  | ||||
| However, it is recommended if issues occur to define the candidates manually. You should do this if the Frigate Add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the Add-on logs page during the initialization: | ||||
| However, it is recommended if issues occur to define the candidates manually. You should do this if the Frigate add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the add-on logs page during the initialization: | ||||
|  | ||||
| ```log | ||||
| [WARN] Failed to get IP address from supervisor | ||||
| @@ -172,7 +162,7 @@ For devices that support two way talk, Frigate can be configured to use the feat | ||||
|  | ||||
| - Set up go2rtc with [WebRTC](#webrtc-extra-configuration). | ||||
| - Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)). | ||||
| - For the Home Assistant Frigate card, [follow the docs](http://card.camera/#/usage/2-way-audio) for the correct source. | ||||
| - For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source. | ||||
|  | ||||
| To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell) | ||||
|  | ||||
| @@ -189,12 +179,7 @@ Frigate provides a dialog in the Camera Group Edit pane with several options for | ||||
|  | ||||
| :::note | ||||
|  | ||||
| The default dashboard ("All Cameras") will always use: | ||||
|  | ||||
| - Smart Streaming, unless you've disabled the global Automatic Live View in Settings. | ||||
| - The first entry set in your `streams` configuration, if defined. | ||||
|  | ||||
| Use a camera group if you want to change any of these settings from the defaults. | ||||
| The default dashboard ("All Cameras") will always use Smart Streaming and the first entry set in your `streams` configuration, if defined. Use a camera group if you want to change any of these settings from the defaults. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -202,12 +187,6 @@ Use a camera group if you want to change any of these settings from the defaults | ||||
|  | ||||
| Cameras can be temporarily disabled through the Frigate UI and through [MQTT](/integrations/mqtt#frigatecamera_nameenabledset) to conserve system resources. When disabled, Frigate's ffmpeg processes are terminated — recording stops, object detection is paused, and the Live dashboard displays a blank image with a disabled message. Review items, tracked objects, and historical footage for disabled cameras can still be accessed via the UI. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| Disabling a camera via the Frigate UI or MQTT is temporary and does not persist through restarts of Frigate. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| For restreamed cameras, go2rtc remains active but does not use system resources for decoding or processing unless there are active external consumers (such as the Advanced Camera Card in Home Assistant using a go2rtc source). | ||||
|  | ||||
| Note that disabling a camera through the config file (`enabled: False`) removes all related UI elements, including historical footage access. To retain access while disabling the camera, keep it enabled in the config and use the UI or MQTT to disable it temporarily. | ||||
| @@ -224,11 +203,9 @@ Note that disabling a camera through the config file (`enabled: False`) removes | ||||
|  | ||||
|    Frigate intelligently selects the live streaming technology based on a number of factors (user-selected modes like two-way talk, camera settings, browser capabilities, available bandwidth) and prioritizes showing an actual up-to-date live view of your camera's stream as quickly as possible. | ||||
|  | ||||
|    When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. Continuous streaming mode does not have an automatic reset mechanism, but you can use the _Reset_ option to force a reload of your stream. | ||||
|    When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. You can also try using the _Reset_ button to force a reload of your stream. | ||||
|  | ||||
|    If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again. | ||||
|  | ||||
|    If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the [recommendations above](#camera_settings_recommendations). | ||||
|    If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the recommendations above or ensure you have enough bandwidth available. | ||||
|  | ||||
| 3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?** | ||||
|  | ||||
| @@ -244,8 +221,6 @@ Note that disabling a camera through the config file (`enabled: False`) removes | ||||
|  | ||||
|    This static image is pulled from the stream defined in your config with the `detect` role. When activity is detected, images from the `detect` stream immediately begin updating at ~5 frames per second so you can see the activity until the live player is loaded and begins playing. This usually only takes a second or two. If the live player times out, buffers, or has streaming errors, the jsmpeg player is loaded and plays a video-only stream from the `detect` role. When activity ends, the players are destroyed and a static image is displayed until activity is detected again, and the process repeats. | ||||
|  | ||||
|    Smart streaming depends on having your camera's motion `threshold` and `contour_area` config values dialed in. Use the Motion Tuner in Settings in the UI to tune these values in real-time. | ||||
|  | ||||
|    This is Frigate's default and recommended setting because it results in a significant bandwidth savings, especially for high resolution cameras. | ||||
|  | ||||
| 6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?** | ||||
|   | ||||
| @@ -77,7 +77,7 @@ At this point if motion is working as desired there is no reason to continue wit | ||||
|  | ||||
| Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone. | ||||
|  | ||||
| However, if the preferred day settings do not work well at night it is recommended to use Home Assistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection. | ||||
| However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection. | ||||
|  | ||||
| ## Tuning For Large Changes In Motion | ||||
|  | ||||
| @@ -104,4 +104,4 @@ Lightning threshold does not stop motion based recordings from being saved. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera. | ||||
| Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in no motion detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera. | ||||
|   | ||||
| @@ -24,13 +24,10 @@ Frigate supports multiple different detectors that work on different types of ha | ||||
| - [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection. | ||||
| - [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured. | ||||
|  | ||||
| **Nvidia GPU** | ||||
| **Nvidia** | ||||
|  | ||||
| - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured. | ||||
|  | ||||
| **Nvidia Jetson** | ||||
| - [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Jetson devices, using one of many default models. | ||||
| - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt-jp6` Frigate image when a supported ONNX model is configured. | ||||
| - [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models. | ||||
| - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured. | ||||
|  | ||||
| **Rockchip** | ||||
|  | ||||
| @@ -132,9 +129,9 @@ detectors: | ||||
|     type: edgetpu | ||||
|     device: pci | ||||
| ``` | ||||
|  | ||||
| --- | ||||
|  | ||||
|  | ||||
| ## Hailo-8 | ||||
|  | ||||
| This detector is available for use with both Hailo-8 and Hailo-8L AI Acceleration Modules. The integration automatically detects your hardware architecture via the Hailo CLI and selects the appropriate default model if no custom model is specified. | ||||
| @@ -143,19 +140,18 @@ See the [installation docs](../frigate/installation.md#hailo-8l) for information | ||||
|  | ||||
| ### Configuration | ||||
|  | ||||
| When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**. | ||||
| When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**.   | ||||
| If both are provided, the detector will first check for the model at the given local path. If the file is not found, it will download the model from the specified URL. The model file is cached under `/config/model_cache/hailo`. | ||||
|  | ||||
| #### YOLO | ||||
| #### YOLO  | ||||
|  | ||||
| Use this configuration for YOLO-based models. When no custom model path or URL is provided, the detector automatically downloads the default model based on the detected hardware: | ||||
|  | ||||
| - **Hailo-8 hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`) | ||||
| - **Hailo-8L hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`) | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   hailo: | ||||
|   hailo8l: | ||||
|     type: hailo8l | ||||
|     device: PCIe | ||||
|  | ||||
| @@ -166,7 +162,6 @@ model: | ||||
|   input_pixel_format: rgb | ||||
|   input_dtype: int | ||||
|   model_type: yolo-generic | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
|  | ||||
|   # The detector automatically selects the default model based on your hardware: | ||||
|   # - For Hailo-8 hardware: YOLOv6n (default: yolov6n.hef) | ||||
| @@ -188,7 +183,7 @@ For SSD-based models, provide either a model path or URL to your compiled SSD mo | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   hailo: | ||||
|   hailo8l: | ||||
|     type: hailo8l | ||||
|     device: PCIe | ||||
|  | ||||
| @@ -212,7 +207,7 @@ The Hailo detector supports all YOLO models compiled for Hailo hardware that inc | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   hailo: | ||||
|   hailo8l: | ||||
|     type: hailo8l | ||||
|     device: PCIe | ||||
|  | ||||
| @@ -223,23 +218,23 @@ model: | ||||
|   input_pixel_format: rgb | ||||
|   input_dtype: int | ||||
|   model_type: yolo-generic | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
|   # Optional: Specify a local model path. | ||||
|   # path: /config/model_cache/hailo/custom_model.hef | ||||
|   # | ||||
|   # Alternatively, or as a fallback, provide a custom URL: | ||||
|   # path: https://custom-model-url.com/path/to/model.hef | ||||
| ``` | ||||
|  | ||||
| For additional ready-to-use models, please visit: https://github.com/hailo-ai/hailo_model_zoo | ||||
|  | ||||
| Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation. | ||||
| Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation.  | ||||
|  | ||||
| > **Note:** | ||||
| > **Note:**   | ||||
| > The config.path parameter can accept either a local file path or a URL ending with .hef. When provided, the detector will first check if the path is a local file path. If the file exists locally, it will use it directly. If the file is not found locally or if a URL was provided, it will attempt to download the model from the specified URL. | ||||
|  | ||||
| --- | ||||
|  | ||||
|  | ||||
|  | ||||
| ## OpenVINO Detector | ||||
|  | ||||
| The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. | ||||
| @@ -315,13 +310,13 @@ model: | ||||
|  | ||||
| Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. | ||||
|  | ||||
| #### YOLO (v3, v4, v7, v9) | ||||
| #### YOLOv9 | ||||
|  | ||||
| YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default. | ||||
| [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default. | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. | ||||
| The YOLOv9 detector has been designed to support YOLOv9 models, but may support other YOLO model architectures as well. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -334,74 +329,122 @@ detectors: | ||||
|     device: GPU | ||||
|  | ||||
| model: | ||||
|   model_type: yolo-generic | ||||
|   width: 320 # <--- should match the imgsize set during model export | ||||
|   height: 320 # <--- should match the imgsize set during model export | ||||
|   model_type: yolov9 | ||||
|   width: 640 # <--- should match the imgsize set during model export | ||||
|   height: 640 # <--- should match the imgsize set during model export | ||||
|   input_tensor: nchw | ||||
|   input_dtype: float | ||||
|   path: /config/model_cache/yolo.onnx | ||||
|   path: /config/model_cache/yolov9-t.onnx | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. | ||||
|  | ||||
| #### RF-DETR | ||||
| ## NVidia TensorRT Detector | ||||
|  | ||||
| [RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate. | ||||
| Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection. | ||||
|  | ||||
| :::warning | ||||
| ### Minimum Hardware Support | ||||
|  | ||||
| Due to the size and complexity of the RF-DETR model, it is only recommended to be run with discrete Arc Graphics Cards. | ||||
| The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below. | ||||
|  | ||||
| ::: | ||||
| To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU. | ||||
|  | ||||
| After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: | ||||
| There are improved capabilities in newer GPU architectures that TensorRT can benefit from, such as INT8 operations and Tensor cores. The features compatible with your hardware will be optimized when the model is converted to a trt file. Currently the script provided for generating the model provides a switch to enable/disable FP16 operations. If you wish to use newer features such as INT8 optimization, more work is required. | ||||
|  | ||||
| #### Compatibility References: | ||||
|  | ||||
| [NVIDIA TensorRT Support Matrix](https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-841/support-matrix/index.html) | ||||
|  | ||||
| [NVIDIA CUDA Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html) | ||||
|  | ||||
| [NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus) | ||||
|  | ||||
| ### Generate Models | ||||
|  | ||||
| The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models. | ||||
|  | ||||
| The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host. | ||||
|  | ||||
| By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder. | ||||
|  | ||||
| If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU. | ||||
|  | ||||
| If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it. | ||||
|  | ||||
| Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below. | ||||
|  | ||||
| <details> | ||||
| <summary>Available Models</summary> | ||||
| ``` | ||||
| yolov3-288 | ||||
| yolov3-416 | ||||
| yolov3-608 | ||||
| yolov3-spp-288 | ||||
| yolov3-spp-416 | ||||
| yolov3-spp-608 | ||||
| yolov3-tiny-288 | ||||
| yolov3-tiny-416 | ||||
| yolov4-288 | ||||
| yolov4-416 | ||||
| yolov4-608 | ||||
| yolov4-csp-256 | ||||
| yolov4-csp-512 | ||||
| yolov4-p5-448 | ||||
| yolov4-p5-896 | ||||
| yolov4-tiny-288 | ||||
| yolov4-tiny-416 | ||||
| yolov4x-mish-320 | ||||
| yolov4x-mish-640 | ||||
| yolov7-tiny-288 | ||||
| yolov7-tiny-416 | ||||
| yolov7-640 | ||||
| yolov7-416 | ||||
| yolov7-320 | ||||
| yolov7x-640 | ||||
| yolov7x-320 | ||||
| ``` | ||||
| </details> | ||||
|  | ||||
| An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this: | ||||
|  | ||||
| ```yml | ||||
| frigate: | ||||
|   environment: | ||||
|     - YOLO_MODELS=yolov7-320,yolov7x-640 | ||||
|     - USE_FP16=false | ||||
| ``` | ||||
|  | ||||
| If you have multiple GPUs passed through to Frigate, you can specify which one to use for the model conversion. The conversion script will use the first visible GPU, however in systems with mixed GPU models you may not want to use the default index for object detection. Add the `TRT_MODEL_PREP_DEVICE` environment variable to select a specific GPU. | ||||
|  | ||||
| ```yml | ||||
| frigate: | ||||
|   environment: | ||||
|     - TRT_MODEL_PREP_DEVICE=0 # Optionally, select which GPU is used for  model optimization | ||||
| ``` | ||||
|  | ||||
| ### Configuration Parameters | ||||
|  | ||||
| The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container. | ||||
|  | ||||
| The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. These model path and dimensions used will depend on which model you have generated. | ||||
|  | ||||
| Use the config below to work with generated TRT models: | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   ov: | ||||
|     type: openvino | ||||
|     device: GPU | ||||
|   tensorrt: | ||||
|     type: tensorrt | ||||
|     device: 0 #This is the default, select the first GPU | ||||
|  | ||||
| model: | ||||
|   model_type: rfdetr | ||||
|   path: /config/model_cache/tensorrt/yolov7-320.trt | ||||
|   input_tensor: nchw | ||||
|   input_pixel_format: rgb | ||||
|   width: 320 | ||||
|   height: 320 | ||||
|   input_tensor: nchw | ||||
|   input_dtype: float | ||||
|   path: /config/model_cache/rfdetr.onnx | ||||
| ``` | ||||
|  | ||||
| #### D-FINE | ||||
|  | ||||
| [D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| Currently D-FINE models only run on OpenVINO in CPU mode, GPUs currently fail to compile the model | ||||
|  | ||||
| ::: | ||||
|  | ||||
| After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   ov: | ||||
|     type: openvino | ||||
|     device: GPU | ||||
|  | ||||
| model: | ||||
|   model_type: dfine | ||||
|   width: 640 | ||||
|   height: 640 | ||||
|   input_tensor: nchw | ||||
|   input_dtype: float | ||||
|   path: /config/model_cache/dfine_s_obj2coco.onnx | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. | ||||
|  | ||||
| ## AMD/ROCm GPU detector | ||||
|  | ||||
| ### Setup | ||||
| @@ -419,7 +462,7 @@ $ docker run --device=/dev/kfd --device=/dev/dri  \ | ||||
|     ... | ||||
| ``` | ||||
|  | ||||
| When using Docker Compose: | ||||
| When using docker compose: | ||||
|  | ||||
| ```yaml | ||||
| services: | ||||
| @@ -451,7 +494,7 @@ $ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \ | ||||
|     ... | ||||
| ``` | ||||
|  | ||||
| When using Docker Compose: | ||||
| When using docker compose: | ||||
|  | ||||
| ```yaml | ||||
| services: | ||||
| @@ -486,7 +529,6 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/ | ||||
| ### Supported Models | ||||
|  | ||||
| See [ONNX supported models](#supported-models) for supported models, there are some caveats: | ||||
|  | ||||
| - D-FINE models are not supported | ||||
| - YOLO-NAS models are known to not run well on integrated GPUs | ||||
|  | ||||
| @@ -508,7 +550,7 @@ If the correct build is used for your GPU then the GPU will be detected and used | ||||
|  | ||||
| - **Nvidia** | ||||
|   - Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image. | ||||
|   - Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp6` Frigate image. | ||||
|   - Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -551,13 +593,13 @@ model: | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| #### YOLO (v3, v4, v7, v9) | ||||
| #### YOLOv9 | ||||
|  | ||||
| YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default. | ||||
| [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default. | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. See [the models section](#downloading-yolo-models) for more information on downloading YOLO models for use in Frigate. | ||||
| The YOLOv9 detector has been designed to support YOLOv9 models, but may support other YOLO model architectures as well. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -569,65 +611,28 @@ detectors: | ||||
|     type: onnx | ||||
|  | ||||
| model: | ||||
|   model_type: yolo-generic | ||||
|   width: 320 # <--- should match the imgsize set during model export | ||||
|   height: 320 # <--- should match the imgsize set during model export | ||||
|   model_type: yolov9 | ||||
|   width: 640 # <--- should match the imgsize set during model export | ||||
|   height: 640 # <--- should match the imgsize set during model export | ||||
|   input_tensor: nchw | ||||
|   input_dtype: float | ||||
|   path: /config/model_cache/yolo.onnx | ||||
|   path: /config/model_cache/yolov9-t.onnx | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. | ||||
|  | ||||
| #### YOLOx | ||||
|  | ||||
| [YOLOx](https://github.com/Megvii-BaseDetection/YOLOX) models are supported, but not included by default. See [the models section](#downloading-yolo-models) for more information on downloading the YOLOx model for use in Frigate. | ||||
|  | ||||
| After placing the downloaded onnx model in your config folder, you can use the following configuration: | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   onnx: | ||||
|     type: onnx | ||||
|  | ||||
| model: | ||||
|   model_type: yolox | ||||
|   width: 416 # <--- should match the imgsize set during model export | ||||
|   height: 416 # <--- should match the imgsize set during model export | ||||
|   input_tensor: nchw | ||||
|   input_dtype: float_denorm | ||||
|   path: /config/model_cache/yolox_tiny.onnx | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. | ||||
|  | ||||
| #### RF-DETR | ||||
|  | ||||
| [RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate. | ||||
|  | ||||
| After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   onnx: | ||||
|     type: onnx | ||||
|  | ||||
| model: | ||||
|   model_type: rfdetr | ||||
|   width: 320 | ||||
|   height: 320 | ||||
|   input_tensor: nchw | ||||
|   input_dtype: float | ||||
|   path: /config/model_cache/rfdetr.onnx | ||||
| ``` | ||||
|  | ||||
| #### D-FINE | ||||
|  | ||||
| [D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. | ||||
| [D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. | ||||
|  | ||||
| After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: | ||||
| :::warning | ||||
|  | ||||
| D-FINE is currently not supported on OpenVINO | ||||
|  | ||||
| ::: | ||||
|  | ||||
| After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
| @@ -699,88 +704,6 @@ To verify that the integration is working correctly, start Frigate and observe t | ||||
|  | ||||
| # Community Supported Detectors | ||||
|  | ||||
| ## NVidia TensorRT Detector | ||||
|  | ||||
| Nvidia Jetson devices may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt-jp6` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6`. This detector is designed to work with Yolo models for object detection. | ||||
|  | ||||
| ### Generate Models | ||||
|  | ||||
| The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models. | ||||
|  | ||||
| The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host. | ||||
|  | ||||
| By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder. | ||||
|  | ||||
| If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU. | ||||
|  | ||||
| If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it. | ||||
|  | ||||
| Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below. | ||||
|  | ||||
| <details> | ||||
| <summary>Available Models</summary> | ||||
| ``` | ||||
| yolov3-288 | ||||
| yolov3-416 | ||||
| yolov3-608 | ||||
| yolov3-spp-288 | ||||
| yolov3-spp-416 | ||||
| yolov3-spp-608 | ||||
| yolov3-tiny-288 | ||||
| yolov3-tiny-416 | ||||
| yolov4-288 | ||||
| yolov4-416 | ||||
| yolov4-608 | ||||
| yolov4-csp-256 | ||||
| yolov4-csp-512 | ||||
| yolov4-p5-448 | ||||
| yolov4-p5-896 | ||||
| yolov4-tiny-288 | ||||
| yolov4-tiny-416 | ||||
| yolov4x-mish-320 | ||||
| yolov4x-mish-640 | ||||
| yolov7-tiny-288 | ||||
| yolov7-tiny-416 | ||||
| yolov7-640 | ||||
| yolov7-416 | ||||
| yolov7-320 | ||||
| yolov7x-640 | ||||
| yolov7x-320 | ||||
| ``` | ||||
| </details> | ||||
|  | ||||
| An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models would look something like this: | ||||
|  | ||||
| ```yml | ||||
| frigate: | ||||
|   environment: | ||||
|     - YOLO_MODELS=yolov7-320,yolov7x-640 | ||||
|     - USE_FP16=false | ||||
| ``` | ||||
|  | ||||
| ### Configuration Parameters | ||||
|  | ||||
| The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration_video.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container. | ||||
|  | ||||
| The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. These model path and dimensions used will depend on which model you have generated. | ||||
|  | ||||
| Use the config below to work with generated TRT models: | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   tensorrt: | ||||
|     type: tensorrt | ||||
|     device: 0 #This is the default, select the first GPU | ||||
|  | ||||
| model: | ||||
|   path: /config/model_cache/tensorrt/yolov7-320.trt | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
|   input_tensor: nchw | ||||
|   input_pixel_format: rgb | ||||
|   width: 320 # MUST match the chosen model i.e yolov7-320 -> 320, yolov4-416 -> 416 | ||||
|   height: 320 # MUST match the chosen model i.e yolov7-320 -> 320 yolov4-416 -> 416 | ||||
| ``` | ||||
|  | ||||
| ## Rockchip platform | ||||
|  | ||||
| Hardware accelerated object detection is supported on the following SoCs: | ||||
| @@ -791,27 +714,66 @@ Hardware accelerated object detection is supported on the following SoCs: | ||||
| - RK3576 | ||||
| - RK3588 | ||||
|  | ||||
| This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2. | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be: | ||||
|  | ||||
| ```yaml | ||||
| detectors: | ||||
|   rknn_0: | ||||
|     type: rknn | ||||
|     num_cores: 0 | ||||
|   rknn_1: | ||||
|     type: rknn | ||||
|     num_cores: 0 | ||||
| ``` | ||||
|  | ||||
| ::: | ||||
| This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model. | ||||
|  | ||||
| ### Prerequisites | ||||
|  | ||||
| Make sure to follow the [Rockchip specific installation instructions](/frigate/installation#rockchip-platform). | ||||
| Make sure to follow the [Rockchip specific installation instructions](/frigate/installation#rockchip-platform). | ||||
|  | ||||
| ### Configuration | ||||
|  | ||||
| This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional. | ||||
|  | ||||
| ```yaml | ||||
| detectors: # required | ||||
|   rknn: # required | ||||
|     type: rknn # required | ||||
|     # number of NPU cores to use | ||||
|     # 0 means choose automatically | ||||
|     # increase for better performance if you have a multicore NPU e.g. set to 3 on rk3588 | ||||
|     num_cores: 0 | ||||
|  | ||||
| model: # required | ||||
|   # name of model (will be automatically downloaded) or path to your own .rknn model file | ||||
|   # possible values are: | ||||
|   # - deci-fp16-yolonas_s | ||||
|   # - deci-fp16-yolonas_m | ||||
|   # - deci-fp16-yolonas_l | ||||
|   # - /config/model_cache/your_custom_model.rknn | ||||
|   path: deci-fp16-yolonas_s | ||||
|   # width and height of detection frames | ||||
|   width: 320 | ||||
|   height: 320 | ||||
|   # pixel format of detection frame | ||||
|   # default value is rgb but yolo models usually use bgr format | ||||
|   input_pixel_format: bgr # required | ||||
|   # shape of detection frame | ||||
|   input_tensor: nhwc | ||||
|   # needs to be adjusted to model, see below | ||||
|   labelmap_path: /labelmap.txt # required | ||||
| ``` | ||||
|  | ||||
| The correct labelmap must be loaded for each model. If you use a custom model (see notes below), you must make sure to provide the correct labelmap. The table below lists the correct paths for the bundled models: | ||||
|  | ||||
| | `path`                | `labelmap_path`       | | ||||
| | --------------------- | --------------------- | | ||||
| | deci-fp16-yolonas\_\* | /labelmap/coco-80.txt | | ||||
|  | ||||
| ### Choosing a model | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html | ||||
|  | ||||
| ::: | ||||
|  | ||||
| The inference time was determined on a rk3588 with 3 NPU cores. | ||||
|  | ||||
| | Model               | Size in mb | Inference time in ms | | ||||
| | ------------------- | ---------- | -------------------- | | ||||
| | deci-fp16-yolonas_s | 24         | 25                   | | ||||
| | deci-fp16-yolonas_m | 62         | 35                   | | ||||
| | deci-fp16-yolonas_l | 81         | 45                   | | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| @@ -824,98 +786,9 @@ $ cat /sys/kernel/debug/rknpu/load | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ### Supported Models | ||||
|  | ||||
| This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional. | ||||
|  | ||||
| ```yaml | ||||
| detectors: # required | ||||
|   rknn: # required | ||||
|     type: rknn # required | ||||
|     # number of NPU cores to use | ||||
|     # 0 means choose automatically | ||||
|     # increase for better performance if you have a multicore NPU e.g. set to 3 on rk3588 | ||||
|     num_cores: 0 | ||||
| ``` | ||||
|  | ||||
| The inference time was determined on a rk3588 with 3 NPU cores. | ||||
|  | ||||
| | Model                 | Size in mb | Inference time in ms | | ||||
| | --------------------- | ---------- | -------------------- | | ||||
| | deci-fp16-yolonas_s   | 24         | 25                   | | ||||
| | deci-fp16-yolonas_m   | 62         | 35                   | | ||||
| | deci-fp16-yolonas_l   | 81         | 45                   | | ||||
| | frigate-fp16-yolov9-t | 6          | 35                   | | ||||
| | rock-i8-yolox_nano    | 3          | 14                   | | ||||
| | rock-i8_yolox_tiny    | 6          | 18                   | | ||||
|  | ||||
| - All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space. | ||||
| - You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder, store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires a x86 machine). Note, that there is only post-processing for the supported models. | ||||
|  | ||||
| #### YOLO-NAS | ||||
|  | ||||
| ```yaml | ||||
| model: # required | ||||
|   # name of model (will be automatically downloaded) or path to your own .rknn model file | ||||
|   # possible values are: | ||||
|   # - deci-fp16-yolonas_s | ||||
|   # - deci-fp16-yolonas_m | ||||
|   # - deci-fp16-yolonas_l | ||||
|   # your yolonas_model.rknn | ||||
|   path: deci-fp16-yolonas_s | ||||
|   model_type: yolonas | ||||
|   width: 320 | ||||
|   height: 320 | ||||
|   input_pixel_format: bgr | ||||
|   input_tensor: nhwc | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html | ||||
|  | ||||
| ::: | ||||
|  | ||||
| #### YOLO (v9) | ||||
|  | ||||
| ```yaml | ||||
| model: # required | ||||
|   # name of model (will be automatically downloaded) or path to your own .rknn model file | ||||
|   # possible values are: | ||||
|   # - frigate-fp16-yolov9-t | ||||
|   # - frigate-fp16-yolov9-s | ||||
|   # - frigate-fp16-yolov9-m | ||||
|   # - frigate-fp16-yolov9-c | ||||
|   # - frigate-fp16-yolov9-e | ||||
|   # your yolo_model.rknn | ||||
|   path: frigate-fp16-yolov9-t | ||||
|   model_type: yolo-generic | ||||
|   width: 320 | ||||
|   height: 320 | ||||
|   input_tensor: nhwc | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| #### YOLOx | ||||
|  | ||||
| ```yaml | ||||
| model: # required | ||||
|   # name of model (will be automatically downloaded) or path to your own .rknn model file | ||||
|   # possible values are: | ||||
|   # - rock-i8-yolox_nano | ||||
|   # - rock-i8-yolox_tiny | ||||
|   # - rock-fp16-yolox_nano | ||||
|   # - rock-fp16-yolox_tiny | ||||
|   # your yolox_model.rknn | ||||
|   path: rock-i8-yolox_nano | ||||
|   model_type: yolox | ||||
|   width: 416 | ||||
|   height: 416 | ||||
|   input_tensor: nhwc | ||||
|   labelmap_path: /labelmap/coco-80.txt | ||||
| ``` | ||||
|  | ||||
| ### Converting your own onnx model to rknn format | ||||
|  | ||||
| To convert a onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to: | ||||
| @@ -935,7 +808,7 @@ output_name: "{input_basename}" | ||||
| config: | ||||
|   mean_values: [[0, 0, 0]] | ||||
|   std_values: [[255, 255, 255]] | ||||
|   quant_img_RGB2BGR: true | ||||
|   quant_img_rgb2bgr: true | ||||
| ``` | ||||
|  | ||||
| Explanation of the parameters: | ||||
| @@ -948,7 +821,7 @@ Explanation of the paramters: | ||||
|   - `soc`: the SoC this model was build for (e.g. "rk3588") | ||||
|   - `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0") | ||||
|   - **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`. | ||||
| - `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.2/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.2_EN.pdf). | ||||
| - `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf). | ||||
|  | ||||
| # Models | ||||
|  | ||||
| @@ -981,28 +854,9 @@ Make sure you change the batch size to 1 before exporting. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ### Download RF-DETR Model | ||||
|  | ||||
| RF-DETR can be exported as ONNX by running the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=Nano` in the first line to `Nano`, `Small`, or `Medium` size. | ||||
|  | ||||
| ```sh | ||||
| docker build . --build-arg MODEL_SIZE=Nano --output . -f- <<'EOF' | ||||
| FROM python:3.11 AS build | ||||
| RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/* | ||||
| COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/ | ||||
| WORKDIR /rfdetr | ||||
| RUN uv pip install --system rfdetr onnx onnxruntime onnxsim onnx-graphsurgeon | ||||
| ARG MODEL_SIZE | ||||
| RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export()" | ||||
| FROM scratch | ||||
| ARG MODEL_SIZE | ||||
| COPY --from=build /rfdetr/output/inference_model.onnx /rfdetr-${MODEL_SIZE}.onnx | ||||
| EOF | ||||
| ``` | ||||
|  | ||||
| ### Downloading YOLO-NAS Model | ||||
|  | ||||
| You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) which can be run directly in [Google Colab](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). | ||||
| You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| @@ -1011,43 +865,3 @@ The pre-trained YOLO-NAS weights from DeciAI are subject to their license and ca | ||||
| ::: | ||||
|  | ||||
| The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired. | ||||
|  | ||||
| ### Downloading YOLO Models | ||||
|  | ||||
| #### YOLOx | ||||
|  | ||||
| YOLOx models can be downloaded [from the YOLOx repo](https://github.com/Megvii-BaseDetection/YOLOX/tree/main/demo/ONNXRuntime). | ||||
|  | ||||
| #### YOLOv3, YOLOv4, and YOLOv7 | ||||
|  | ||||
| To export as ONNX: | ||||
|  | ||||
| ```sh | ||||
| git clone https://github.com/NateMeyer/tensorrt_demos | ||||
| cd tensorrt_demos/yolo | ||||
| ./download_yolo.sh | ||||
| python3 yolo_to_onnx.py -m yolov7-320 | ||||
| ``` | ||||
|  | ||||
| #### YOLOv9 | ||||
|  | ||||
| YOLOv9 model can be exported as ONNX using the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=t` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available sizes are `t`, `s`, `m`, `c`, and `e`). | ||||
|  | ||||
| ```sh | ||||
| docker build . --build-arg MODEL_SIZE=t --output . -f- <<'EOF' | ||||
| FROM python:3.11 AS build | ||||
| RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/* | ||||
| COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/ | ||||
| WORKDIR /yolov9 | ||||
| ADD https://github.com/WongKinYiu/yolov9.git . | ||||
| RUN uv pip install --system -r requirements.txt | ||||
| RUN uv pip install --system onnx onnxruntime onnx-simplifier>=0.4.1 | ||||
| ARG MODEL_SIZE | ||||
| ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt | ||||
| RUN sed -i "s/ckpt = torch.load(attempt_download(w), map_location='cpu')/ckpt = torch.load(attempt_download(w), map_location='cpu', weights_only=False)/g" models/experimental.py | ||||
| RUN python3 export.py --weights ./yolov9-${MODEL_SIZE}.pt --imgsz 320 --simplify --include onnx | ||||
| FROM scratch | ||||
| ARG MODEL_SIZE | ||||
| COPY --from=build /yolov9/yolov9-${MODEL_SIZE}.onnx / | ||||
| EOF | ||||
| ``` | ||||
|   | ||||
| @@ -20,5 +20,5 @@ In order to install Frigate as a PWA, the following requirements must be met: | ||||
| Installation varies slightly based on the device that is being used: | ||||
|  | ||||
| - Desktop: Use the install button typically found in right edge of the address bar | ||||
| - Android: Use the `Install as App` button in the more options menu for Chrome, and the `Add app to Home screen` button for Firefox | ||||
| - iOS: Use the `Add to Homescreen` button in the share menu | ||||
| - Android: Use the `Install as App` button in the more options menu | ||||
| - iOS: Use the `Add to Homescreen` button in the share menu | ||||
| @@ -146,7 +146,7 @@ The above configuration example can be added globally or on a per camera basis. | ||||
|  | ||||
| ## Can I have "continuous" recordings, but only at certain times? | ||||
|  | ||||
| Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. | ||||
| Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. | ||||
|  | ||||
| ## How do I export recordings? | ||||
|  | ||||
| @@ -174,10 +174,6 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ## Apple Compatibility with H.265 Streams | ||||
|  | ||||
| Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices. | ||||
|  | ||||
| ## Syncing Recordings With Disk | ||||
|  | ||||
| In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist. | ||||
|   | ||||
| @@ -78,21 +78,16 @@ proxy: | ||||
|   # Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth | ||||
|   # is disabled. | ||||
|   # NOTE: Many authentication proxies pass a header downstream with the authenticated | ||||
|   #       user name and role. Not all values are supported. It must be a whitelisted header. | ||||
|   #       user name. Not all values are supported. It must be a whitelisted header. | ||||
|   #       See the docs for more info. | ||||
|   header_map: | ||||
|     user: x-forwarded-user | ||||
|     role: x-forwarded-role | ||||
|   # Optional: Url for logging out a user. This sets the location of the logout url in | ||||
|   # the UI. | ||||
|   logout_url: /api/logout | ||||
|   # Optional: Auth secret that is checked against the X-Proxy-Secret header sent from | ||||
|   # the proxy. If not set, all requests are trusted regardless of origin. | ||||
|   auth_secret: None | ||||
|   # Optional: The default role to use for proxy auth. Must be "admin" or "viewer" | ||||
|   default_role: viewer | ||||
|   # Optional: The character used to separate multiple values in the proxy headers. (default: shown below) | ||||
|   separator: "," | ||||
|  | ||||
| # Optional: Authentication configuration | ||||
| auth: | ||||
| @@ -130,7 +125,7 @@ auth: | ||||
| # NOTE: The default values are for the EdgeTPU detector. | ||||
| # Other detectors will require the model config to be set. | ||||
| model: | ||||
|   # Required: path to the model. Frigate+ models use plus://<model_id> (default: automatic based on detector) | ||||
|   # Required: path to the model (default: automatic based on detector) | ||||
|   path: /edgetpu_model.tflite | ||||
|   # Required: path to the labelmap (default: shown below) | ||||
|   labelmap_path: /labelmap.txt | ||||
| @@ -548,37 +543,17 @@ semantic_search: | ||||
|   model_size: "small" | ||||
|  | ||||
| # Optional: Configuration for face recognition capability | ||||
| # NOTE: enabled, min_area can be overridden at the camera level | ||||
| face_recognition: | ||||
|   # Optional: Enable face recognition (default: shown below) | ||||
|   # Optional: Enable semantic search (default: shown below) | ||||
|   enabled: False | ||||
|   # Optional: Minimum face distance score required to mark as a potential match (default: shown below) | ||||
|   unknown_score: 0.8 | ||||
|   # Optional: Minimum face detection score required to detect a face (default: shown below) | ||||
|   # NOTE: This only applies when not running a Frigate+ model | ||||
|   detection_threshold: 0.7 | ||||
|   # Optional: Minimum face distance score required to be considered a match (default: shown below) | ||||
|   recognition_threshold: 0.9 | ||||
|   # Optional: Min area of detected face box to consider running face recognition (default: shown below) | ||||
|   min_area: 500 | ||||
|   # Optional: Min face recognitions for the sub label to be applied to the person object (default: shown below) | ||||
|   min_faces: 1 | ||||
|   # Optional: Number of images of recognized faces to save for training (default: shown below) | ||||
|   save_attempts: 100 | ||||
|   # Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below) | ||||
|   blur_confidence_filter: True | ||||
|   # Optional: Set the model size used face recognition. (default: shown below) | ||||
|   model_size: small | ||||
|   # Optional: Set the model size used for embeddings. (default: shown below) | ||||
|   # NOTE: small model runs on CPU and large model runs on GPU | ||||
|   model_size: "small" | ||||
|  | ||||
| # Optional: Configuration for license plate recognition capability | ||||
| # NOTE: enabled, min_area, and enhancement can be overridden at the camera level | ||||
| lpr: | ||||
|   # Optional: Enable license plate recognition (default: shown below) | ||||
|   enabled: False | ||||
|   # Optional: The device to run the models on (default: shown below) | ||||
|   device: CPU | ||||
|   # Optional: Set the model size used for text detection. (default: shown below) | ||||
|   model_size: small | ||||
|   # Optional: License plate object confidence score required to begin running recognition (default: shown below) | ||||
|   detection_threshold: 0.7 | ||||
|   # Optional: Minimum area of license plate to begin running recognition (default: shown below) | ||||
| @@ -593,11 +568,6 @@ lpr: | ||||
|   match_distance: 1 | ||||
|   # Optional: Known plates to track (strings or regular expressions) (default: shown below) | ||||
|   known_plates: {} | ||||
|   # Optional: Enhance the detected plate image with contrast adjustment and denoising (default: shown below) | ||||
|   # A value between 0 and 10. Higher values are not always better and may perform worse than lower values. | ||||
|   enhancement: 0 | ||||
|   # Optional: Save plate images to /media/frigate/clips/lpr for debugging purposes (default: shown below) | ||||
|   debug_save_plates: False | ||||
|  | ||||
| # Optional: Configuration for AI generated tracked object descriptions | ||||
| # WARNING: Depending on the provider, this will send thumbnails over the internet | ||||
| @@ -621,7 +591,7 @@ genai: | ||||
|     person: "My special person prompt." | ||||
|  | ||||
| # Optional: Restream configuration | ||||
| # Uses https://github.com/AlexxIT/go2rtc (v1.9.9) | ||||
| # Uses https://github.com/AlexxIT/go2rtc (v1.9.2) | ||||
| # NOTE: The default go2rtc API port (1984) must be used, | ||||
| #       changing this port for the integrated go2rtc instance is not supported. | ||||
| go2rtc: | ||||
| @@ -675,9 +645,6 @@ cameras: | ||||
|     # If disabled: config is used but no live stream and no capture etc. | ||||
|     # Events/Recordings are still viewable. | ||||
|     enabled: True | ||||
|     # Optional: camera type used for some Frigate features (default: shown below) | ||||
|     # Options are "generic" and "lpr" | ||||
|     type: "generic" | ||||
|     # Required: ffmpeg settings for the camera | ||||
|     ffmpeg: | ||||
|       # Required: A list of input streams for the camera. See documentation for more information. | ||||
| @@ -903,12 +870,12 @@ telemetry: | ||||
|     # Optional: Enable Intel GPU stats (default: shown below) | ||||
|     intel_gpu_stats: True | ||||
|     # Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below) | ||||
|     intel_gpu_device: None | ||||
|     sriov: False | ||||
|     # Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below) | ||||
|     # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled. | ||||
|     network_bandwidth: False | ||||
|   # Optional: Enable the latest version outbound check (default: shown below) | ||||
|   # NOTE: If you use the Home Assistant integration, disabling this will prevent it from reporting new versions | ||||
|   # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions | ||||
|   version_check: True | ||||
|  | ||||
| # Optional: Camera groups (default: no groups are setup) | ||||
|   | ||||
| @@ -7,7 +7,7 @@ title: Restream | ||||
|  | ||||
| Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. | ||||
|  | ||||
| Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.9) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration) for more advanced configurations and features. | ||||
| Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.2) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration) for more advanced configurations and features. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| @@ -134,7 +134,7 @@ cameras: | ||||
|  | ||||
| ## Handling Complex Passwords | ||||
|  | ||||
| go2rtc expects URL-encoded passwords in the config, [urlencoder.org](https://urlencoder.org) can be used for this purpose. | ||||
| go2rtc expects URL-encoded passwords in the config, [urlencoder.org](https://urlencoder.org) can be used for this purpose.  | ||||
|  | ||||
| For example: | ||||
|  | ||||
| @@ -152,11 +152,11 @@ go2rtc: | ||||
|     my_camera: rtsp://username:$%40foo%25@192.168.1.100 | ||||
| ``` | ||||
|  | ||||
| See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information. | ||||
See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information. | ||||
|  | ||||
| ## Advanced Restream Configurations | ||||
|  | ||||
| The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: | ||||
| The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: | ||||
|  | ||||
| NOTE: The output will need to be passed with two curly braces `{{output}}` | ||||
|  | ||||
|   | ||||
| @@ -21,21 +21,6 @@ In 0.14 and later, all of that is bundled into a single review item which starts | ||||
|  | ||||
| Not every segment of video captured by Frigate may be of the same level of interest to you. Video of people who enter your property may be a different priority than those walking by on the sidewalk. For this reason, Frigate 0.14 categorizes review items as _alerts_ and _detections_. By default, all person and car objects are considered alerts. You can refine categorization of your review items by configuring required zones for them. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| Alerts and detections categorize the tracked objects in review items, but Frigate must first detect those objects with your configured object detector (Coral, OpenVINO, etc). By default, the object tracker only detects `person`. Setting `labels` for `alerts` and `detections` does not automatically enable detection of new objects. To detect more than `person`, you should add the following to your config: | ||||
|  | ||||
| ```yaml | ||||
| objects: | ||||
|   track: | ||||
|     - person | ||||
|     - car | ||||
|     - ... | ||||
| ``` | ||||
|  | ||||
| See the [objects documentation](objects.md) for the list of objects that Frigate's default model tracks. | ||||
| ::: | ||||
|  | ||||
| ## Restricting alerts to specific labels | ||||
|  | ||||
| By default a review item will only be marked as an alert if a person or car is detected. This can be configured to include any object or audio label using the following config: | ||||
|   | ||||
| @@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended. | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Enrichments Settings page before it can be used. Semantic Search is a global configuration setting. | ||||
| Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Settings page before it can be used. Semantic Search is a global configuration setting. | ||||
|  | ||||
| ```yaml | ||||
| semantic_search: | ||||
| @@ -29,9 +29,9 @@ semantic_search: | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| The embeddings database can be re-indexed from the existing tracked objects in your database by pressing the "Reindex" button in the Enrichments Settings in the UI or by adding `reindex: True` to your `semantic_search` configuration and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. | ||||
| The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration or by toggling the switch on the Search Settings page in the UI and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to turn the UI's switch off or set the config back to `False` before restarting Frigate again. | ||||
|  | ||||
| If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to reindex as described above. | ||||
| If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -72,7 +72,7 @@ For most users, especially native English speakers, the V1 model remains the rec | ||||
|  | ||||
| :::note | ||||
|  | ||||
| Switching between V1 and V2 requires reindexing your embeddings. The embeddings from V1 and V2 are incompatible, and failing to reindex will result in incorrect search results. | ||||
| Switching between V1 and V2 requires reindexing your embeddings. To do this, set `reindex: True` in your Semantic Search configuration and restart Frigate. The embeddings from V1 and V2 are incompatible, and failing to reindex will result in incorrect search results. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -90,7 +90,19 @@ semantic_search: | ||||
|  | ||||
| If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically. | ||||
|  | ||||
| See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. | ||||
| **NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU. | ||||
|  | ||||
| - **AMD** | ||||
|  | ||||
|   - ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image. | ||||
|  | ||||
| - **Intel** | ||||
|  | ||||
|   - OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image. | ||||
|  | ||||
| - **Nvidia** | ||||
|   - Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image. | ||||
|   - Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image. | ||||
|  | ||||
| ::: | ||||
|  | ||||
|   | ||||
| @@ -5,7 +5,7 @@ title: Snapshots | ||||
|  | ||||
| Frigate can save a snapshot image to `/media/frigate/clips` for each object that is detected named as `<camera>-<id>.jpg`. They are also accessible [via the api](../integrations/api/event-snapshot-events-event-id-snapshot-jpg-get.api.mdx) | ||||
|  | ||||
| Snapshots are accessible in the UI in the Explore pane. This allows for quick submission to the Frigate+ service. | ||||
| For users with Frigate+ enabled, snapshots are accessible in the UI in the Frigate+ pane to allow for quick submission to the Frigate+ service. | ||||
|  | ||||
| To only save snapshots for objects that enter a specific zone, [see the zone docs](./zones.md#restricting-snapshots-to-specific-zones) | ||||
|  | ||||
|   | ||||
| @@ -36,8 +36,8 @@ Note that certbot uses symlinks, and those can't be followed by the container un | ||||
| frigate: | ||||
|   ... | ||||
|   volumes: | ||||
|     - /etc/letsencrypt/live/your.fqdn.net:/etc/letsencrypt/live/frigate:ro | ||||
|     - /etc/letsencrypt/archive/your.fqdn.net:/etc/letsencrypt/archive/your.fqdn.net:ro | ||||
|     - /etc/letsencrypt/live/frigate:/etc/letsencrypt/live/frigate:ro | ||||
|     - /etc/letsencrypt/archive/frigate:/etc/letsencrypt/archive/frigate:ro | ||||
|   ... | ||||
|  | ||||
| ``` | ||||
|   | ||||
| @@ -84,13 +84,7 @@ Only car objects can trigger the `front_yard_street` zone and only person can tr | ||||
|  | ||||
| ### Zone Loitering | ||||
|  | ||||
| Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone. | ||||
|  | ||||
| :::note | ||||
|  | ||||
| When using loitering zones, a review item will remain active until the object leaves. Loitering zones are only meant to be used in areas where loitering is not expected behavior. | ||||
|  | ||||
| ::: | ||||
| Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time before the object will be considered in the zone. | ||||
|  | ||||
| ```yaml | ||||
| cameras: | ||||
| @@ -136,7 +130,7 @@ Your zone must be defined with exactly 4 points and should be aligned to the gro | ||||
|  | ||||
|  | ||||
|  | ||||
| Speed estimation requires a minimum number of frames for your object to be tracked before a valid estimate can be calculated, so create your zone away from places where objects enter and exit for the best results. The object's bounding box must be stable and remain a constant size as it enters and exits the zone. _Your zone should not take up the full frame, and the zone does **not** need to be the same size or larger than the objects passing through it._ An object's speed is tracked while it passes through the zone and then saved to Frigate's database. | ||||
| Speed estimation requires a minimum number of frames for your object to be tracked before a valid estimate can be calculated, so create your zone away from places where objects enter and exit for the best results. _Your zone should not take up the full frame._ An object's speed is tracked while it is in the zone and then saved to Frigate's database. | ||||
|  | ||||
| Accurate real-world distance measurements are required to estimate speeds. These distances can be specified in your zone config through the `distances` field. | ||||
|  | ||||
| @@ -165,9 +159,8 @@ These speed values are output as a number in miles per hour (mph) or kilometers | ||||
|  | ||||
| #### Best practices and caveats | ||||
|  | ||||
| - Speed estimation works best with a straight road or path when your object travels in a straight line across that path. Avoid creating your zone near intersections or anywhere that objects would make a turn. | ||||
| - Create a zone where the bottom center of your object's bounding box travels directly through it and does not become obscured at any time. | ||||
| - A large zone can be used (as in the photo example above), but it may cause inaccurate estimation if the object's bounding box changes shape (such as when it turns or becomes partially hidden). Generally it's best to make your zone large enough to capture a few frames, but small enough so that the bounding box doesn't change size as it enters, travels through, and exits the zone. | ||||
| - Speed estimation works best with a straight road or path when your object travels in a straight line across that path. Avoid creating your zone near intersections or anywhere that objects would make a turn. If the bounding box changes shape (either because the object made a turn or became partially obscured, for example), speed estimation will not be accurate. | ||||
| - Create a zone where the bottom center of your object's bounding box travels directly through it and does not become obscured at any time. See the photo example above. | ||||
| - Depending on the size and location of your zone, you may want to decrease the zone's `inertia` value from the default of 3. | ||||
| - The more accurate your real-world dimensions can be measured, the more accurate speed estimation will be. However, due to the way Frigate's tracking algorithm works, you may need to tweak the real-world distance values so that estimated speeds better match real-world speeds. | ||||
| - Once an object leaves the zone, speed accuracy will likely decrease due to perspective distortion and misalignment with the calibrated area. Therefore, speed values will show as a zero through MQTT and will not be visible on the debug view when an object is outside of a speed tracking zone. | ||||
|   | ||||
| @@ -72,17 +72,17 @@ COPY --from=rootfs / / | ||||
| The images for each board will be built for each Frigate release, this is done in the `.github/workflows/ci.yml` file. The board build workflow will need to be added here. | ||||
|  | ||||
| ```yml | ||||
| - name: Build and push board build | ||||
|   uses: docker/bake-action@v3 | ||||
|   with: | ||||
|     push: true | ||||
|     targets: board # this is the target in the board.hcl file | ||||
|     files: docker/board/board.hcl # this should be updated with the actual board type | ||||
|     # the tags should be updated with the actual board types as well | ||||
|     # the community board builds should never push to cache, but it can pull from cache | ||||
|     set: | | ||||
|       board.tags=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-board | ||||
|       *.cache-from=type=gha | ||||
|       - name: Build and push board build | ||||
|         uses: docker/bake-action@v3 | ||||
|         with: | ||||
|           push: true | ||||
|           targets: board # this is the target in the board.hcl file | ||||
|           files: docker/board/board.hcl # this should be updated with the actual board type | ||||
|           # the tags should be updated with the actual board types as well | ||||
|           # the community board builds should never push to cache, but it can pull from cache | ||||
|           set: | | ||||
|             board.tags=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-board | ||||
|             *.cache-from=type=gha | ||||
| ``` | ||||
|  | ||||
| ### Code Owner File | ||||
| @@ -91,4 +91,4 @@ The `CODEOWNERS` file should be updated to include the `docker/board` along with | ||||
|  | ||||
| # Docs | ||||
|  | ||||
| At a minimum the `installation`, `object_detectors`, `hardware_acceleration_video`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board. | ||||
| At a minimum the `installation`, `object_detectors`, `hardware_acceleration`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board. | ||||
|   | ||||
| @@ -17,15 +17,15 @@ From here, follow the guides for: | ||||
| - [Web Interface](#web-interface) | ||||
| - [Documentation](#documentation) | ||||
|  | ||||
| ### Frigate Home Assistant Add-on | ||||
| ### Frigate Home Assistant Addon | ||||
|  | ||||
| This repository holds the Home Assistant Add-on, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab. | ||||
| This repository holds the Home Assistant Addon, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab. | ||||
|  | ||||
| Fork [blakeblackshear/frigate-hass-addons](https://github.com/blakeblackshear/frigate-hass-addons) to your own Github profile, then clone the forked repo to your local machine. | ||||
|  | ||||
| ### Frigate Home Assistant Integration | ||||
|  | ||||
| This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you are running Frigate as a standalone Docker container or as a [Home Assistant Add-on](#frigate-home-assistant-add-on). | ||||
| This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you run that with the [addon](#frigate-home-assistant-addon) or in a separate Docker instance. | ||||
|  | ||||
| Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshear/frigate-hass-integration) to your own GitHub profile, then clone the forked repo to your local machine. | ||||
|  | ||||
| @@ -77,15 +77,14 @@ Create and place these files in a `debug` folder in the root of the repo. This i | ||||
|  | ||||
| #### 4. Run Frigate from the command line | ||||
|  | ||||
| VS Code will start the Docker Compose file for you and open a terminal window connected to `frigate-dev`. | ||||
| VSCode will start the docker compose file for you and open a terminal window connected to `frigate-dev`. | ||||
|  | ||||
| - Depending on what hardware you're developing on, you may need to amend `docker-compose.yml` in the project root to pass through a USB Coral or GPU for hardware acceleration. | ||||
| - Run `python3 -m frigate` to start the backend. | ||||
| - In a separate terminal window inside VS Code, change into the `web` directory and run `npm install && npm run dev` to start the frontend. | ||||
|  | ||||
| #### 5. Teardown | ||||
|  | ||||
| After closing VS Code, you may still have containers running. To close everything down, just run `docker-compose down -v` to cleanup all containers. | ||||
| After closing VSCode, you may still have containers running. To close everything down, just run `docker-compose down -v` to cleanup all containers. | ||||
|  | ||||
| ### Testing | ||||
|  | ||||
| @@ -236,11 +235,3 @@ When testing nginx config changes from within the dev container, the following c | ||||
| ```console | ||||
| sudo cp docker/main/rootfs/usr/local/nginx/conf/* /usr/local/nginx/conf/ && sudo /usr/local/nginx/sbin/nginx -s reload | ||||
| ``` | ||||
|  | ||||
| ## Contributing translations of the Web UI | ||||
|  | ||||
| Frigate uses [Weblate](https://weblate.org) to manage translations of the Web UI. To contribute translation, sign up for an account at Weblate and navigate to the Frigate NVR project: | ||||
|  | ||||
| https://hosted.weblate.org/projects/frigate-nvr/ | ||||
|  | ||||
| When translating, maintain the existing key structure while translating only the values. Ensure your translations maintain proper formatting, including any placeholder variables (like `{{example}}`). | ||||
|   | ||||
| @@ -3,7 +3,7 @@ id: camera_setup | ||||
| title: Camera setup | ||||
| --- | ||||
|  | ||||
| Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Firefox 134+/136+/137+ (Windows/Mac/Linux & Android), Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around. | ||||
| Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around. | ||||
|  | ||||
| - **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher (10fps is the recommended maximum for most users) for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server. | ||||
|  | ||||
| @@ -28,7 +28,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings: | ||||
| - Encode Mode: H.264 | ||||
| - Resolution: 2688\*1520 | ||||
| - Frame Rate(FPS): 15 | ||||
| - I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](/configuration/live#camera_settings_recommendations) for more info) | ||||
| - I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](../configuration/live) for more info) | ||||
|  | ||||
| **Sub Stream (Detection)** | ||||
|  | ||||
|   | ||||
| @@ -66,4 +66,4 @@ The time period starting when a tracked object entered the frame and ending when | ||||
|  | ||||
| ## Zone | ||||
|  | ||||
| Zones are areas of interest, zones can be used for notifications and for limiting the areas where Frigate will create a [review item](#review-item). [See the zone docs for more info](/configuration/zones) | ||||
| Zones are areas of interest, zones can be used for notifications and for limiting the areas where Frigate will create an [event](#event). [See the zone docs for more info](/configuration/zones) | ||||
|   | ||||
| @@ -9,184 +9,111 @@ Cameras that output H.264 video and AAC audio will offer the most compatibility | ||||
|  | ||||
| I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions; especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or less configuration options. | ||||
|  | ||||
| WiFi cameras are not recommended as [their streams are less reliable and cause connection loss and/or lost video data](https://ipcamtalk.com/threads/camera-conflicts.68142/#post-738821), especially when more than a few WiFi cameras will be used at the same time. | ||||
| Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data. | ||||
|  | ||||
| Many users have reported various issues with 4K-plus Reolink cameras, it is best to stick with 5MP and lower for Reolink cameras. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras). | ||||
|  | ||||
| Here are some of the cameras I recommend: | ||||
Here are some of the cameras I recommend: | ||||
|  | ||||
| - <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link) | ||||
| - <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link) | ||||
| - <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link) | ||||
| - <a href="https://amzn.to/4ltOpaC" target="_blank" rel="nofollow noopener sponsored">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link) | ||||
|  | ||||
| I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website. | ||||
|  | ||||
| ## Server | ||||
|  | ||||
| My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral, Hailo, or other AI accelerators. | ||||
|  | ||||
| Note that many of these mini PCs come with Windows pre-installed, and you will need to install Linux according to the [getting started guide](../guides/getting_started.md). | ||||
|  | ||||
| I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website. | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| If the EQ13 is out of stock, the link below may take you to a suggested alternative on Amazon. The Beelink EQ14 has some known compatibility issues, so you should avoid that model for now. | ||||
|  | ||||
| ::: | ||||
| My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website. | ||||
|  | ||||
| | Name                                                                                                          | Coral Inference Speed | Coral Compatibility | Notes                                                                                     | | ||||
| | ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ----------------------------------------------------------------------------------------- | | ||||
| | Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms                | USB                 | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | | ||||
| | Beelink EQ13 (<a href="https://amzn.to/4iQaBKu" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms                | USB                 | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | | ||||
|  | ||||
| ## Detectors | ||||
|  | ||||
| A detector is a device which is optimized for running inferences efficiently to detect objects. Using a recommended detector means there will be less latency between detections and more detections can be run per second. Frigate is designed around the expectation that a detector is used to achieve very low inference speeds. Offloading TensorFlow to a detector is an order of magnitude faster and will reduce your CPU load dramatically. | ||||
|  | ||||
| :::info | ||||
|  | ||||
| Frigate supports multiple different detectors that work on different types of hardware: | ||||
|  | ||||
| **Most Hardware** | ||||
|  | ||||
| - [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices offering a wide range of compatibility with devices. | ||||
|  | ||||
|   - [Supports many model architectures](../../configuration/object_detectors#configuration) | ||||
|   - Runs best with tiny or small size models | ||||
|  | ||||
| - [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices. | ||||
|   - [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector) | ||||
|  | ||||
| **AMD** | ||||
|  | ||||
| - [ROCm](#rocm---amd-gpu): ROCm can run on AMD Discrete GPUs to provide efficient object detection | ||||
|   - [Supports limited model architectures](../../configuration/object_detectors#supported-models-1) | ||||
|   - Runs best on discrete AMD GPUs | ||||
|  | ||||
| **Intel** | ||||
|  | ||||
| - [OpenVino](#openvino---intel): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection. | ||||
|   - [Supports majority of model architectures](../../configuration/object_detectors#supported-models) | ||||
|   - Runs best with tiny, small, or medium models | ||||
|  | ||||
| **Nvidia** | ||||
|  | ||||
| - [TensortRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices. | ||||
|   - [Supports majority of model architectures via ONNX](../../configuration/object_detectors#supported-models-2) | ||||
|   - Runs well with any size models including large | ||||
|  | ||||
| **Rockchip** | ||||
|  | ||||
| - [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs to provide efficient object detection. | ||||
|   - [Supports limited model architectures](../../configuration/object_detectors#choosing-a-model) | ||||
|   - Runs best with tiny or small size models | ||||
|   - Runs efficiently on low power hardware | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ### Hailo-8 | ||||
|  | ||||
| Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms—including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn’t provided. | ||||
|  | ||||
| **Default Model Configuration:** | ||||
|  | ||||
| - **Hailo-8L:** Default model is **YOLOv6n**. | ||||
| - **Hailo-8:** Default model is **YOLOv6n**. | ||||
|  | ||||
| In real-world deployments, even with multiple cameras running concurrently, Frigate has demonstrated consistent performance. Testing on x86 platforms—with dual PCIe lanes—yields further improvements in FPS, throughput, and latency compared to the Raspberry Pi setup. | ||||
|  | ||||
| | Name             | Hailo‑8 Inference Time | Hailo‑8L Inference Time | | ||||
| | ---------------- | ---------------------- | ----------------------- | | ||||
| | ssd mobilenet v1 | ~ 6 ms                 | ~ 10 ms                 | | ||||
| | yolov6n          | ~ 7 ms                 | ~ 11 ms                 | | ||||
| A detector is a device which is optimized for running inferences efficiently to detect objects. Using a recommended detector means there will be less latency between detections and more detections can be run per second. Frigate is designed around the expectation that a detector is used to achieve very low inference speeds. Offloading TensorFlow to a detector is an order of magnitude faster and will reduce your CPU load dramatically. As of 0.12, Frigate supports a handful of different detector types with varying inference speeds and performance. | ||||
|  | ||||
| ### Google Coral TPU | ||||
|  | ||||
| Frigate supports both the USB and M.2 versions of the Google Coral. | ||||
| It is strongly recommended to use a Google Coral. A $60 device will outperform $2000 CPU. Frigate should work with any supported Coral device from https://coral.ai | ||||
|  | ||||
| - The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions. | ||||
| - The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai | ||||
| The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions. | ||||
|  | ||||
| The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai | ||||
|  | ||||
| A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed. | ||||
|  | ||||
| ### OpenVINO - Intel | ||||
| ### OpenVINO | ||||
|  | ||||
| The OpenVINO detector type is able to run on: | ||||
|  | ||||
| - 6th Gen Intel Platforms and newer that have an iGPU | ||||
| - x86 hosts with an Intel Arc GPU | ||||
| - x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2) | ||||
| - Most modern AMD CPUs (though this is officially not supported by Intel) | ||||
| - x86 & Arm64 hosts via CPU (generally not recommended) | ||||
|  | ||||
| :::note | ||||
|  | ||||
| Intel NPUs have seen [limited success in community deployments](https://github.com/blakeblackshear/frigate/discussions/13248#discussioncomment-12347357), although they remain officially unsupported. | ||||
|  | ||||
| In testing, the NPU delivered performance that was only comparable to — or in some cases worse than — the integrated GPU. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| More information is available [in the detector docs](/configuration/object_detectors#openvino-detector) | ||||
|  | ||||
| Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: | ||||
|  | ||||
| | Name           | MobileNetV2 Inference Time | YOLO-NAS Inference Time   | RF-DETR Inference Time | Notes                              | | ||||
| | -------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- | | ||||
| | Intel HD 530   | 15 - 35 ms                 |                           |                        | Can only run one detector instance | | ||||
| | Intel HD 620   | 15 - 25 ms                 | 320: ~ 35 ms              |                        |                                    | | ||||
| | Intel HD 630   | ~ 15 ms                    | 320: ~ 30 ms              |                        |                                    | | ||||
| | Intel UHD 730  | ~ 10 ms                    | 320: ~ 19 ms 640: ~ 54 ms |                        |                                    | | ||||
| | Intel UHD 770  | ~ 15 ms                    | 320: ~ 20 ms 640: ~ 46 ms |                        |                                    | | ||||
| | Intel N100     | ~ 15 ms                    | 320: ~ 25 ms              |                        | Can only run one detector instance | | ||||
| | Intel Iris XE  | ~ 10 ms                    | 320: ~ 18 ms 640: ~ 50 ms |                        |                                    | | ||||
| | Intel Arc A380 | ~ 6 ms                     | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms  |                                    | | ||||
| | Intel Arc A750 | ~ 4 ms                     | 320: ~ 8 ms               |                        |                                    | | ||||
| | Name                 | MobileNetV2 Inference Time | YOLO-NAS Inference Time   | Notes                                  | | ||||
| | -------------------- | -------------------------- | ------------------------- | -------------------------------------- | | ||||
| | Intel Celeron J4105  | ~ 25 ms                    |                           | Can only run one detector instance     | | ||||
| | Intel Celeron N3060  | 130 - 150 ms               |                           | Can only run one detector instance     | | ||||
| | Intel Celeron N3205U | ~ 120 ms                   |                           | Can only run one detector instance     | | ||||
| | Intel Celeron N4020  | 50 - 200 ms                |                           | Inference speed depends on other loads | | ||||
| | Intel i3 6100T       | 15 - 35 ms                 |                           | Can only run one detector instance     | | ||||
| | Intel i3 8100        | ~ 15 ms                    |                           |                                        | | ||||
| | Intel i5 4590        | ~ 20 ms                    |                           |                                        | | ||||
| | Intel i5 6500        | ~ 15 ms                    |                           |                                        | | ||||
| | Intel i5 7200u       | 15 - 25 ms                 |                           |                                        | | ||||
| | Intel i5 7500        | ~ 15 ms                    |                           |                                        | | ||||
| | Intel i5 1135G7      | 10 - 15 ms                 |                           |                                        | | ||||
| | Intel i3 12000       |                            | 320: ~ 19 ms 640: ~ 54 ms |                                        | | ||||
| | Intel i5 12600K      | ~ 15 ms                    | 320: ~ 20 ms 640: ~ 46 ms |                                        | | ||||
| | Intel Arc A380       | ~ 6 ms                     | 320: ~ 10 ms              |                                        | | ||||
| | Intel Arc A750       | ~ 4 ms                     | 320: ~ 8 ms               |                                        | | ||||
|  | ||||
| ### TensorRT - Nvidia GPU | ||||
|  | ||||
| Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA libraries. | ||||
|  | ||||
| #### Minimum Hardware Support | ||||
|  | ||||
| The 12.x series of CUDA libraries is used, which has minor version compatibility. The minimum driver version on the host system must be `>=545`. The GPU must also support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the NVIDIA GPU Compute Capability table linked below. | ||||
|  | ||||
| Make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU. | ||||
|  | ||||
| There are improved capabilities in newer GPU architectures that TensorRT can benefit from, such as INT8 operations and Tensor cores. The features compatible with your hardware will be optimized when the model is converted to a trt file. Currently the script provided for generating the model provides a switch to enable/disable FP16 operations. If you wish to use newer features such as INT8 optimization, more work is required. | ||||
|  | ||||
| #### Compatibility References: | ||||
|  | ||||
| [NVIDIA TensorRT Support Matrix](https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-841/support-matrix/index.html) | ||||
|  | ||||
| [NVIDIA CUDA Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html) | ||||
|  | ||||
| [NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus) | ||||
| The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector). | ||||
|  | ||||
| Inference speeds will vary greatly depending on the GPU and the model used. | ||||
| `tiny` variants are faster than the equivalent non-tiny model, some known examples are below: | ||||
|  | ||||
| | Name            | YOLOv9 Inference Time | YOLO-NAS Inference Time   | RF-DETR Inference Time | | ||||
| | --------------- | --------------------- | ------------------------- | ---------------------- | | ||||
| | RTX 3050        | t-320: 15 ms          | 320: ~ 10 ms 640: ~ 16 ms | Nano-320: ~ 12 ms      | | ||||
| | RTX 3070        | t-320: 11 ms          | 320: ~ 8 ms 640: ~ 14 ms  | Nano-320: ~ 9 ms       | | ||||
| | RTX A4000       |                       | 320: ~ 15 ms              |                        | | ||||
| | Tesla P40       |                       | 320: ~ 105 ms             |                        | | ||||
| | Name            | YoloV7 Inference Time | YOLO-NAS Inference Time   | | ||||
| | --------------- | --------------------- | ------------------------- | | ||||
| | GTX 1060 6GB    | ~ 7 ms                |                           | | ||||
| | GTX 1070        | ~ 6 ms                |                           | | ||||
| | GTX 1660 SUPER  | ~ 4 ms                |                           | | ||||
| | RTX 3050        | 5 - 7 ms              | 320: ~ 10 ms 640: ~ 16 ms | | ||||
| | RTX 3070 Mobile | ~ 5 ms                |                           | | ||||
| | Quadro P400 2GB | 20 - 25 ms            |                           | | ||||
| | Quadro P2000    | ~ 12 ms               |                           | | ||||
|  | ||||
| ### ROCm - AMD GPU | ||||
| ### AMD GPUs | ||||
|  | ||||
| With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs. | ||||
|  | ||||
| | Name      | YOLOv9 Inference Time | YOLO-NAS Inference Time   | | ||||
| | --------- | --------------------- | ------------------------- | | ||||
| | AMD 780M  | ~ 14 ms               | 320: ~ 25 ms 640: ~ 50 ms | | ||||
| | AMD 8700G |                       | 320: ~ 20 ms 640: ~ 40 ms | | ||||
| ### Hailo-8 | ||||
|  | ||||
| | Name            | Hailo‑8 Inference Time | Hailo‑8L Inference Time | | ||||
| | --------------- | ---------------------- | ----------------------- | | ||||
| | ssd mobilenet v1 | ~ 6 ms                 | ~ 10 ms                 | | ||||
| | yolov6n         | ~ 7 ms                 | ~ 11 ms                 | | ||||
|  | ||||
|  | ||||
| Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms—including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn’t provided. | ||||
|  | ||||
| **Default Model Configuration:** | ||||
| - **Hailo-8L:** Default model is **YOLOv6n**. | ||||
| - **Hailo-8:** Default model is **YOLOv6n**. | ||||
|  | ||||
| In real-world deployments, even with multiple cameras running concurrently, Frigate has demonstrated consistent performance. Testing on x86 platforms—with dual PCIe lanes—yields further improvements in FPS, throughput, and latency compared to the Raspberry Pi setup. | ||||
|  | ||||
|  | ||||
| ## Community Supported Detectors | ||||
|  | ||||
| ### Nvidia Jetson | ||||
|  | ||||
| Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). | ||||
| Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). | ||||
|  | ||||
| Inference speed will vary depending on the YOLO model, Jetson platform and Jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time. | ||||
|  | ||||
| @@ -200,11 +127,6 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard | ||||
| - RK3576 | ||||
| - RK3588 | ||||
|  | ||||
| | Name           | YOLOv9 Inference Time | YOLO-NAS Inference Time     | YOLOx Inference Time    | | ||||
| | -------------- | --------------------- | --------------------------- | ----------------------- | | ||||
| | rk3588 3 cores | tiny: ~ 35 ms         | small: ~ 20 ms med: ~ 30 ms | nano: 14 ms tiny: 18 ms | | ||||
| | rk3566 1 core  |                       | small: ~ 96 ms              |                         | | ||||
|  | ||||
| The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s. | ||||
|  | ||||
| ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version) | ||||
| @@ -227,4 +149,4 @@ Basically - When you increase the resolution and/or the frame rate of the stream | ||||
|  | ||||
| YES! The Coral does not help with decoding video streams. | ||||
|  | ||||
| Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://support.video.ibm.com/hc/en-us/articles/18106203580316-Keyframes-InterFrame-Video-Compression). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work. | ||||
| Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://blog.video.ibm.com/streaming-video-tips/keyframes-interframe-video-compression/). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work. | ||||
|   | ||||
| @@ -6,7 +6,7 @@ slug: / | ||||
|  | ||||
| A complete and local NVR designed for Home Assistant with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras. | ||||
|  | ||||
| Use of a [Recommended Detector](/frigate/hardware#detectors) is optional, but strongly recommended. CPU detection should only be used for testing purposes. | ||||
| Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but strongly recommended. CPU detection should only be used for testing purposes. The Coral will outperform even the best CPUs and can process 100+ FPS with very little overhead. | ||||
|  | ||||
| - Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration) | ||||
| - Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary | ||||
|   | ||||
| @@ -3,11 +3,11 @@ id: installation | ||||
| title: Installation | ||||
| --- | ||||
|  | ||||
| Frigate is a Docker container that can be run on any Docker host including as a [Home Assistant Add-on](https://www.home-assistant.io/addons/). Note that the Home Assistant Add-on is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant, whether you are running Frigate as a standalone Docker container or as a Home Assistant Add-on. | ||||
| Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant. | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| If you already have Frigate installed as a Home Assistant Add-on, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate. | ||||
| If you already have Frigate installed as a Home Assistant addon, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -45,7 +45,7 @@ The following ports are used by Frigate and can be mapped via docker as required | ||||
| | `8554` | RTSP restreaming. By default, these streams are unauthenticated. Authentication can be configured in go2rtc section of config.                                             | | ||||
| | `8555` | WebRTC connections for low latency live views.                                                                                                                             | | ||||
|  | ||||
| #### Common Docker Compose storage configurations | ||||
| #### Common docker compose storage configurations | ||||
|  | ||||
| Writing to a local disk or external USB drive: | ||||
|  | ||||
| @@ -73,7 +73,7 @@ Users of the Snapcraft build of Docker cannot use storage locations outside your | ||||
|  | ||||
| Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**. | ||||
|  | ||||
| The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in Docker Compose). | ||||
| The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose). | ||||
|  | ||||
| The Frigate container also stores logs in shm, which can take up to **40MB**, so make sure to take this into account in your math as well. | ||||
|  | ||||
| @@ -145,7 +145,7 @@ $ sudo cat /sys/kernel/debug/rknpu/version | ||||
| RKNPU driver: v0.9.2 # or later version | ||||
| ``` | ||||
|  | ||||
| I recommend [Armbian](https://www.armbian.com/download/?arch=aarch64), if your board is supported. | ||||
| I recommend [Joshua Riek's Ubuntu for Rockchip](https://github.com/Joshua-Riek/ubuntu-rockchip), if your board is supported. | ||||
|  | ||||
| #### Setup | ||||
|  | ||||
| @@ -165,8 +165,6 @@ devices: | ||||
|   - /dev/dma_heap | ||||
|   - /dev/rga | ||||
|   - /dev/mpp_service | ||||
| volumes: | ||||
|   - /sys/:/sys/:ro | ||||
| ``` | ||||
|  | ||||
| or add these options to your `docker run` command: | ||||
| @@ -177,19 +175,19 @@ or add these options to your `docker run` command: | ||||
| --device /dev/dri \ | ||||
| --device /dev/dma_heap \ | ||||
| --device /dev/rga \ | ||||
| --device /dev/mpp_service \ | ||||
| --volume /sys/:/sys/:ro | ||||
| --device /dev/mpp_service | ||||
| ``` | ||||
|  | ||||
| #### Configuration | ||||
|  | ||||
| Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform). | ||||
| Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration#rockchip-platform). | ||||
|  | ||||
| ## Docker | ||||
|  | ||||
| Running through Docker with Docker Compose is the recommended install method. | ||||
| Running in Docker with compose is the recommended install method. | ||||
|  | ||||
| ```yaml | ||||
| version: "3.9" | ||||
| services: | ||||
|   frigate: | ||||
|     container_name: frigate | ||||
| @@ -221,7 +219,7 @@ services: | ||||
|       FRIGATE_RTSP_PASSWORD: "password" | ||||
| ``` | ||||
|  | ||||
| If you can't use Docker Compose, you can run the container with something similar to this: | ||||
| If you can't use docker compose, you can run the container with something similar to this: | ||||
|  | ||||
| ```bash | ||||
| docker run -d \ | ||||
| @@ -245,23 +243,25 @@ docker run -d \ | ||||
|  | ||||
| The official docker image tags for the current stable version are: | ||||
|  | ||||
| - `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64. This build includes support for Hailo devices as well. | ||||
| - `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64 | ||||
| - `stable-standard-arm64` - Standard Frigate build for arm64 | ||||
| - `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU | ||||
| - `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector) | ||||
|  | ||||
| The community supported docker image tags for the current stable version are: | ||||
|  | ||||
| - `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5 | ||||
| - `stable-tensorrt-jp6` - Frigate build optimized for nvidia Jetson devices running Jetpack 6 | ||||
| - `stable-rk` - Frigate build for SBCs with Rockchip SoC | ||||
| - `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector) | ||||
|   - `stable-h8l` - Frigate build for the Hailo-8L M.2 PICe Raspberry Pi 5 hat | ||||
|  | ||||
| ## Home Assistant Add-on | ||||
| ## Home Assistant Addon | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| As of Home Assistant Operating System 10.2 and Home Assistant 2023.6 defining separate network storage for media is supported. | ||||
| As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported. | ||||
|  | ||||
| There are important limitations in HA OS to be aware of: | ||||
| There are important limitations in Home Assistant Operating System to be aware of: | ||||
|  | ||||
| - Separate local storage for media is not yet supported by Home Assistant | ||||
| - AMD GPUs are not supported because HA OS does not include the mesa driver. | ||||
| @@ -275,27 +275,24 @@ See [the network storage guide](/guides/ha_network_storage.md) for instructions | ||||
|  | ||||
| ::: | ||||
|  | ||||
| Home Assistant OS users can install via the Add-on repository. | ||||
| HassOS users can install via the addon repository. | ||||
|  | ||||
| 1. In Home Assistant, navigate to _Settings_ > _Add-ons_ > _Add-on Store_ > _Repositories_ | ||||
| 2. Add `https://github.com/blakeblackshear/frigate-hass-addons` | ||||
| 3. Install the desired variant of the Frigate Add-on (see below) | ||||
| 1. Navigate to Supervisor > Add-on Store > Repositories | ||||
| 2. Add https://github.com/blakeblackshear/frigate-hass-addons | ||||
| 3. Install your desired Frigate NVR Addon and navigate to its page | ||||
| 4. Setup your network configuration in the `Configuration` tab | ||||
| 5. Start the Add-on | ||||
| 6. Use the _Open Web UI_ button to access the Frigate UI, then click in the _cog icon_ > _Configuration editor_ and configure Frigate to your liking | ||||
| 5. (not for proxy addon) Create the file `frigate.yaml` in your `config` directory with your detailed Frigate configuration | ||||
| 6. Start the addon container | ||||
| 7. (not for proxy addon) If you are using hardware acceleration for ffmpeg, you may need to disable "Protection mode" | ||||
|  | ||||
| There are several variants of the Add-on available: | ||||
| There are several versions of the addon available: | ||||
|  | ||||
| | Add-on Variant             | Description                                                | | ||||
| | -------------------------- | ---------------------------------------------------------- | | ||||
| | Frigate                    | Current release with protection mode on                    | | ||||
| | Frigate (Full Access)      | Current release with the option to disable protection mode | | ||||
| | Frigate Beta               | Beta release with protection mode on                       | | ||||
| | Frigate Beta (Full Access) | Beta release with the option to disable protection mode    | | ||||
|  | ||||
| If you are using hardware acceleration for ffmpeg, you **may** need to use the _Full Access_ variant of the Add-on. This is because the Frigate Add-on runs in a container with limited access to the host system. The _Full Access_ variant allows you to disable _Protection mode_ and give Frigate full access to the host system. | ||||
|  | ||||
| You can also edit the Frigate configuration file through the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) or similar. In that case, the configuration file will be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](../configuration/index.md#accessing-add-on-config-dir). | ||||
| | Addon Version                  | Description                                                | | ||||
| | ------------------------------ | ---------------------------------------------------------- | | ||||
| | Frigate NVR                    | Current release with protection mode on                    | | ||||
| | Frigate NVR (Full Access)      | Current release with the option to disable protection mode | | ||||
| | Frigate NVR Beta               | Beta release with protection mode on                       | | ||||
| | Frigate NVR Beta (Full Access) | Beta release with the option to disable protection mode    | | ||||
|  | ||||
| ## Kubernetes | ||||
|  | ||||
| @@ -316,8 +313,7 @@ If you choose to run Frigate via LXC in Proxmox the setup can be complex so be p | ||||
|  | ||||
| ::: | ||||
|  | ||||
| Suggestions include: | ||||
|  | ||||
|  Suggestions include: | ||||
| - For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration: | ||||
|   - `lxc.cgroup2.devices.allow: c 226:128 rwm` | ||||
|   - `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file` | ||||
| @@ -408,7 +404,7 @@ mkdir -p /share/share_vol2/frigate/media | ||||
| # Also replace the time zone value for 'TZ' in the sample command. | ||||
| # Example command will create a docker container that uses at most 2 CPUs and 4G RAM. | ||||
| # You may need to add "--env=LIBVA_DRIVER_NAME=i965 \" to the following docker run command if you | ||||
| # have certain CPU (e.g., J4125). See https://docs.frigate.video/configuration/hardware_acceleration_video. | ||||
| # have certain CPU (e.g., J4125). See https://docs.frigate.video/configuration/hardware_acceleration. | ||||
| docker run \ | ||||
|   --name=frigate \ | ||||
|   --shm-size=256m \ | ||||
|   | ||||
| @@ -1,119 +0,0 @@ | ||||
| --- | ||||
| id: updating | ||||
| title: Updating | ||||
| --- | ||||
|  | ||||
| # Updating Frigate | ||||
|  | ||||
| The current stable version of Frigate is **0.15.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.15.0). | ||||
|  | ||||
| Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups. | ||||
|  | ||||
| ## Before You Begin | ||||
|  | ||||
| - **Stop Frigate**: For most methods, you’ll need to stop the running Frigate instance before backing up and updating. | ||||
| - **Backup Your Configuration**: Always back up your `/config` directory (e.g., `config.yml` and `frigate.db`, the SQLite database) before updating. This ensures you can roll back if something goes wrong. | ||||
| - **Check Release Notes**: Carefully review the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases) for breaking changes or configuration updates that might affect your setup. | ||||
|  | ||||
| ## Updating with Docker | ||||
|  | ||||
| If you’re running Frigate via Docker (recommended method), follow these steps: | ||||
|  | ||||
| 1. **Stop the Container**: | ||||
|  | ||||
|    - If using Docker Compose: | ||||
|      ```bash | ||||
|      docker compose down frigate | ||||
|      ``` | ||||
|    - If using `docker run`: | ||||
|      ```bash | ||||
|      docker stop frigate | ||||
|      ``` | ||||
|  | ||||
| 2. **Update and Pull the Latest Image**: | ||||
|  | ||||
|    - If using Docker Compose: | ||||
|      - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.15.0` instead of `0.14.1`). For example: | ||||
|        ```yaml | ||||
|        services: | ||||
|          frigate: | ||||
|            image: ghcr.io/blakeblackshear/frigate:0.15.0 | ||||
|        ``` | ||||
|      - Then pull the image: | ||||
|        ```bash | ||||
|        docker pull ghcr.io/blakeblackshear/frigate:0.15.0 | ||||
|        ``` | ||||
|      - **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling. | ||||
|    - If using `docker run`: | ||||
|      - Pull the image with the appropriate tag (e.g., `0.15.0`, `0.15.0-tensorrt`, or `stable`): | ||||
|        ```bash | ||||
|        docker pull ghcr.io/blakeblackshear/frigate:0.15.0 | ||||
|        ``` | ||||
|  | ||||
| 3. **Start the Container**: | ||||
|  | ||||
|    - If using Docker Compose: | ||||
|      ```bash | ||||
|      docker compose up -d | ||||
|      ``` | ||||
|    - If using `docker run`, re-run your original command (e.g., from the [Installation](./installation.md#docker) section) with the updated image tag. | ||||
|  | ||||
| 4. **Verify the Update**: | ||||
|    - Check the container logs to ensure Frigate starts successfully: | ||||
|      ```bash | ||||
|      docker logs frigate | ||||
|      ``` | ||||
|    - Visit the Frigate Web UI (default: `http://<your-ip>:5000`) to confirm the new version is running. The version number is displayed at the top of the System Metrics page. | ||||
|  | ||||
| ### Notes | ||||
|  | ||||
| - If you’ve customized other settings (e.g., `shm-size`), ensure they’re still appropriate after the update. | ||||
| - Docker will automatically use the updated image when you restart the container, as long as you pulled the correct version. | ||||
|  | ||||
| ## Updating the Home Assistant Addon | ||||
|  | ||||
| For users running Frigate as a Home Assistant Addon: | ||||
|  | ||||
| 1. **Check for Updates**: | ||||
|  | ||||
|    - Navigate to **Settings > Add-ons** in Home Assistant. | ||||
|    - Find your installed Frigate addon (e.g., "Frigate NVR" or "Frigate NVR (Full Access)"). | ||||
|    - If an update is available, you’ll see an "Update" button. | ||||
|  | ||||
| 2. **Update the Addon**: | ||||
|  | ||||
|    - Click the "Update" button next to the Frigate addon. | ||||
|    - Wait for the process to complete. Home Assistant will handle downloading and installing the new version. | ||||
|  | ||||
| 3. **Restart the Addon**: | ||||
|  | ||||
|    - After updating, go to the addon’s page and click "Restart" to apply the changes. | ||||
|  | ||||
| 4. **Verify the Update**: | ||||
|    - Check the addon logs (under the "Log" tab) to ensure Frigate starts without errors. | ||||
|    - Access the Frigate Web UI to confirm the new version is running. | ||||
|  | ||||
| ### Notes | ||||
|  | ||||
| - Ensure your `/config/frigate.yml` is compatible with the new version by reviewing the [Release notes](https://github.com/blakeblackshear/frigate/releases). | ||||
| - If using custom hardware (e.g., Coral or GPU), verify that configurations still work, as addon updates don’t modify your hardware settings. | ||||
|  | ||||
| ## Rolling Back | ||||
|  | ||||
| If an update causes issues: | ||||
|  | ||||
| 1. Stop Frigate. | ||||
| 2. Restore your backed-up config file and database. | ||||
| 3. Revert to the previous image version: | ||||
|    - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.14.1`) in your `docker run` command. | ||||
|    - For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.14.1`), and re-run `docker compose up -d`. | ||||
|    - For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon. | ||||
| 4. Verify the old version is running again. | ||||
|  | ||||
| ## Troubleshooting | ||||
|  | ||||
| - **Container Fails to Start**: Check logs (`docker logs frigate`) for errors. | ||||
| - **UI Not Loading**: Ensure ports (e.g., 5000, 8971) are still mapped correctly and the service is running. | ||||
| - **Hardware Issues**: Revisit hardware-specific setup (e.g., Coral, GPU) if detection or decoding fails post-update. | ||||
|  | ||||
| Common questions are often answered in the [FAQ](https://github.com/blakeblackshear/frigate/discussions), pinned at the top of the support discussions. | ||||
| @@ -13,7 +13,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect | ||||
|  | ||||
| # Setup a go2rtc stream | ||||
|  | ||||
| First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#module-streams), not just rtsp. | ||||
| First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#module-streams), not just rtsp. | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| @@ -32,74 +32,69 @@ go2rtc: | ||||
|  | ||||
| After adding this to the config, restart Frigate and try to watch the live stream for a single camera by clicking on it from the dashboard. It should look much clearer and more fluent than the original jsmpeg stream. | ||||
|  | ||||
|  | ||||
| ### What if my video doesn't play? | ||||
|  | ||||
| - Check Logs: | ||||
|  | ||||
|   - Access the go2rtc logs in the Frigate UI under Logs in the sidebar. | ||||
|   - If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. | ||||
|     - Access the go2rtc logs in the Frigate UI under Logs in the sidebar. | ||||
|     - If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. | ||||
|  | ||||
| - Check go2rtc Web Interface: if you don't see any errors in the logs, try viewing the camera through go2rtc's web interface. | ||||
|  | ||||
|   - Navigate to port 1984 in your browser to access go2rtc's web interface. | ||||
|     - If using Frigate through Home Assistant, enable the web interface at port 1984. | ||||
|     - If using Docker, forward port 1984 before accessing the web interface. | ||||
|   - Click `stream` for the specific camera to see if the camera's stream is being received. | ||||
|     - Navigate to port 1984 in your browser to access go2rtc's web interface. | ||||
|         - If using Frigate through Home Assistant, enable the web interface at port 1984. | ||||
|         - If using Docker, forward port 1984 before accessing the web interface. | ||||
|     - Click `stream` for the specific camera to see if the camera's stream is being received. | ||||
|  | ||||
| - Check Video Codec: | ||||
|     - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. | ||||
|     - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#codecs-madness) in go2rtc documentation. | ||||
|     - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. | ||||
|         ```yaml | ||||
|         go2rtc: | ||||
|           streams: | ||||
|             back: | ||||
|               - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|               - "ffmpeg:back#video=h264#hardware" | ||||
|         ``` | ||||
|  | ||||
|   - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. | ||||
|   - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#codecs-madness) in go2rtc documentation. | ||||
|   - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. | ||||
|     ```yaml | ||||
|     go2rtc: | ||||
|       streams: | ||||
|         back: | ||||
|           - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|           - "ffmpeg:back#video=h264#hardware" | ||||
|     ``` | ||||
| - Switch to FFmpeg if needed:  | ||||
|     - Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types. | ||||
|         ```yaml | ||||
|         go2rtc: | ||||
|           streams: | ||||
|             back: | ||||
|               - ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|         ``` | ||||
|  | ||||
| - Switch to FFmpeg if needed: | ||||
|     - If you can see the video but do not have audio, this is most likely because your camera's audio stream codec is not AAC. | ||||
|     - If possible, update your camera's audio settings to AAC in your camera's firmware. | ||||
|     - If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows: | ||||
|         ```yaml | ||||
|         go2rtc: | ||||
|           streams: | ||||
|             back: | ||||
|               - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|               - "ffmpeg:back#audio=aac" | ||||
|         ``` | ||||
|  | ||||
|   - Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types. | ||||
|         If you need to convert **both** the audio and video streams, you can use the following: | ||||
|  | ||||
|     ```yaml | ||||
|     go2rtc: | ||||
|       streams: | ||||
|         back: | ||||
|           - ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|     ``` | ||||
|         ```yaml | ||||
|         go2rtc: | ||||
|           streams: | ||||
|             back: | ||||
|               - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|               - "ffmpeg:back#video=h264#audio=aac#hardware" | ||||
|         ``` | ||||
|  | ||||
|   - If you can see the video but do not have audio, this is most likely because your camera's audio stream codec is not AAC. | ||||
|   - If possible, update your camera's audio settings to AAC in your camera's firmware. | ||||
|   - If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows: | ||||
|         When using the ffmpeg module, you would add AAC audio like this: | ||||
|  | ||||
|     ```yaml | ||||
|     go2rtc: | ||||
|       streams: | ||||
|         back: | ||||
|           - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|           - "ffmpeg:back#audio=aac" | ||||
|     ``` | ||||
|  | ||||
|     If you need to convert **both** the audio and video streams, you can use the following: | ||||
|  | ||||
|     ```yaml | ||||
|     go2rtc: | ||||
|       streams: | ||||
|         back: | ||||
|           - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 | ||||
|           - "ffmpeg:back#video=h264#audio=aac#hardware" | ||||
|     ``` | ||||
|  | ||||
|     When using the ffmpeg module, you would add AAC audio like this: | ||||
|  | ||||
|     ```yaml | ||||
|     go2rtc: | ||||
|       streams: | ||||
|         back: | ||||
|           - "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac#hardware" | ||||
|     ``` | ||||
|         ```yaml | ||||
|         go2rtc: | ||||
|           streams: | ||||
|             back: | ||||
|               - "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac#hardware" | ||||
|         ``` | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| @@ -115,7 +110,3 @@ section. | ||||
|  | ||||
| 1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera). | ||||
| 2. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats and may require opening ports on your router. | ||||
|  | ||||
| ## Important considerations | ||||
|  | ||||
| If you are configuring go2rtc to publish HomeKit camera streams, on pairing the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container. These changes must be manually copied across to the `go2rtc` section of your Frigate configuration in order to persist through restarts. | ||||
|   | ||||
| @@ -9,7 +9,7 @@ title: Getting started | ||||
|  | ||||
| If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below. | ||||
|  | ||||
| If you already have Frigate installed through Docker or through a Home Assistant Add-on, you can continue to [Configuring Frigate](#configuring-frigate) below. | ||||
| If you already have Frigate installed in Docker or as a Home Assistant addon, you can continue to [Configuring Frigate](#configuring-frigate) below. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -81,7 +81,7 @@ Now you have a minimal Debian server that requires very little maintenance. | ||||
|  | ||||
| ## Installing Frigate | ||||
|  | ||||
| This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant Add-on or another way, you can continue to [Configuring Frigate](#configuring-frigate). | ||||
| This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant addon or another way, you can continue to [Configuring Frigate](#configuring-frigate). | ||||
|  | ||||
| ### Setup directories | ||||
|  | ||||
| @@ -110,6 +110,7 @@ This `docker-compose.yml` file is just a starter for amd64 devices. You will nee | ||||
| `docker-compose.yml` | ||||
|  | ||||
| ```yaml | ||||
| version: "3.9" | ||||
| services: | ||||
|   frigate: | ||||
|     container_name: frigate | ||||
| @@ -162,13 +163,14 @@ FFmpeg arguments for other types of cameras can be found [here](../configuration | ||||
|  | ||||
| ### Step 3: Configure hardware acceleration (recommended) | ||||
|  | ||||
| Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration_video.md) config reference for examples applicable to your hardware. | ||||
| Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware. | ||||
|  | ||||
| Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md): | ||||
|  | ||||
| `docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) | ||||
|  | ||||
| ```yaml | ||||
| version: "3.9" | ||||
| services: | ||||
|   frigate: | ||||
|     ... | ||||
| @@ -197,6 +199,7 @@ By default, Frigate will use a single CPU detector. If you have a USB Coral, you | ||||
| `docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) | ||||
|  | ||||
| ```yaml | ||||
| version: "3.9" | ||||
| services: | ||||
|   frigate: | ||||
|     ... | ||||
| @@ -303,7 +306,6 @@ By default, Frigate will retain video of all tracked objects for 10 days. The fu | ||||
| ### Step 7: Complete config | ||||
|  | ||||
| At this point you have a complete config with basic functionality. | ||||
|  | ||||
| - View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of common configuration examples. | ||||
| - View [full config reference](../configuration/reference.md) for a complete list of configuration options. | ||||
|  | ||||
|   | ||||
| @@ -3,18 +3,24 @@ id: ha_network_storage | ||||
| title: Home Assistant network storage | ||||
| --- | ||||
|  | ||||
| As of Home Assistant 2023.6, Network Mounted Storage is supported for Add-ons. | ||||
| As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons. | ||||
|  | ||||
| ## Setting Up Remote Storage For Frigate | ||||
|  | ||||
| ### Prerequisites | ||||
|  | ||||
| - Home Assistant 2023.6 or newer is installed | ||||
| - Running Home Assistant Operating System 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install) | ||||
| - HA Core 2023.6 or newer is installed | ||||
| - Running HA OS 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install) | ||||
|  | ||||
| ### Initial Setup | ||||
|  | ||||
| 1. Stop the Frigate Add-on | ||||
| 1. Stop the Frigate addon | ||||
| 2. Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding: | ||||
|  | ||||
| ```yaml | ||||
| database: | ||||
|   path: /config/frigate.db | ||||
| ``` | ||||
|  | ||||
| ### Move current data | ||||
|  | ||||
| @@ -37,4 +43,4 @@ Keeping the current data is optional, but the data will need to be moved regardl | ||||
| 4. Fill out the additional required info for your particular NAS | ||||
| 5. Connect | ||||
| 6. Move files from `/media/frigate_tmp` to `/media/frigate` if they were kept in previous step | ||||
| 7. Start the Frigate Add-on | ||||
| 7. Start the Frigate addon | ||||
|   | ||||
| @@ -35,7 +35,6 @@ There are many solutions available to implement reverse proxies and the communit | ||||
| * [Apache2](#apache2-reverse-proxy) | ||||
| * [Nginx](#nginx-reverse-proxy) | ||||
| * [Traefik](#traefik-reverse-proxy) | ||||
| * [Caddy](#caddy-reverse-proxy) | ||||
|  | ||||
| ## Apache2 Reverse Proxy | ||||
|  | ||||
| @@ -118,8 +117,7 @@ server { | ||||
|   set $port           8971; | ||||
|  | ||||
|   listen 80; | ||||
|   listen 443 ssl; | ||||
|   http2 on; | ||||
|   listen 443 ssl http2; | ||||
|  | ||||
|   server_name frigate.domain.com; | ||||
| } | ||||
| @@ -179,33 +177,3 @@ The above configuration will create a "service" in Traefik, automatically adding | ||||
| It will also add a router, routing requests to "traefik.example.com" to your local container. | ||||
|  | ||||
| Note that with this approach, you don't need to expose any ports for the Frigate instance since all traffic will be routed over the internal Docker network. | ||||
|  | ||||
| ## Caddy Reverse Proxy | ||||
|  | ||||
| This example shows Frigate running under a subdomain with logging and a tls cert (in this case a wildcard domain cert obtained independently of caddy) handled via imports | ||||
|  | ||||
| ```caddy | ||||
| (logging) { | ||||
|         log { | ||||
|                 output file /var/log/caddy/{args[0]}.log { | ||||
|                         roll_size 10MiB | ||||
|                         roll_keep 5 | ||||
|                         roll_keep_for 10d | ||||
|                 } | ||||
|                 format json | ||||
|                 level INFO | ||||
|         } | ||||
| } | ||||
|  | ||||
|  | ||||
| (tls) { | ||||
|         tls /var/lib/caddy/wildcard.YOUR_DOMAIN.TLD.fullchain.pem /var/lib/caddy/wildcard.YOUR_DOMAIN.TLD.privkey.pem | ||||
| } | ||||
|  | ||||
| frigate.YOUR_DOMAIN.TLD { | ||||
|         reverse_proxy http://localhost:8971  | ||||
|         import tls | ||||
|         import logging frigate.YOUR_DOMAIN.TLD | ||||
| } | ||||
|  | ||||
| ``` | ||||
|   | ||||
| @@ -51,7 +51,7 @@ When configuring the integration, you will be asked for the `URL` of your Frigat | ||||
|  | ||||
| ### Docker Compose Examples | ||||
|  | ||||
| If you are running Home Assistant and Frigate with Docker Compose on the same device, here are some examples. | ||||
| If you are running Home Assistant Core and Frigate with Docker Compose on the same device, here are some examples. | ||||
|  | ||||
| #### Home Assistant running with host networking | ||||
|  | ||||
| @@ -60,6 +60,7 @@ It is not recommended to run Frigate in host networking mode. In this example, y | ||||
| ```yaml | ||||
| services: | ||||
|   homeassistant: | ||||
|     container_name: hass | ||||
|     image: ghcr.io/home-assistant/home-assistant:stable | ||||
|     network_mode: host | ||||
|     ... | ||||
| @@ -79,6 +80,7 @@ In this example, it is recommended to connect to the authenticated port, for exa | ||||
| ```yaml | ||||
| services: | ||||
|   homeassistant: | ||||
|     container_name: hass | ||||
|     image: ghcr.io/home-assistant/home-assistant:stable | ||||
|     # network_mode: host | ||||
|     ... | ||||
| @@ -91,16 +93,17 @@ services: | ||||
|       ... | ||||
| ``` | ||||
|  | ||||
| ### Home Assistant Add-on | ||||
| ### HassOS Addon | ||||
|  | ||||
| If you are using Home Assistant Add-on, the URL should be one of the following depending on which Add-on variant you are using. Note that if you are using the Proxy Add-on, you should NOT point the integration at the proxy URL. Just enter the same URL used to access Frigate directly from your network. | ||||
| If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network. | ||||
|  | ||||
| | Add-on Variant             | URL                                       | | ||||
| | -------------------------- | ----------------------------------------- | | ||||
| | Frigate                    | `http://ccab4aaf-frigate:5000`            | | ||||
| | Frigate (Full Access)      | `http://ccab4aaf-frigate-fa:5000`         | | ||||
| | Frigate Beta               | `http://ccab4aaf-frigate-beta:5000`       | | ||||
| | Frigate Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000`    | | ||||
| | Addon Version                  | URL                                       | | ||||
| | ------------------------------ | ----------------------------------------- | | ||||
| | Frigate NVR                    | `http://ccab4aaf-frigate:5000`            | | ||||
| | Frigate NVR (Full Access)      | `http://ccab4aaf-frigate-fa:5000`         | | ||||
| | Frigate NVR Beta               | `http://ccab4aaf-frigate-beta:5000`       | | ||||
| | Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000`    | | ||||
| | Frigate NVR HailoRT Beta       | `http://ccab4aaf-frigate-hailo-beta:5000` | | ||||
|  | ||||
| ### Frigate running on a separate machine | ||||
|  | ||||
| @@ -110,14 +113,6 @@ If you run Frigate on a separate device within your local network, Home Assistan | ||||
|  | ||||
| Use `http://<frigate_device_ip>:8971` as the URL for the integration so that authentication is required. | ||||
|  | ||||
| :::tip | ||||
|  | ||||
| The above URL assumes you have [disabled TLS](../configuration/tls). | ||||
| By default, TLS is enabled and Frigate will be using a self-signed certificate. Home Assistant will fail to connect via HTTPS to port 8971 since it cannot verify the self-signed certificate. | ||||
| Either disable TLS and use HTTP from Home Assistant, or configure Frigate to be accessible with a valid certificate. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ```yaml | ||||
| services: | ||||
|   frigate: | ||||
|   | ||||
| @@ -28,14 +28,7 @@ Message published for each changed tracked object. The first message is publishe | ||||
|     "id": "1607123955.475377-mxklsc", | ||||
|     "camera": "front_door", | ||||
|     "frame_time": 1607123961.837752, | ||||
|     "snapshot": { | ||||
|         "frame_time": 1607123965.975463, | ||||
|         "box": [415, 489, 528, 700], | ||||
|         "area": 12728, | ||||
|         "region": [260, 446, 660, 846], | ||||
|         "score": 0.77546, | ||||
|         "attributes": [], | ||||
|     }, | ||||
|     "snapshot_time": 1607123961.837752, | ||||
|     "label": "person", | ||||
|     "sub_label": null, | ||||
|     "top_score": 0.958984375, | ||||
| @@ -61,22 +54,13 @@ Message published for each changed tracked object. The first message is publishe | ||||
|     }, // attributes with top score that have been identified on the object at any point | ||||
|     "current_attributes": [], // detailed data about the current attributes in this frame | ||||
|     "current_estimated_speed": 0.71, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled | ||||
|     "velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled | ||||
|     "recognized_license_plate": "ABC12345", // a recognized license plate for car objects | ||||
|     "recognized_license_plate_score": 0.933451 | ||||
|     "velocity_angle": 180 // direction of travel relative to the frame for objects moving through zones with speed estimation enabled | ||||
|   }, | ||||
|   "after": { | ||||
|     "id": "1607123955.475377-mxklsc", | ||||
|     "camera": "front_door", | ||||
|     "frame_time": 1607123962.082975, | ||||
|     "snapshot": { | ||||
|         "frame_time": 1607123965.975463, | ||||
|         "box": [415, 489, 528, 700], | ||||
|         "area": 12728, | ||||
|         "region": [260, 446, 660, 846], | ||||
|         "score": 0.77546, | ||||
|         "attributes": [], | ||||
|     }, | ||||
|     "snapshot_time": 1607123961.837752, | ||||
|     "label": "person", | ||||
|     "sub_label": ["John Smith", 0.79], | ||||
|     "top_score": 0.958984375, | ||||
| @@ -109,18 +93,14 @@ Message published for each changed tracked object. The first message is publishe | ||||
|       } | ||||
|     ], | ||||
|     "current_estimated_speed": 0.77, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled | ||||
|     "velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled | ||||
|     "recognized_license_plate": "ABC12345", // a recognized license plate for car objects | ||||
|     "recognized_license_plate_score": 0.933451 | ||||
|     "velocity_angle": 180 // direction of travel relative to the frame for objects moving through zones with speed estimation enabled | ||||
|   } | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### `frigate/tracked_object_update` | ||||
|  | ||||
| Message published for updates to tracked object metadata, for example: | ||||
|  | ||||
| #### Generative AI Description Update | ||||
| Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description. | ||||
|  | ||||
| ```json | ||||
| { | ||||
| @@ -130,33 +110,6 @@ Message published for updates to tracked object metadata, for example: | ||||
| } | ||||
| ``` | ||||
|  | ||||
| #### Face Recognition Update | ||||
|  | ||||
| ```json | ||||
| { | ||||
|   "type": "face", | ||||
|   "id": "1607123955.475377-mxklsc", | ||||
|   "name": "John", | ||||
|   "score": 0.95, | ||||
|   "camera": "front_door_cam", | ||||
|   "timestamp": 1607123958.748393, | ||||
| } | ||||
| ``` | ||||
|  | ||||
| #### License Plate Recognition Update | ||||
|  | ||||
| ```json | ||||
| { | ||||
|   "type": "lpr", | ||||
|   "id": "1607123955.475377-mxklsc", | ||||
|   "name": "John's Car", | ||||
|   "plate": "123ABC", | ||||
|   "score": 0.95, | ||||
|   "camera": "driveway_cam", | ||||
|   "timestamp": 1607123958.748393, | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### `frigate/reviews` | ||||
|  | ||||
| Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish a, `update` message with the same id. When the review activity has ended a final `end` message is published. | ||||
| @@ -348,10 +301,6 @@ Topic to adjust motion contour area for a camera. Expected value is an integer. | ||||
|  | ||||
| Topic with current motion contour area for a camera. Published value is an integer. | ||||
|  | ||||
| ### `frigate/<camera_name>/review_status` | ||||
|  | ||||
| Topic with current activity status of the camera. Possible values are `NONE`, `DETECTION`, or `ALERT`. | ||||
|  | ||||
| ### `frigate/<camera_name>/ptz` | ||||
|  | ||||
| Topic to send PTZ commands to camera. | ||||
|   | ||||
| @@ -19,11 +19,11 @@ Once logged in, you can generate an API key for Frigate in Settings. | ||||
|  | ||||
| ### Set your API key | ||||
|  | ||||
| In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Add-ons > Frigate > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch). | ||||
| In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch). | ||||
|  | ||||
| :::warning | ||||
|  | ||||
| You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or Home Assistant Add-on config. | ||||
| You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or HA addon config. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| @@ -43,7 +43,7 @@ Snapshots must be enabled to be able to submit examples to Frigate+ | ||||
|  | ||||
| ### Annotate and verify | ||||
|  | ||||
| You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image. For more detailed information about labeling, see the documentation on [annotating](../plus/annotating.md). | ||||
| You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image. For more detailed information about labeling, see the documentation on [improving your model](../plus/improving_model.md). | ||||
|  | ||||
|  | ||||
|  | ||||
| @@ -51,8 +51,6 @@ You can view all of your submitted images at [https://plus.frigate.video](https: | ||||
|  | ||||
| Once you have [requested your first model](../plus/first_model.md) and gotten your own model ID, it can be used with a special model path. No other information needs to be configured for Frigate+ models because it fetches the remaining config from Frigate+ automatically. | ||||
|  | ||||
| You can either choose the new model from the Frigate+ pane in the Settings page of the Frigate UI, or manually set the model at the root level in your config: | ||||
|  | ||||
| ```yaml | ||||
| model: | ||||
|   path: plus://<your_model_id> | ||||
|   | ||||
| @@ -13,10 +13,6 @@ Please use your own knowledge to assess and vet them before you install anything | ||||
|  | ||||
| ::: | ||||
|  | ||||
| ## [Advanced Camera Card (formerly known as Frigate Card)](https://card.camera/#/README) | ||||
|  | ||||
| The [Advanced Camera Card](https://card.camera/#/README) is a Home Assistant dashboard card with deep Frigate integration. | ||||
|  | ||||
| ## [Double Take](https://github.com/skrashevich/double-take) | ||||
|  | ||||
| [Double Take](https://github.com/skrashevich/double-take) provides an unified UI and API for processing and training images for facial recognition. | ||||
| @@ -25,16 +21,8 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht | ||||
|  | ||||
| ## [Frigate Notify](https://github.com/0x2142/frigate-notify) | ||||
|  | ||||
| [Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended. | ||||
|  | ||||
| ## [Frigate Snap-Sync](https://github.com/thequantumphysicist/frigate-snap-sync/) | ||||
|  | ||||
| [Frigate Snap-Sync](https://github.com/thequantumphysicist/frigate-snap-sync/) is a program that works in tandem with Frigate. It responds to Frigate when a snapshot or a review is made (and more can be added), and uploads them to one or more remote server(s) of your choice. | ||||
| [Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate NVR to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended. | ||||
|  | ||||
| ## [Frigate telegram](https://github.com/OldTyT/frigate-telegram) | ||||
|  | ||||
| [Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail. | ||||
|  | ||||
| ## [Periscope](https://github.com/maksz42/periscope) | ||||
|  | ||||
| [Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS. | ||||
|   | ||||
| @@ -22,13 +22,3 @@ Yes. Models and metadata are stored in the `model_cache` directory within the co | ||||
| ### Can I keep using my Frigate+ models even if I do not renew my subscription? | ||||
|  | ||||
| Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained with your subscription are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models. | ||||
|  | ||||
| ### Why can't I submit images to Frigate+? | ||||
|  | ||||
| If you've configured your API key and the Frigate+ Settings page in the UI shows that the key is active, you need to ensure that you've enabled both snapshots and `clean_copy` snapshots for the cameras you'd like to submit images for. Note that `clean_copy` is enabled by default when snapshots are enabled. | ||||
|  | ||||
| ```yaml | ||||
| snapshots: | ||||
|   enabled: true | ||||
|   clean_copy: true | ||||
| ``` | ||||
|   | ||||
| @@ -9,11 +9,11 @@ Before requesting your first model, you will need to upload and verify at least | ||||
|  | ||||
| It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present. | ||||
|  | ||||
| For more detailed recommendations, you can refer to the docs on [annotating](./annotating.md). | ||||
| For more detailed recommendations, you can refer to the docs on [improving your model](./improving_model.md). | ||||
|  | ||||
| ## Step 2: Submit a model request | ||||
|  | ||||
| Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). If you are unsure which type to request, you can test the base model for each version from the "Base Models" tab. Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours. | ||||
| Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours. | ||||
|  | ||||
|  | ||||
| ## Step 3: Set your model id in the config | ||||
|   | ||||
| @@ -1,9 +1,17 @@ | ||||
| --- | ||||
| id: annotating | ||||
| title: Annotating your images | ||||
| id: improving_model | ||||
| title: Improving your model | ||||
| --- | ||||
| 
 | ||||
| For the best results, follow these guidelines. You may also want to review the documentation on [improving your model](./index.md#improving-your-model). | ||||
| You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training. | ||||
| 
 | ||||
| - **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present. | ||||
| - **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores. | ||||
| - **Submit diverse images**. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training. | ||||
| 
 | ||||
| ## Properly labeling images | ||||
| 
 | ||||
| For the best results, follow the following guidelines. | ||||
| 
 | ||||
| **Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused. You can exclude labels that you don't want detected on any of your cameras. | ||||
| 
 | ||||
| @@ -17,17 +25,9 @@ For the best results, follow these guidelines. You may also want to review the d | ||||
| 
 | ||||
|  | ||||
| 
 | ||||
| ## AI suggested labels | ||||
| 
 | ||||
| If you have an active Frigate+ subscription, new uploads will be scanned for the objects configured for you camera and you will see suggested labels as light blue boxes when annotating in Frigate+. These suggestions are processed via a queue and typically complete within a minute after uploading, but processing times can be longer. | ||||
| 
 | ||||
|  | ||||
| 
 | ||||
| Suggestions are converted to labels when saving, so you should remove any errant suggestions. There is already some logic designed to avoid duplicate labels, but you may still occasionally see some duplicate suggestions. You should keep the most accurate bounding box and delete any duplicates so that you have just one label per object remaining. | ||||
| 
 | ||||
| ## False positive labels | ||||
| 
 | ||||
| False positives will be shown with a red box and the label will have a strike through. These can't be adjusted, but they can be deleted if you accidentally submit a true positive as a false positive from Frigate. | ||||
| False positives will be shown with a read box and the label will have a strike through. | ||||
|  | ||||
| 
 | ||||
| Misidentified objects should have a correct label added. For example, if a person was mistakenly detected as a cat, you should submit it as a false positive in Frigate and add a label for the person. The boxes will overlap. | ||||
| @@ -3,9 +3,15 @@ id: index | ||||
| title: Models | ||||
| --- | ||||
|  | ||||
| <a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a base model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions. | ||||
| <a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions. | ||||
|  | ||||
| With a subscription, 12 model trainings to fine tune your model per year are included. In addition, you will have access to any base models published while your subscription is active. If you cancel your subscription, you will retain access to any trained and base models in your account. An active subscription is required to submit model requests or purchase additional trainings. New base models are published quarterly with target dates of January 15th, April 15th, July 15th, and October 15th. | ||||
| :::info | ||||
|  | ||||
| The baseline model isn't directly available after subscribing. This may change in the future, but for now you will need to submit a model request with the minimum number of images. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| With a subscription, 12 model trainings per year are included. If you cancel your subscription, you will retain access to any trained models. An active subscription is required to submit model requests or purchase additional trainings. | ||||
|  | ||||
| Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md). | ||||
|  | ||||
| @@ -13,7 +19,7 @@ Information on how to integrate Frigate+ with Frigate can be found in the [integ | ||||
|  | ||||
| There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types). | ||||
|  | ||||
| Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models. | ||||
| Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). | ||||
|  | ||||
| | Model Type  | Description                                                                                                                                  | | ||||
| | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- | | ||||
| @@ -30,46 +36,28 @@ Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later. | ||||
|  | ||||
| ::: | ||||
|  | ||||
| | Hardware                                                                         | Recommended Detector Type | Recommended Model Type | | ||||
| | -------------------------------------------------------------------------------- | ------------------------- | ---------------------- | | ||||
| | [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended)           | `cpu`                     | `mobiledet`            | | ||||
| | [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu`                 | `mobiledet`            | | ||||
| | [Intel](/configuration/object_detectors.md#openvino-detector)                    | `openvino`                | `yolonas`              | | ||||
| | [NVidia GPU](/configuration/object_detectors#onnx)\*                             | `onnx`                    | `yolonas`              | | ||||
| | [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector)\*           | `rocm`                    | `yolonas`              | | ||||
| | Hardware                                                                                                                     | Recommended Detector Type | Recommended Model Type | | ||||
| | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ---------------------- | | ||||
| | [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended)                                                       | `cpu`                     | `mobiledet`            | | ||||
| | [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector)                                             | `edgetpu`                 | `mobiledet`            | | ||||
| | [Intel](/configuration/object_detectors.md#openvino-detector)                                                                | `openvino`                | `yolonas`              | | ||||
| | [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\*                   | `onnx`                    | `yolonas`              | | ||||
| | [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `onnx`                    | `yolonas`              | | ||||
|  | ||||
| _\* Requires Frigate 0.15_ | ||||
|  | ||||
| ## Improving your model | ||||
|  | ||||
| Some users may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training. | ||||
|  | ||||
| - **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present. | ||||
| - **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores. | ||||
| - **Submit diverse images**. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training. | ||||
|  | ||||
| ## Available label types | ||||
|  | ||||
| Frigate+ models support a more relevant set of objects for security cameras. The labels for annotation in Frigate+ are configurable by editing the camera in the Cameras section of Frigate+. Currently, the following objects are supported: | ||||
| Frigate+ models support a more relevant set of objects for security cameras. Currently, the following objects are supported: | ||||
|  | ||||
| - **People**: `person`, `face` | ||||
| - **Vehicles**: `car`, `motorcycle`, `bicycle`, `boat`, `school_bus`, `license_plate` | ||||
| - **Delivery Logos**: `amazon`, `usps`, `ups`, `fedex`, `dhl`, `an_post`, `purolator`, `postnl`, `nzpost`, `postnord`, `gls`, `dpd`, `canada_post`, `royal_mail` | ||||
| - **Animals**: `dog`, `cat`, `deer`, `horse`, `bird`, `raccoon`, `fox`, `bear`, `cow`, `squirrel`, `goat`, `rabbit`, `skunk`, `kangaroo` | ||||
| - **Vehicles**: `car`, `motorcycle`, `bicycle`, `boat`, `license_plate` | ||||
| - **Delivery Logos**: `amazon`, `usps`, `ups`, `fedex`, `dhl`, `an_post`, `purolator`, `postnl`, `nzpost`, `postnord`, `gls`, `dpd` | ||||
| - **Animals**: `dog`, `cat`, `deer`, `horse`, `bird`, `raccoon`, `fox`, `bear`, `cow`, `squirrel`, `goat`, `rabbit` | ||||
| - **Other**: `package`, `waste_bin`, `bbq_grill`, `robot_lawnmower`, `umbrella` | ||||
|  | ||||
| Other object types available in the default Frigate model are not available. Additional object types will be added in future releases. | ||||
|  | ||||
| ### Candidate labels | ||||
|  | ||||
| Candidate labels are also available for annotation. These labels don't have enough data to be included in the model yet, but using them will help add support sooner. You can enable these labels by editing the camera settings. | ||||
|  | ||||
| Where possible, these labels are mapped to existing labels during training. For example, any `baby` labels are mapped to `person` until support for new labels is added. | ||||
|  | ||||
| The candidate labels are: `baby`, `bpost`, `badger`, `possum`, `rodent`, `chicken`, `groundhog`, `boar`, `hedgehog`, `tractor`, `golf cart`, `garbage truck`, `bus`, `sports ball` | ||||
|  | ||||
| Candidate labels are not available for automatic suggestions. | ||||
|  | ||||
| ### Label attributes | ||||
|  | ||||
| Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, and delivery logos such as `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed. | ||||
|   | ||||
| @@ -32,7 +32,7 @@ The USB coral can draw up to 900mA and this can be too much for some on-device U | ||||
| The USB coral has different IDs when it is uninitialized and initialized. | ||||
|  | ||||
| - When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped. | ||||
| - When running through the Home Assistant OS you may need to run the Full Access variant of the Frigate Add-on with the _Protection mode_ switch disabled so that the coral can be accessed. | ||||
| - When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed. | ||||
|  | ||||
| ### Synology 716+II running DSM 7.2.1-69057 Update 5 | ||||
|  | ||||
| @@ -46,17 +46,6 @@ Some users have reported that this older device runs an older kernel causing iss | ||||
| 6. Open the control panel - info scree. The coral TPU will now be recognised as a USB Device - google inc | ||||
| 7. Start the frigate container. Everything should work now! | ||||
|  | ||||
| ### QNAP NAS | ||||
|  | ||||
| QNAP NAS devices, such as the TS-253A, may use connected Coral TPU devices if [QuMagie](https://www.qnap.com/en/software/qumagie) is installed along with its QNAP AI Core extension. If any of the features—`facial recognition`, `object recognition`, or `similar photo recognition`—are enabled, Container Station applications such as `Frigate` or `CodeProject.AI Server` will be unable to initialize the TPU device in use. | ||||
| To allow the Coral TPU device to be discovered, the you must either: | ||||
|  | ||||
| 1. [Disable the AI recognition features in QuMagie](https://docs.qnap.com/application/qumagie/2.x/en-us/configuring-qnap-ai-core-settings-FB13CE03.html), | ||||
| 2. Remove the QNAP AI Core extension or | ||||
| 3. Manually start the QNAP AI Core extension after Frigate has fully started (not recommended). | ||||
|  | ||||
| It is also recommended to restart the NAS once the changes have been made. | ||||
|  | ||||
| ## USB Coral Detection Appears to be Stuck | ||||
|  | ||||
| The USB Coral can become stuck and need to be restarted, this can happen for a number of reasons depending on hardware and software setup. Some common reasons are: | ||||
| @@ -66,10 +55,10 @@ The USB Coral can become stuck and need to be restarted, this can happen for a n | ||||
|  | ||||
| ## PCIe Coral Not Detected | ||||
|  | ||||
| The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on what OS and kernel that is being run. | ||||
| The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on what OS and kernel that is being run.  | ||||
|  | ||||
| - In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral. | ||||
| - For some newer Linux distros (for example, Ubuntu 22.04+), https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver. | ||||
| - For Ubuntu 22.04+ https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver. | ||||
|  | ||||
| ## Attempting to load TPU as pci & Fatal Python error: Illegal instruction | ||||
|  | ||||
|   | ||||
| @@ -34,7 +34,7 @@ Frigate generally [recommends cameras with configurable sub streams](/frigate/ha | ||||
| To do this efficiently the following setup is required: | ||||
|  | ||||
| 1. A GPU or iGPU must be available to do the scaling. | ||||
| 2. [ffmpeg presets for hwaccel](/configuration/hardware_acceleration_video.md) must be used | ||||
| 2. [ffmpeg presets for hwaccel](/configuration/hardware_acceleration.md) must be used | ||||
| 3. Set the desired detection resolution for `detect -> width` and `detect -> height`. | ||||
|  | ||||
| When this is done correctly, the GPU will do the decoding and scaling which will result in a small increase in CPU usage but with better results. | ||||
|   | ||||
| @@ -1,13 +0,0 @@ | ||||
| --- | ||||
| id: gpu | ||||
| title: Troubleshooting GPU | ||||
| --- | ||||
|  | ||||
| ## OpenVINO | ||||
|  | ||||
| ### Can't get OPTIMIZATION_CAPABILITIES property as no supported devices found. | ||||
|  | ||||
| Some users have reported issues using some Intel iGPUs with OpenVINO, where the GPU would not be detected. This error can be caused by various problems, so it is important to ensure the configuration is setup correctly. Some solutions users have noted: | ||||
|  | ||||
| - In some cases users have noted that an HDMI dummy plug was necessary to be plugged into the motherboard's HDMI port. | ||||
| - When mixing an Intel iGPU with Nvidia GPU, the devices can be mixed up between `/dev/dri/renderD128` and `/dev/dri/renderD129` so it is important to confirm the correct device, or map the entire `/dev/dri` directory into the Frigate container. | ||||
| @@ -47,9 +47,10 @@ On linux, some helpful tools/commands in diagnosing would be: | ||||
|  | ||||
| On modern linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set allocations memory and memory+swap to be the same and disable swapping by setting the following docker/podman run parameters: | ||||
|  | ||||
| **Docker Compose example** | ||||
| **Compose example** | ||||
|  | ||||
| ```yaml | ||||
| version: "3.9" | ||||
| services: | ||||
|   frigate: | ||||
|     ... | ||||
|   | ||||
| @@ -1,110 +1,56 @@ | ||||
| import type * as Preset from "@docusaurus/preset-classic"; | ||||
| import * as path from "node:path"; | ||||
| import type { Config, PluginConfig } from "@docusaurus/types"; | ||||
| import type * as OpenApiPlugin from "docusaurus-plugin-openapi-docs"; | ||||
| import type * as Preset from '@docusaurus/preset-classic'; | ||||
| import * as path from 'node:path'; | ||||
| import type { Config, PluginConfig } from '@docusaurus/types'; | ||||
| import type * as OpenApiPlugin from 'docusaurus-plugin-openapi-docs'; | ||||
|  | ||||
| const config: Config = { | ||||
|   title: "Frigate", | ||||
|   tagline: "NVR With Realtime Object Detection for IP Cameras", | ||||
|   url: "https://docs.frigate.video", | ||||
|   baseUrl: "/", | ||||
|   onBrokenLinks: "throw", | ||||
|   onBrokenMarkdownLinks: "warn", | ||||
|   favicon: "img/favicon.ico", | ||||
|   organizationName: "blakeblackshear", | ||||
|   projectName: "frigate", | ||||
|   themes: [ | ||||
|     "@docusaurus/theme-mermaid", | ||||
|     "docusaurus-theme-openapi-docs", | ||||
|     "@inkeep/docusaurus/chatButton", | ||||
|     "@inkeep/docusaurus/searchBar", | ||||
|   ], | ||||
|   title: 'Frigate', | ||||
|   tagline: 'NVR With Realtime Object Detection for IP Cameras', | ||||
|   url: 'https://docs.frigate.video', | ||||
|   baseUrl: '/', | ||||
|   onBrokenLinks: 'throw', | ||||
|   onBrokenMarkdownLinks: 'warn', | ||||
|   favicon: 'img/favicon.ico', | ||||
|   organizationName: 'blakeblackshear', | ||||
|   projectName: 'frigate', | ||||
|   themes: ['@docusaurus/theme-mermaid', 'docusaurus-theme-openapi-docs'], | ||||
|   markdown: { | ||||
|     mermaid: true, | ||||
|   }, | ||||
|   i18n: { | ||||
|     defaultLocale: 'en', | ||||
|     locales: ['en'], | ||||
|     localeConfigs: { | ||||
|       en: { | ||||
|         label: 'English', | ||||
|       } | ||||
|     }, | ||||
|   }, | ||||
|   themeConfig: { | ||||
|     announcementBar: { | ||||
|       id: 'frigate_plus', | ||||
|       content: ` | ||||
|         <span style="margin-right: 8px; display: inline-block; animation: pulse 2s infinite;">🚀</span> | ||||
|         Get more relevant and accurate detections with Frigate+ models. | ||||
|         <a style="margin-left: 12px; padding: 3px 10px; background: #94d2bd; color: #001219; text-decoration: none; border-radius: 4px; font-weight: 500; " target="_blank" rel="noopener noreferrer" href="https://frigate.video/plus/">Learn more</a> | ||||
|         <span style="margin-left: 8px; display: inline-block; animation: pulse 2s infinite;">✨</span> | ||||
|         <style> | ||||
|           @keyframes pulse { | ||||
|             0%, 100% { transform: scale(1); } | ||||
|             50%       { transform: scale(1.1); } | ||||
|           } | ||||
|         </style>`, | ||||
|       backgroundColor: '#005f73', | ||||
|       textColor: '#e0fbfc', | ||||
|       isCloseable: false, | ||||
|     algolia: { | ||||
|       appId: 'WIURGBNBPY', | ||||
|       apiKey: 'd02cc0a6a61178b25da550212925226b', | ||||
|       indexName: 'frigate', | ||||
|     }, | ||||
|     docs: { | ||||
|       sidebar: { | ||||
|         hideable: true, | ||||
|       }, | ||||
|     }, | ||||
|     inkeepConfig: { | ||||
|       baseSettings: { | ||||
|         apiKey: "b1a4c4d73c9b48aa5b3cdae6e4c81f0bb3d1134eeb5a7100", | ||||
|         integrationId: "cm6xmhn9h000gs601495fkkdx", | ||||
|         organizationId: "org_map2JQEOco8U1ZYY", | ||||
|         primaryBrandColor: "#010101", | ||||
|       }, | ||||
|       aiChatSettings: { | ||||
|         chatSubjectName: "Frigate", | ||||
|         botAvatarSrcUrl: "https://frigate.video/images/favicon.png", | ||||
|         getHelpCallToActions: [ | ||||
|           { | ||||
|             name: "GitHub", | ||||
|             url: "https://github.com/blakeblackshear/frigate", | ||||
|             icon: { | ||||
|               builtIn: "FaGithub", | ||||
|             }, | ||||
|           }, | ||||
|         ], | ||||
|         quickQuestions: [ | ||||
|           "How to configure and setup camera settings?", | ||||
|           "How to setup notifications?", | ||||
|           "Supported builtin detectors?", | ||||
|           "How to restream video feed?", | ||||
|           "How can I get sound or audio in my recordings?", | ||||
|         ], | ||||
|       }, | ||||
|     }, | ||||
|     prism: { | ||||
|       additionalLanguages: ["bash", "json"], | ||||
|       additionalLanguages: ['bash', 'json'], | ||||
|     }, | ||||
|     languageTabs: [ | ||||
|       { | ||||
|         highlight: "python", | ||||
|         language: "python", | ||||
|         logoClass: "python", | ||||
|         highlight: 'python', | ||||
|         language: 'python', | ||||
|         logoClass: 'python', | ||||
|       }, | ||||
|       { | ||||
|         highlight: "javascript", | ||||
|         language: "nodejs", | ||||
|         logoClass: "nodejs", | ||||
|         highlight: 'javascript', | ||||
|         language: 'nodejs', | ||||
|         logoClass: 'nodejs', | ||||
|       }, | ||||
|       { | ||||
|         highlight: "javascript", | ||||
|         language: "javascript", | ||||
|         logoClass: "javascript", | ||||
|         highlight: 'javascript', | ||||
|         language: 'javascript', | ||||
|         logoClass: 'javascript', | ||||
|       }, | ||||
|       { | ||||
|         highlight: "bash", | ||||
|         language: "curl", | ||||
|         logoClass: "curl", | ||||
|         highlight: 'bash', | ||||
|         language: 'curl', | ||||
|         logoClass: 'curl', | ||||
|       }, | ||||
|       { | ||||
|         highlight: "rust", | ||||
| @@ -113,38 +59,28 @@ const config: Config = { | ||||
|       }, | ||||
|     ], | ||||
|     navbar: { | ||||
|       title: "Frigate", | ||||
|       title: 'Frigate', | ||||
|       logo: { | ||||
|         alt: "Frigate", | ||||
|         src: "img/logo.svg", | ||||
|         srcDark: "img/logo-dark.svg", | ||||
|         alt: 'Frigate', | ||||
|         src: 'img/logo.svg', | ||||
|         srcDark: 'img/logo-dark.svg', | ||||
|       }, | ||||
|       items: [ | ||||
|         { | ||||
|           to: "/", | ||||
|           activeBasePath: "docs", | ||||
|           label: "Docs", | ||||
|           position: "left", | ||||
|           to: '/', | ||||
|           activeBasePath: 'docs', | ||||
|           label: 'Docs', | ||||
|           position: 'left', | ||||
|         }, | ||||
|         { | ||||
|           href: "https://frigate.video", | ||||
|           label: "Website", | ||||
|           position: "right", | ||||
|         }, | ||||
|         { | ||||
|           href: "http://demo.frigate.video", | ||||
|           label: "Demo", | ||||
|           position: "right", | ||||
|         }, | ||||
|         { | ||||
|           type: 'localeDropdown', | ||||
|           href: 'https://frigate.video', | ||||
|           label: 'Website', | ||||
|           position: 'right', | ||||
|         }, | ||||
|         { | ||||
|           href: 'http://demo.frigate.video', | ||||
|           label: 'Demo', | ||||
|           position: 'right', | ||||
|           dropdownItemsAfter: [ | ||||
|             { | ||||
|               label: '简体中文(社区翻译)', | ||||
|               href: 'https://docs.frigate-cn.video', | ||||
|             } | ||||
|           ] | ||||
|         }, | ||||
|         { | ||||
|           href: 'https://github.com/blakeblackshear/frigate', | ||||
| @@ -154,18 +90,18 @@ const config: Config = { | ||||
|       ], | ||||
|     }, | ||||
|     footer: { | ||||
|       style: "dark", | ||||
|       style: 'dark', | ||||
|       links: [ | ||||
|         { | ||||
|           title: "Community", | ||||
|           title: 'Community', | ||||
|           items: [ | ||||
|             { | ||||
|               label: "GitHub", | ||||
|               href: "https://github.com/blakeblackshear/frigate", | ||||
|               label: 'GitHub', | ||||
|               href: 'https://github.com/blakeblackshear/frigate', | ||||
|             }, | ||||
|             { | ||||
|               label: "Discussions", | ||||
|               href: "https://github.com/blakeblackshear/frigate/discussions", | ||||
|               label: 'Discussions', | ||||
|               href: 'https://github.com/blakeblackshear/frigate/discussions', | ||||
|             }, | ||||
|           ], | ||||
|         }, | ||||
| @@ -174,19 +110,19 @@ const config: Config = { | ||||
|     }, | ||||
|   }, | ||||
|   plugins: [ | ||||
|     path.resolve(__dirname, "plugins", "raw-loader"), | ||||
|     path.resolve(__dirname, 'plugins', 'raw-loader'), | ||||
|     [ | ||||
|       "docusaurus-plugin-openapi-docs", | ||||
|       'docusaurus-plugin-openapi-docs', | ||||
|       { | ||||
|         id: "openapi", | ||||
|         docsPluginId: "classic", // configured for preset-classic | ||||
|         id: 'openapi', | ||||
|         docsPluginId: 'classic', // configured for preset-classic | ||||
|         config: { | ||||
|           frigateApi: { | ||||
|             specPath: "static/frigate-api.yaml", | ||||
|             outputDir: "docs/integrations/api", | ||||
|             specPath: 'static/frigate-api.yaml', | ||||
|             outputDir: 'docs/integrations/api', | ||||
|             sidebarOptions: { | ||||
|               groupPathsBy: "tag", | ||||
|               categoryLinkSource: "tag", | ||||
|               groupPathsBy: 'tag', | ||||
|               categoryLinkSource: 'tag', | ||||
|               sidebarCollapsible: true, | ||||
|               sidebarCollapsed: true, | ||||
|             }, | ||||
| @@ -194,24 +130,23 @@ const config: Config = { | ||||
|           } satisfies OpenApiPlugin.Options, | ||||
|         }, | ||||
|       }, | ||||
|     ], | ||||
|     ] | ||||
|   ] as PluginConfig[], | ||||
|   presets: [ | ||||
|     [ | ||||
|       "classic", | ||||
|       'classic', | ||||
|       { | ||||
|         docs: { | ||||
|           routeBasePath: "/", | ||||
|           sidebarPath: "./sidebars.ts", | ||||
|           routeBasePath: '/', | ||||
|           sidebarPath: './sidebars.ts', | ||||
|           // Please change this to your repo. | ||||
|           editUrl: | ||||
|             "https://github.com/blakeblackshear/frigate/edit/master/docs/", | ||||
|           editUrl: 'https://github.com/blakeblackshear/frigate/edit/master/docs/', | ||||
|           sidebarCollapsible: false, | ||||
|           docItemComponent: "@theme/ApiItem", // Derived from docusaurus-theme-openapi | ||||
|           docItemComponent: '@theme/ApiItem', // Derived from docusaurus-theme-openapi | ||||
|         }, | ||||
|  | ||||
|         theme: { | ||||
|           customCss: "./src/css/custom.css", | ||||
|           customCss: './src/css/custom.css', | ||||
|         }, | ||||
|       } satisfies Preset.Options, | ||||
|     ], | ||||
|   | ||||
							
								
								
									
										7817
									
								
								docs/package-lock.json
									
									
									
										generated
									
									
									
								
							
							
						
						
									
										7817
									
								
								docs/package-lock.json
									
									
									
										generated
									
									
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @@ -17,11 +17,10 @@ | ||||
|     "write-heading-ids": "docusaurus write-heading-ids" | ||||
|   }, | ||||
|   "dependencies": { | ||||
|     "@docusaurus/core": "^3.7.0", | ||||
|     "@docusaurus/plugin-content-docs": "^3.6.3", | ||||
|     "@docusaurus/preset-classic": "^3.7.0", | ||||
|     "@docusaurus/core": "^3.6.3", | ||||
|     "@docusaurus/preset-classic": "^3.6.3", | ||||
|     "@docusaurus/theme-mermaid": "^3.6.3", | ||||
|     "@inkeep/docusaurus": "^2.0.16", | ||||
|     "@docusaurus/plugin-content-docs": "^3.6.3", | ||||
|     "@mdx-js/react": "^3.1.0", | ||||
|     "clsx": "^2.1.1", | ||||
|     "docusaurus-plugin-openapi-docs": "^4.3.1", | ||||
|   | ||||
							
								
								
									
										126
									
								
								docs/sidebars.ts
									
									
									
									
									
								
							
							
						
						
									
										126
									
								
								docs/sidebars.ts
									
									
									
									
									
								
							| @@ -1,6 +1,6 @@ | ||||
| import type { SidebarsConfig } from "@docusaurus/plugin-content-docs"; | ||||
| import { PropSidebarItemLink } from "@docusaurus/plugin-content-docs"; | ||||
| import frigateHttpApiSidebar from "./docs/integrations/api/sidebar"; | ||||
| import type { SidebarsConfig, } from '@docusaurus/plugin-content-docs'; | ||||
| import { PropSidebarItemLink } from '@docusaurus/plugin-content-docs'; | ||||
| import frigateHttpApiSidebar from './docs/integrations/api/sidebar'; | ||||
|  | ||||
| const sidebars: SidebarsConfig = { | ||||
|   docs: { | ||||
| @@ -8,105 +8,99 @@ const sidebars: SidebarsConfig = { | ||||
|       'frigate/index', | ||||
|       'frigate/hardware', | ||||
|       'frigate/installation', | ||||
|       'frigate/updating', | ||||
|       'frigate/camera_setup', | ||||
|       'frigate/video_pipeline', | ||||
|       'frigate/glossary', | ||||
|     ], | ||||
|     Guides: [ | ||||
|       "guides/getting_started", | ||||
|       "guides/configuring_go2rtc", | ||||
|       "guides/ha_notifications", | ||||
|       "guides/ha_network_storage", | ||||
|       "guides/reverse_proxy", | ||||
|       'guides/getting_started', | ||||
|       'guides/configuring_go2rtc', | ||||
|       'guides/ha_notifications', | ||||
|       'guides/ha_network_storage', | ||||
|       'guides/reverse_proxy', | ||||
|     ], | ||||
|     Configuration: { | ||||
|       "Configuration Files": [ | ||||
|         "configuration/index", | ||||
|         "configuration/reference", | ||||
|       'Configuration Files': [ | ||||
|         'configuration/index', | ||||
|         'configuration/reference', | ||||
|         { | ||||
|           type: "link", | ||||
|           label: "Go2RTC Configuration Reference", | ||||
|           href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration", | ||||
|           type: 'link', | ||||
|           label: 'Go2RTC Configuration Reference', | ||||
|           href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration', | ||||
|         } as PropSidebarItemLink, | ||||
|       ], | ||||
|       Detectors: [ | ||||
|         "configuration/object_detectors", | ||||
|         "configuration/audio_detectors", | ||||
|         'configuration/object_detectors', | ||||
|         'configuration/audio_detectors', | ||||
|       ], | ||||
|       Enrichments: [ | ||||
|         "configuration/semantic_search", | ||||
|         "configuration/genai", | ||||
|         "configuration/face_recognition", | ||||
|         "configuration/license_plate_recognition", | ||||
|         "configuration/bird_classification", | ||||
|       Classifiers: [ | ||||
|         'configuration/semantic_search', | ||||
|         'configuration/genai', | ||||
|         'configuration/face_recognition', | ||||
|         'configuration/license_plate_recognition', | ||||
|       ], | ||||
|       Cameras: [ | ||||
|         "configuration/cameras", | ||||
|         "configuration/review", | ||||
|         "configuration/record", | ||||
|         "configuration/snapshots", | ||||
|         "configuration/motion_detection", | ||||
|         "configuration/birdseye", | ||||
|         "configuration/live", | ||||
|         "configuration/restream", | ||||
|         "configuration/autotracking", | ||||
|         "configuration/camera_specific", | ||||
|         'configuration/cameras', | ||||
|         'configuration/review', | ||||
|         'configuration/record', | ||||
|         'configuration/snapshots', | ||||
|         'configuration/motion_detection', | ||||
|         'configuration/birdseye', | ||||
|         'configuration/live', | ||||
|         'configuration/restream', | ||||
|         'configuration/autotracking', | ||||
|         'configuration/camera_specific', | ||||
|       ], | ||||
|       Objects: [ | ||||
|         "configuration/object_filters", | ||||
|         "configuration/masks", | ||||
|         "configuration/zones", | ||||
|         "configuration/objects", | ||||
|         "configuration/stationary_objects", | ||||
|         'configuration/object_filters', | ||||
|         'configuration/masks', | ||||
|         'configuration/zones', | ||||
|         'configuration/objects', | ||||
|         'configuration/stationary_objects', | ||||
|       ], | ||||
|       "Hardware Acceleration": [ | ||||
|         "configuration/hardware_acceleration_video", | ||||
|         "configuration/hardware_acceleration_enrichments", | ||||
|       ], | ||||
|       "Extra Configuration": [ | ||||
|         "configuration/authentication", | ||||
|         "configuration/notifications", | ||||
|         "configuration/ffmpeg_presets", | ||||
|       'Extra Configuration': [ | ||||
|         'configuration/authentication', | ||||
|         'configuration/notifications', | ||||
|         'configuration/hardware_acceleration', | ||||
|         'configuration/ffmpeg_presets', | ||||
|         "configuration/pwa", | ||||
|         "configuration/tls", | ||||
|         "configuration/advanced", | ||||
|         'configuration/tls', | ||||
|         'configuration/advanced', | ||||
|       ], | ||||
|     }, | ||||
|     Integrations: [ | ||||
|       "integrations/plus", | ||||
|       "integrations/home-assistant", | ||||
|       'integrations/plus', | ||||
|       'integrations/home-assistant', | ||||
|       // This is the HTTP API generated by OpenAPI | ||||
|       { | ||||
|         type: "category", | ||||
|         label: "HTTP API", | ||||
|         type: 'category', | ||||
|         label: 'HTTP API', | ||||
|         link: { | ||||
|           type: "generated-index", | ||||
|           title: "Frigate HTTP API", | ||||
|           description: "HTTP API", | ||||
|           slug: "/integrations/api/frigate-http-api", | ||||
|           type: 'generated-index', | ||||
|           title: 'Frigate HTTP API', | ||||
|           description: 'HTTP API', | ||||
|           slug: '/integrations/api/frigate-http-api', | ||||
|         }, | ||||
|         items: frigateHttpApiSidebar, | ||||
|       }, | ||||
|       "integrations/mqtt", | ||||
|       "configuration/metrics", | ||||
|       "integrations/third_party_extensions", | ||||
|       'integrations/mqtt', | ||||
|       'configuration/metrics', | ||||
|       'integrations/third_party_extensions', | ||||
|     ], | ||||
|     'Frigate+': [ | ||||
|       'plus/index', | ||||
|       'plus/annotating', | ||||
|       'plus/first_model', | ||||
|       'plus/improving_model', | ||||
|       'plus/faq', | ||||
|     ], | ||||
|     Troubleshooting: [ | ||||
|       "troubleshooting/faqs", | ||||
|       "troubleshooting/recordings", | ||||
|       "troubleshooting/gpu", | ||||
|       "troubleshooting/edgetpu", | ||||
|       'troubleshooting/faqs', | ||||
|       'troubleshooting/recordings', | ||||
|       'troubleshooting/edgetpu', | ||||
|     ], | ||||
|     Development: [ | ||||
|       "development/contributing", | ||||
|       "development/contributing-boards", | ||||
|       'development/contributing', | ||||
|       'development/contributing-boards', | ||||
|     ], | ||||
|   }, | ||||
| }; | ||||
|   | ||||
| @@ -1,25 +0,0 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { useLocation } from '@docusaurus/router'; | ||||
| import styles from './styles.module.css'; | ||||
|  | ||||
| export default function LanguageAlert() { | ||||
|   const [showAlert, setShowAlert] = useState(false); | ||||
|   const { pathname } = useLocation(); | ||||
|  | ||||
|   useEffect(() => { | ||||
|     const userLanguage = navigator?.language || 'en'; | ||||
|     const isChineseUser = userLanguage.includes('zh'); | ||||
|     setShowAlert(isChineseUser); | ||||
|      | ||||
|   }, [pathname]); | ||||
|  | ||||
|   if (!showAlert) return null; | ||||
|  | ||||
|   return ( | ||||
|     <div className={styles.alert}> | ||||
|       <span>检测到您的主要语言为中文,您可以访问由中文社区翻译的</span> | ||||
|       <a href={'https://docs.frigate-cn.video'+pathname}>中文文档</a> | ||||
|       <span> 以获得更好的体验</span> | ||||
|     </div> | ||||
|   ); | ||||
| } | ||||
| @@ -1,13 +0,0 @@ | ||||
| .alert { | ||||
|     padding: 12px; | ||||
|     background: #fff8e6; | ||||
|     border-bottom: 1px solid #ffd166; | ||||
|     text-align: center; | ||||
|     font-size: 15px; | ||||
|   } | ||||
|    | ||||
|   .alert a { | ||||
|     color: #1890ff; | ||||
|     font-weight: 500; | ||||
|     margin-left: 6px; | ||||
|   } | ||||
| @@ -1,15 +0,0 @@ | ||||
| import React from 'react'; | ||||
| import NavbarLayout from '@theme/Navbar/Layout'; | ||||
| import NavbarContent from '@theme/Navbar/Content'; | ||||
| import LanguageAlert from '../../components/LanguageAlert'; | ||||
|  | ||||
| export default function Navbar() { | ||||
|   return ( | ||||
|     <> | ||||
|       <NavbarLayout> | ||||
|         <NavbarContent /> | ||||
|       </NavbarLayout> | ||||
|       <LanguageAlert /> | ||||
|     </> | ||||
|   ); | ||||
| } | ||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user