From 7e7b3288a8f8a01445e91f889adae10192ce393e Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 4 Sep 2025 06:44:33 -0600 Subject: [PATCH 01/17] Update live FAQ for camera distortion (#19907) * Add item to FAQ about stream distortion * Update updating docs * Update link --- docs/docs/configuration/live.md | 4 ++++ docs/docs/frigate/updating.md | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index 35d401a67..d9bc107f2 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -251,3 +251,7 @@ Note that disabling a camera through the config file (`enabled: False`) removes 6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?** If your camera is streaming (as indicated by a red dot in the upper right, or if it has been set to continuous streaming mode), your browser may be blocking audio until you interact with the page. This is an intentional browser limitation. See [this article](https://developer.mozilla.org/en-US/docs/Web/Media/Autoplay_guide#autoplay_availability). Many browsers have a whitelist feature to change this behavior. + +7. **My camera streams have lots of visual artifacts / distortion.** + + Some cameras don't include the hardware to support multiple connections to the high resolution stream, and this can cause unexpected behavior. In this case it is recommended to [restream](./restream.md) the high resolution stream so that it can be used for live view and recordings. diff --git a/docs/docs/frigate/updating.md b/docs/docs/frigate/updating.md index 12c8eb0a3..fdfbf906b 100644 --- a/docs/docs/frigate/updating.md +++ b/docs/docs/frigate/updating.md @@ -5,7 +5,7 @@ title: Updating # Updating Frigate -The current stable version of Frigate is **0.16.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.0). +The current stable version of Frigate is **0.16.1**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.1). Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups. @@ -33,21 +33,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps: 2. **Update and Pull the Latest Image**: - If using Docker Compose: - - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.0` instead of `0.15.2`). For example: + - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.1` instead of `0.15.2`). For example: ```yaml services: frigate: - image: ghcr.io/blakeblackshear/frigate:0.16.0 + image: ghcr.io/blakeblackshear/frigate:0.16.1 ``` - Then pull the image: ```bash - docker pull ghcr.io/blakeblackshear/frigate:0.16.0 + docker pull ghcr.io/blakeblackshear/frigate:0.16.1 ``` - **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling. 
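    With the `stable` tag, the update itself is then just a pull and a recreate. A minimal sketch, assuming your Compose service is named `frigate`:

    ```bash
    docker compose pull frigate
    docker compose up -d frigate
    ```
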
- If using `docker run`: - - Pull the image with the appropriate tag (e.g., `0.16.0`, `0.16.0-tensorrt`, or `stable`): + - Pull the image with the appropriate tag (e.g., `0.16.1`, `0.16.1-tensorrt`, or `stable`): ```bash - docker pull ghcr.io/blakeblackshear/frigate:0.16.0 + docker pull ghcr.io/blakeblackshear/frigate:0.16.1 ``` 3. **Start the Container**: From 6591210050bd10705863afa67648d3eccc615471 Mon Sep 17 00:00:00 2001 From: GuoQing Liu <842607283@qq.com> Date: Fri, 5 Sep 2025 19:01:26 +0800 Subject: [PATCH 02/17] docs: fix reolink camera table display (#19926) --- docs/docs/configuration/camera_specific.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index 98bb02c17..3a3809605 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -147,6 +147,7 @@ WEB Digest Algorithm - MD5 Reolink has many different camera models with inconsistently supported features and behavior. The below table shows a summary of various features and recommendations. | Camera Resolution | Camera Generation | Recommended Stream Type | Additional Notes | +| ---------------- | ------------------------- | -------------------------------- | ----------------------------------------------------------------------- | | 5MP or lower | All | http-flv | Stream is h264 | | 6MP or higher | Latest (ex: Duo3, CX-8##) | http-flv with ffmpeg 8.0, or rtsp | This uses the new http-flv-enhanced over H265 which requires ffmpeg 8.0 | | 6MP or higher | Older (ex: RLC-8##) | rtsp | | From d7f7cd7be16bfe7a12766b797da6b8add687ccd9 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Fri, 5 Sep 2025 06:33:57 -0500 Subject: [PATCH 03/17] best thumbnail endpoint should pass correct extension param (#19930) --- frigate/api/media.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frigate/api/media.py b/frigate/api/media.py index 971475cba..e3de57cd3 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -1598,7 +1598,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str): try: event_id = event_query.scalar() - return event_thumbnail(request, event_id, 60) + return event_thumbnail(request, event_id, Extension.jpg, 60) except DoesNotExist: frame = np.zeros((175, 175, 3), np.uint8) ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) From 60714a733ed13fa8225b21b2aa2bffef428e9b1e Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sun, 7 Sep 2025 06:01:10 -0500 Subject: [PATCH 04/17] update docs for Frigate+ yolov9 (#19938) * update docs for Frigate+ yolov9 * footnote memryx suport * tweaks --- docs/docs/plus/index.md | 49 +++++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/docs/docs/plus/index.md b/docs/docs/plus/index.md index eba9bc370..ba52fb713 100644 --- a/docs/docs/plus/index.md +++ b/docs/docs/plus/index.md @@ -11,34 +11,51 @@ Information on how to integrate Frigate+ with Frigate can be found in the [integ ## Available model types -There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types). +There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yolov9`. 
All of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types). Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models. -| Model Type | Description | -| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- | -| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. | -| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. | +| Model Type | Description | +| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. | +| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. | +| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, and Rockchip NPUs. | + +_\* Support coming in 0.17_ + +### YOLOv9 Details + +YOLOv9 models are available in `s` and `t` sizes. When requesting a `yolov9` model, you will be prompted to choose a size. If you are unsure what size to choose, you should perform some tests with the base models to find the performance level that suits you. The `s` size is most similar to the current `yolonas` models in terms of inference times and accuracy, and a good place to start is the `320x320` resolution model for `yolov9s`. + +:::info + +When switching to YOLOv9, you may need to adjust your thresholds for some objects. + +::: + +#### Hailo Support + +If you have a Hailo device, you will need to specify the hardware you have when submitting a model request because they are not cross compatible. Please test using the available base models before submitting your model request. + +#### Rockchip (RKNN) Support + +For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is coming in 0.17. ## Supported detector types -Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), and ONNX (`onnx`) detectors. - -:::warning - -Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later. - -::: +Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), Hailo (`hailo8l`), and Rockchip\* (`rknn`) detectors. 
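As a rough sketch, a Frigate+ model is referenced by its model ID using a `plus://` path and paired with one of these detector types. The model ID and device below are placeholders:

```yaml
model:
  path: plus://your_model_id # placeholder, substitute your own Frigate+ model ID

detectors:
  ov:
    type: openvino
    device: AUTO # assumes Intel hardware; use the detector type that matches yours
```

Frigate fills in the remaining `model` fields automatically from the Frigate+ model metadata. The recommended pairings are: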
| Hardware | Recommended Detector Type | Recommended Model Type | | -------------------------------------------------------------------------------- | ------------------------- | ---------------------- | | [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` | | [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` | -| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` | -| [NVidia GPU](/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` | -| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` | +| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolov9` | +| [NVidia GPU](/configuration/object_detectors#onnx) | `onnx` | `yolov9` | +| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector) | `rocm` | `yolov9` | +| [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` | +| [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` | -_\* Requires Frigate 0.15_ +_\* Requires manual conversion in 0.16. Automatic conversion coming in 0.17._ ## Improving your model From 7566aecb0b3bb6796697b2df52ca4247288da9e2 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 7 Sep 2025 14:12:49 -0500 Subject: [PATCH 05/17] Add note about Apple Silicon support in 0.17 (#19944) --- docs/docs/plus/index.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/docs/plus/index.md b/docs/docs/plus/index.md index ba52fb713..fddd9ad5b 100644 --- a/docs/docs/plus/index.md +++ b/docs/docs/plus/index.md @@ -15,11 +15,11 @@ There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yo Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models. -| Model Type | Description | -| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. | -| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. | -| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, and Rockchip NPUs. | +| Model Type | Description | +| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. | +| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. 
| +| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, Apple Silicon\*, and Rockchip NPUs. | _\* Support coming in 0.17_ From 0eb441fe50afc9d77aad452061618ebd76875d16 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sun, 7 Sep 2025 13:59:48 -0600 Subject: [PATCH 06/17] Update inference times for yolov9 (#19946) --- docs/docs/frigate/hardware.md | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index 8a9454e2c..7e4c721ab 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -99,6 +99,7 @@ In real-world deployments, even with multiple cameras running concurrently, Frig | Name | Hailo‑8 Inference Time | Hailo‑8L Inference Time | | ---------------- | ---------------------- | ----------------------- | | ssd mobilenet v1 | ~ 6 ms | ~ 10 ms | +| yolov9-tiny | | 320: 18ms | | yolov6n | ~ 7 ms | ~ 11 ms | ### Google Coral TPU @@ -131,17 +132,17 @@ More information is available [in the detector docs](/configuration/object_detec Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: -| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes | -| -------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- | -| Intel HD 530 | 15 - 35 ms | | | Can only run one detector instance | -| Intel HD 620 | 15 - 25 ms | 320: ~ 35 ms | | | -| Intel HD 630 | ~ 15 ms | 320: ~ 30 ms | | | -| Intel UHD 730 | ~ 10 ms | 320: ~ 19 ms 640: ~ 54 ms | | | -| Intel UHD 770 | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | | -| Intel N100 | ~ 15 ms | 320: ~ 25 ms | | Can only run one detector instance | -| Intel Iris XE | ~ 10 ms | 320: ~ 18 ms 640: ~ 50 ms | | | -| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | | -| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | | +| Name | MobileNetV2 Inference Time | YOLOv9 | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes | +| -------------- | -------------------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- | +| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance | +| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | | +| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | | +| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | | +| Intel UHD 770 | ~ 15 ms | t-320: 24: ms s-320: 30 ms | 320: ~ 20 ms 640: ~ 46 ms | | | +| Intel N100 | ~ 15 ms | s-320: 30ms | 320: ~ 25 ms | | Can only run one detector instance | +| Intel Iris XE | ~ 10 ms | | 320: ~ 18 ms 640: ~ 50 ms | | | +| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | | +| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | | ### TensorRT - Nvidia GPU @@ -179,7 +180,7 @@ With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detec | Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | | --------- | --------------------- | ------------------------- | -| AMD 780M | ~ 14 ms | 320: ~ 25 ms 640: ~ 50 ms | +| AMD 780M | 320: ~ 14 ms | 320: ~ 25 ms 640: ~ 50 ms | | AMD 8700G | | 320: ~ 20 ms 640: ~ 40 ms | ## Community Supported Detectors From 751de141d53f5d5ce5ac62505a6b27512021ee99 Mon Sep 17 00:00:00 2001 From: Josh 
Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sun, 7 Sep 2025 20:19:40 -0500 Subject: [PATCH 07/17] Fix model selection type in Frigate+ settings pane (#19952) * model type does not need to match config model type As long as a model is supported by a detector, it should be available in the list * fix missing semicolon the web linter was complaining --- web/src/components/player/HlsVideoPlayer.tsx | 2 +- web/src/views/settings/FrigatePlusSettingsView.tsx | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/web/src/components/player/HlsVideoPlayer.tsx b/web/src/components/player/HlsVideoPlayer.tsx index 93f1da702..4b1bfe5ef 100644 --- a/web/src/components/player/HlsVideoPlayer.tsx +++ b/web/src/components/player/HlsVideoPlayer.tsx @@ -139,7 +139,7 @@ export default function HlsVideoPlayer({ if (hlsRef.current) { hlsRef.current.destroy(); } - } + }; }, [videoRef, hlsRef, useHlsCompat, currentSource]); // state handling diff --git a/web/src/views/settings/FrigatePlusSettingsView.tsx b/web/src/views/settings/FrigatePlusSettingsView.tsx index e721aa13c..fb9a28d16 100644 --- a/web/src/views/settings/FrigatePlusSettingsView.tsx +++ b/web/src/views/settings/FrigatePlusSettingsView.tsx @@ -390,7 +390,6 @@ export default function FrigatePlusSettingsView({ className="cursor-pointer" value={id} disabled={ - model.type != config.model.model_type || !model.supportedDetectors.includes( Object.values(config.detectors)[0] .type, From c5ed95ec52bbaec0be7578ab90cf199fc52470e9 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 8 Sep 2025 06:43:04 -0600 Subject: [PATCH 08/17] More inference speed updates (#19947) * More inference speed updates * Update hardware.md * Update hardware.md * Update index.md * More inference speeds * Update home-assistant.md * Update object_detectors.md * Update first_model.md --- docs/docs/configuration/object_detectors.md | 2 +- docs/docs/frigate/hardware.md | 25 ++++++++++++--------- docs/docs/integrations/home-assistant.md | 20 +++++++++++++++++ docs/docs/plus/first_model.md | 6 +++++ docs/docs/plus/index.md | 2 +- 5 files changed, 42 insertions(+), 13 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 18dc683a9..b54d8797d 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -1040,7 +1040,7 @@ COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/ WORKDIR /yolov9 ADD https://github.com/WongKinYiu/yolov9.git . 
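# Install the YOLOv9 repo's own requirements; the ONNX export tooling is added next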
RUN uv pip install --system -r requirements.txt -RUN uv pip install --system onnx onnxruntime onnx-simplifier>=0.4.1 +RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1 ARG MODEL_SIZE ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt RUN sed -i "s/ckpt = torch.load(attempt_download(w), map_location='cpu')/ckpt = torch.load(attempt_download(w), map_location='cpu', weights_only=False)/g" models/experimental.py diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index 7e4c721ab..9a0a6bcee 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -132,17 +132,19 @@ More information is available [in the detector docs](/configuration/object_detec Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: -| Name | MobileNetV2 Inference Time | YOLOv9 | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes | -| -------------- | -------------------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- | -| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance | -| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | | -| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | | -| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | | -| Intel UHD 770 | ~ 15 ms | t-320: 24: ms s-320: 30 ms | 320: ~ 20 ms 640: ~ 46 ms | | | -| Intel N100 | ~ 15 ms | s-320: 30ms | 320: ~ 25 ms | | Can only run one detector instance | -| Intel Iris XE | ~ 10 ms | | 320: ~ 18 ms 640: ~ 50 ms | | | -| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | | -| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | | +| Name | MobileNetV2 Inference Time | YOLOv9 | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes | +| -------------- | -------------------------- | --------------------------------------- | ------------------------- | ---------------------- | ---------------------------------- | +| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance | +| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | | +| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | | +| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | | +| Intel UHD 770 | ~ 15 ms | t-320: 24 ms s-320: 30 ms s-640: 45 ms | 320: ~ 20 ms 640: ~ 46 ms | | | +| Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance | +| Intel N150 | ~ 15 ms | t-320: 16ms s-320: 24 ms | | | | +| Intel Iris XE | ~ 10 ms | s-320: 12 ms s-640: 30 ms | 320: ~ 18 ms 640: ~ 50 ms | | | +| Intel Arc A310 | | s-320: 9 ms | | | | +| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | | +| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | | ### TensorRT - Nvidia GPU @@ -169,6 +171,7 @@ Inference speeds will vary greatly depending on the GPU and the model used. 
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | | --------------- | --------------------- | ------------------------- | ---------------------- | +| GTX 1070 | s-320: 16 ms | 320: 14 ms | | | RTX 3050 | t-320: 15 ms | 320: ~ 10 ms 640: ~ 16 ms | Nano-320: ~ 12 ms | | RTX 3070 | t-320: 11 ms | 320: ~ 8 ms 640: ~ 14 ms | Nano-320: ~ 9 ms | | RTX A4000 | | 320: ~ 15 ms | | diff --git a/docs/docs/integrations/home-assistant.md b/docs/docs/integrations/home-assistant.md index 2ce09b275..169a7ad31 100644 --- a/docs/docs/integrations/home-assistant.md +++ b/docs/docs/integrations/home-assistant.md @@ -185,6 +185,26 @@ For clips to be castable to media devices, audio is required and may need to be +## Camera API + +To disable a camera dynamically + +``` +action: camera.turn_off +data: {} +target: + entity_id: camera.back_deck_cam # your Frigate camera entity ID +``` + +To enable a camera that has been disabled dynamically + +``` +action: camera.turn_on +data: {} +target: + entity_id: camera.back_deck_cam # your Frigate camera entity ID +``` + ## Notification API Many people do not want to expose Frigate to the web, so the integration creates some public API endpoints that can be used for notifications. diff --git a/docs/docs/plus/first_model.md b/docs/docs/plus/first_model.md index 7e483d402..adec174d9 100644 --- a/docs/docs/plus/first_model.md +++ b/docs/docs/plus/first_model.md @@ -34,6 +34,12 @@ Model IDs are not secret values and can be shared freely. Access to your model i ::: +:::tip + +When setting the plus model id, all other fields should be removed as these are configured automatically with the Frigate+ model config + +::: + ## Step 4: Adjust your object filters for higher scores Frigate+ models generally have much higher scores than the default model provided in Frigate. You will likely need to increase your `threshold` and `min_score` values. Here is an example of how these values can be refined, but you should expect these to evolve as your model improves. For more information about how `threshold` and `min_score` are related, see the docs on [object filters](../configuration/object_filters.md#object-scores). 
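As a sketch only (the numbers below are illustrative and should be tuned against your own cameras and review items):

```yaml
objects:
  filters:
    dog:
      min_score: .7
      threshold: .9
    person:
      min_score: .65
      threshold: .85
```
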
diff --git a/docs/docs/plus/index.md b/docs/docs/plus/index.md index fddd9ad5b..fa8f86f9c 100644 --- a/docs/docs/plus/index.md +++ b/docs/docs/plus/index.md @@ -51,7 +51,7 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi | [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` | | [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolov9` | | [NVidia GPU](/configuration/object_detectors#onnx) | `onnx` | `yolov9` | -| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector) | `rocm` | `yolov9` | +| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector) | `onnx` | `yolov9` | | [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` | | [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` | From 880902cdd7d57182c407d362bee0b4ad41931d06 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 8 Sep 2025 09:29:03 -0500 Subject: [PATCH 09/17] Add specific notes for frigate+ models in object detector docs (#19971) --- docs/docs/configuration/object_detectors.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index b54d8797d..fe57a2c9d 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -29,6 +29,7 @@ Frigate supports multiple different detectors that work on different types of ha - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured. **Nvidia Jetson** + - [TensortRT](#nvidia-tensorrt-detector): TensorRT can run on Jetson devices, using one of many default models. - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt-jp6` Frigate image when a supported ONNX model is configured. @@ -325,6 +326,12 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv ::: +:::warning + +If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model. + +::: + After placing the downloaded onnx model in your config folder, you can use the following configuration: ```yaml @@ -533,6 +540,12 @@ There is no default model provided, the following formats are supported: [YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate. +:::warning + +If you are using a Frigate+ YOLO-NAS model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model. 
+ +::: + After placing the downloaded onnx model in your config folder, you can use the following configuration: ```yaml @@ -560,6 +573,12 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv ::: +:::warning + +If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model. + +::: + After placing the downloaded onnx model in your config folder, you can use the following configuration: ```yaml From f46f8a21609ea89815a408a684d42b6579c943d4 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 8 Sep 2025 10:39:33 -0600 Subject: [PATCH 10/17] More inference speed updates (#19974) --- docs/docs/frigate/hardware.md | 40 +++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index 9a0a6bcee..113f922e2 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -132,19 +132,19 @@ More information is available [in the detector docs](/configuration/object_detec Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: -| Name | MobileNetV2 Inference Time | YOLOv9 | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes | -| -------------- | -------------------------- | --------------------------------------- | ------------------------- | ---------------------- | ---------------------------------- | -| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance | -| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | | -| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | | -| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | | -| Intel UHD 770 | ~ 15 ms | t-320: 24 ms s-320: 30 ms s-640: 45 ms | 320: ~ 20 ms 640: ~ 46 ms | | | -| Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance | -| Intel N150 | ~ 15 ms | t-320: 16ms s-320: 24 ms | | | | -| Intel Iris XE | ~ 10 ms | s-320: 12 ms s-640: 30 ms | 320: ~ 18 ms 640: ~ 50 ms | | | -| Intel Arc A310 | | s-320: 9 ms | | | | -| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | | -| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | | +| Name | MobileNetV2 Inference Time | YOLOv9 | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes | +| -------------- | -------------------------- | ------------------------------------------------- | ------------------------- | ---------------------- | ---------------------------------- | +| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance | +| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | | +| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | | +| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | | +| Intel UHD 770 | ~ 15 ms | t-320: 24 ms s-320: 30 ms s-640: 45 ms | 320: ~ 20 ms 640: ~ 46 ms | | | +| Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance | +| Intel N150 | ~ 15 ms | t-320: 16ms s-320: 24 ms | | | | +| Intel Iris XE | ~ 10 ms | s-320: 12 ms s-640: 30 ms | 320: ~ 18 ms 640: ~ 50 ms | | | +| Intel Arc A310 | | t-320: 7 ms t-640: 11 ms s-320: 8 ms s-640: 15 ms | | | | +| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | | +| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | | ### TensorRT - Nvidia GPU @@ -169,13 +169,13 @@ There are 
improved capabilities in newer GPU architectures that TensorRT can ben Inference speeds will vary greatly depending on the GPU and the model used. `tiny` variants are faster than the equivalent non-tiny model, some known examples are below: -| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | -| --------------- | --------------------- | ------------------------- | ---------------------- | -| GTX 1070 | s-320: 16 ms | 320: 14 ms | | -| RTX 3050 | t-320: 15 ms | 320: ~ 10 ms 640: ~ 16 ms | Nano-320: ~ 12 ms | -| RTX 3070 | t-320: 11 ms | 320: ~ 8 ms 640: ~ 14 ms | Nano-320: ~ 9 ms | -| RTX A4000 | | 320: ~ 15 ms | | -| Tesla P40 | | 320: ~ 105 ms | | +| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | +| --------------- | ------------------------- | ------------------------- | ---------------------- | +| GTX 1070 | s-320: 16 ms | 320: 14 ms | | +| RTX 3050 | t-320: 15 ms s-320: 17 ms | 320: ~ 10 ms 640: ~ 16 ms | Nano-320: ~ 12 ms | +| RTX 3070 | t-320: 11 ms s-320: 13 ms | 320: ~ 8 ms 640: ~ 14 ms | Nano-320: ~ 9 ms | +| RTX A4000 | | 320: ~ 15 ms | | +| Tesla P40 | | 320: ~ 105 ms | | ### ROCm - AMD GPU From 205fdf3ae3c71bdc745f1f8be8d89feeec79f989 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 9 Sep 2025 06:17:56 -0600 Subject: [PATCH 11/17] Fixes (#19984) * Always handle RKNN as NHWC in Frigate+ model loading * Correct Intel stats * Update inference time docs * Update version * Adjust inference speeds --- Makefile | 2 +- docs/docs/frigate/hardware.md | 6 +++--- frigate/detectors/detector_config.py | 4 ++++ frigate/util/services.py | 2 +- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index e414ed65c..fa692b681 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.16.1 +VERSION = 0.16.2 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) BOARDS= #Initialized empty diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index 113f922e2..fcb98573f 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -138,11 +138,11 @@ Inference speeds vary greatly depending on the CPU or GPU used, some known examp | Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | | | Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | | | Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | | -| Intel UHD 770 | ~ 15 ms | t-320: 24 ms s-320: 30 ms s-640: 45 ms | 320: ~ 20 ms 640: ~ 46 ms | | | +| Intel UHD 770 | ~ 15 ms | t-320: ~ 16 ms s-320: ~ 20 ms s-640: ~ 40 ms | 320: ~ 20 ms 640: ~ 46 ms | | | | Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance | -| Intel N150 | ~ 15 ms | t-320: 16ms s-320: 24 ms | | | | +| Intel N150 | ~ 15 ms | t-320: 16 ms s-320: 24 ms | | | | | Intel Iris XE | ~ 10 ms | s-320: 12 ms s-640: 30 ms | 320: ~ 18 ms 640: ~ 50 ms | | | -| Intel Arc A310 | | t-320: 7 ms t-640: 11 ms s-320: 8 ms s-640: 15 ms | | | | +| Intel Arc A310 | ~ 5 ms | t-320: 7 ms t-640: 11 ms s-320: 8 ms s-640: 15 ms | 320: ~ 8 ms 640: ~ 14 ms | | | | Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | | | Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | | diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index d7883523d..7ee04bde5 100644 --- a/frigate/detectors/detector_config.py +++ b/frigate/detectors/detector_config.py @@ 
-161,6 +161,10 @@ class ModelConfig(BaseModel): if model_info.get("inputDataType"): self.input_dtype = model_info["inputDataType"] + # RKNN always uses NHWC + if detector == "rknn": + self.input_tensor = InputTensorEnum.nhwc + # generate list of attribute labels self.attributes_map = { **model_info.get("attributes", DEFAULT_ATTRIBUTE_LABEL_MAP), diff --git a/frigate/util/services.py b/frigate/util/services.py index 185770eb7..b31a7eea3 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -301,7 +301,7 @@ def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, s "-o", "-", "-s", - "1", + "1000", # Intel changed this from seconds to milliseconds in 2024+ versions ] if intel_gpu_device: From 1613499218da15ee5ba529aed515ed43067ae3ec Mon Sep 17 00:00:00 2001 From: laviddichterman Date: Tue, 9 Sep 2025 13:27:30 -0700 Subject: [PATCH 12/17] Update object_detectors.md to document configuring image size in YOLO 9 (#19951) * Update object_detectors.md for v16 * add configurability to IMG_SIZE for YOLOv9 export * remove TensorRT detector as it's no longer supported in v16 * Revert removing NVIDIA TensorRT detector docs Added documentation for NVidia TensorRT Detector, including model generation, configuration parameters, and example usage. * Dumb copy/paste * Enhance YOLOv9 export instructions in documentation Updated YOLOv9 export command to include IMG_SIZE parameter and clarified model size options. --- docs/docs/configuration/object_detectors.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index fe57a2c9d..f537d320b 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -1049,10 +1049,10 @@ python3 yolo_to_onnx.py -m yolov7-320 #### YOLOv9 -YOLOv9 model can be exported as ONNX using the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=t` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available sizes are `t`, `s`, `m`, `c`, and `e`). +YOLOv9 model can be exported as ONNX using the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=t` and `IMG_SIZE=320` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available model sizes are `t`, `s`, `m`, `c`, and `e`, common image sizes are `320` and `640`). ```sh -docker build . --build-arg MODEL_SIZE=t --output . -f- <<'EOF' +docker build . --build-arg MODEL_SIZE=t --build-arg IMG_SIZE=320 --output . -f- <<'EOF' FROM python:3.11 AS build RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/* COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/ @@ -1061,11 +1061,13 @@ ADD https://github.com/WongKinYiu/yolov9.git . 
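# Install YOLOv9's requirements, then the ONNX export toolchain (onnx is version-pinned, presumably for exporter compatibility)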
RUN uv pip install --system -r requirements.txt RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1 ARG MODEL_SIZE +ARG IMG_SIZE ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt RUN sed -i "s/ckpt = torch.load(attempt_download(w), map_location='cpu')/ckpt = torch.load(attempt_download(w), map_location='cpu', weights_only=False)/g" models/experimental.py -RUN python3 export.py --weights ./yolov9-${MODEL_SIZE}.pt --imgsz 320 --simplify --include onnx +RUN python3 export.py --weights ./yolov9-${MODEL_SIZE}.pt --imgsz ${IMG_SIZE} --simplify --include onnx FROM scratch ARG MODEL_SIZE -COPY --from=build /yolov9/yolov9-${MODEL_SIZE}.onnx / +ARG IMG_SIZE +COPY --from=build /yolov9/yolov9-${MODEL_SIZE}.onnx /yolov9-${MODEL_SIZE}-${IMG_SIZE}.onnx EOF ``` From 037c4d1cc09f4de553d1700a210b16cfeae48e9f Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 9 Sep 2025 16:53:26 -0600 Subject: [PATCH 13/17] Don't block UI while pulling the stream live info (#19998) --- web/src/hooks/use-camera-live-mode.ts | 44 ++++++++++++++++++--------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/web/src/hooks/use-camera-live-mode.ts b/web/src/hooks/use-camera-live-mode.ts index 76689b9bc..17dfaee91 100644 --- a/web/src/hooks/use-camera-live-mode.ts +++ b/web/src/hooks/use-camera-live-mode.ts @@ -33,29 +33,43 @@ export default function useCameraLiveMode( const streamsFetcher = useCallback(async (key: string) => { const streamNames = key.split(","); - const metadata: { [key: string]: LiveStreamMetadata } = {}; - await Promise.all( - streamNames.map(async (streamName) => { - try { - const response = await fetch(`/api/go2rtc/streams/${streamName}`); - if (response.ok) { - const data = await response.json(); - metadata[streamName] = data; - } - } catch (error) { - // eslint-disable-next-line no-console - console.error(`Failed to fetch metadata for ${streamName}:`, error); + const metadataPromises = streamNames.map(async (streamName) => { + try { + const response = await fetch(`/api/go2rtc/streams/${streamName}`, { + priority: "low", + }); + + if (response.ok) { + const data = await response.json(); + return { streamName, data }; } - }), - ); + return { streamName, data: null }; + } catch (error) { + // eslint-disable-next-line no-console + console.error(`Failed to fetch metadata for ${streamName}:`, error); + return { streamName, data: null }; + } + }); + + const results = await Promise.allSettled(metadataPromises); + + const metadata: { [key: string]: LiveStreamMetadata } = {}; + results.forEach((result) => { + if (result.status === "fulfilled" && result.value.data) { + metadata[result.value.streamName] = result.value.data; + } + }); return metadata; }, []); const { data: allStreamMetadata = {} } = useSWR<{ [key: string]: LiveStreamMetadata; - }>(restreamedStreamsKey, streamsFetcher, { revalidateOnFocus: false }); + }>(restreamedStreamsKey, streamsFetcher, { + revalidateOnFocus: false, + dedupingInterval: 10000, + }); const [preferredLiveModes, setPreferredLiveModes] = useState<{ [key: string]: LivePlayerMode; From 7c7ff49b90cf32e89f64486b817e310210cd8963 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 11 Sep 2025 05:17:08 -0600 Subject: [PATCH 14/17] Improve d-fine model export docs (#20020) --- docs/docs/configuration/object_detectors.md | 39 +++++++++++---------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md 
b/docs/docs/configuration/object_detectors.md index f537d320b..1e68d6ff4 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -978,26 +978,29 @@ Here are some tips for getting different model types ### Downloading D-FINE Model -To export as ONNX: - -1. Clone: https://github.com/Peterande/D-FINE and install all dependencies. -2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE). -3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)` -4. Run the export, making sure you select the right config, for your checkpoint. - -Example: +D-FINE can be exported as ONNX by running the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=s` in the first line to `s`, `m`, or `l` size. +```sh +docker build . --build-arg MODEL_SIZE=s --output . -f- <<'EOF' +FROM python:3.11 AS build +RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/* +COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/ +WORKDIR /dfine +RUN git clone https://github.com/Peterande/D-FINE.git . +RUN uv pip install --system -r requirements.txt +RUN uv pip install --system onnx onnxruntime onnxsim +# Create output directory and download checkpoint +RUN mkdir -p output +ARG MODEL_SIZE +RUN wget https://github.com/Peterande/storage/releases/download/dfinev1.0/dfine_${MODEL_SIZE}_obj2coco.pth -O output/dfine_${MODEL_SIZE}_obj2coco.pth +# Modify line 58 of export_onnx.py to change batch size to 1 +RUN sed -i '58s/data = torch.rand(.*)/data = torch.rand(1, 3, 640, 640)/' tools/deployment/export_onnx.py +RUN python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_${MODEL_SIZE}_obj2coco.yml -r output/dfine_${MODEL_SIZE}_obj2coco.pth +FROM scratch +ARG MODEL_SIZE +COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL_SIZE}.onnx +EOF ``` -python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth -``` - -:::tip - -Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually. - -Make sure you change the batch size to 1 before exporting. 
- -::: ### Download RF-DETR Model From b08db4913f72c23638999f30633a91a85bdbee1d Mon Sep 17 00:00:00 2001 From: GuoQing Liu <842607283@qq.com> Date: Sun, 14 Sep 2025 20:51:56 +0800 Subject: [PATCH 15/17] feat: add github mirror download endpoint (#20007) * feat: add github mirror download endpoint * fix: fix face_embedding endpoint line * fix: fix github raw endpoint Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- frigate/data_processing/real_time/bird.py | 7 +++++-- frigate/data_processing/real_time/face.py | 6 ++++-- frigate/detectors/plugins/rknn.py | 3 ++- frigate/embeddings/onnx/face_embedding.py | 6 ++++-- frigate/embeddings/onnx/lpr_embedding.py | 12 ++++++++---- 5 files changed, 23 insertions(+), 11 deletions(-) diff --git a/frigate/data_processing/real_time/bird.py b/frigate/data_processing/real_time/bird.py index 8d2c598fc..d547f2ddd 100644 --- a/frigate/data_processing/real_time/bird.py +++ b/frigate/data_processing/real_time/bird.py @@ -41,10 +41,13 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): self.detected_birds: dict[str, float] = {} self.labelmap: dict[int, str] = {} + GITHUB_RAW_ENDPOINT = os.environ.get( + "GITHUB_RAW_ENDPOINT", "https://raw.githubusercontent.com" + ) download_path = os.path.join(MODEL_CACHE_DIR, "bird") self.model_files = { - "bird.tflite": "https://raw.githubusercontent.com/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite", - "birdmap.txt": "https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt", + "bird.tflite": f"{GITHUB_RAW_ENDPOINT}/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite", + "birdmap.txt": f"{GITHUB_RAW_ENDPOINT}/google-coral/test_data/master/inat_bird_labels.txt", } if not all( diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 144ec42d4..5a6525362 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -60,10 +60,12 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.faces_per_second = EventsPerSecond() self.inference_speed = InferenceSpeed(self.metrics.face_rec_speed) + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") + download_path = os.path.join(MODEL_CACHE_DIR, "facedet") self.model_files = { - "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx", - "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml", + "facedet.onnx": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx", + "landmarkdet.yaml": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml", } if not all( diff --git a/frigate/detectors/plugins/rknn.py b/frigate/detectors/plugins/rknn.py index 828507c54..46fae3e62 100644 --- a/frigate/detectors/plugins/rknn.py +++ b/frigate/detectors/plugins/rknn.py @@ -139,8 +139,9 @@ class Rknn(DetectionApi): if not os.path.isdir(model_cache_dir): os.mkdir(model_cache_dir) + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") urllib.request.urlretrieve( - f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.2-2/{filename}", + f"{GITHUB_ENDPOINT}/MarcA711/rknn-models/releases/download/v2.3.2-2/{filename}", model_cache_dir + filename, ) diff --git a/frigate/embeddings/onnx/face_embedding.py 
b/frigate/embeddings/onnx/face_embedding.py index c0f35a581..eb04b43b2 100644 --- a/frigate/embeddings/onnx/face_embedding.py +++ b/frigate/embeddings/onnx/face_embedding.py @@ -24,11 +24,12 @@ FACENET_INPUT_SIZE = 160 class FaceNetEmbedding(BaseEmbedding): def __init__(self): + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") super().__init__( model_name="facedet", model_file="facenet.tflite", download_urls={ - "facenet.tflite": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite", + "facenet.tflite": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite", }, ) self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) @@ -110,11 +111,12 @@ class FaceNetEmbedding(BaseEmbedding): class ArcfaceEmbedding(BaseEmbedding): def __init__(self): + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") super().__init__( model_name="facedet", model_file="arcface.onnx", download_urls={ - "arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx", + "arcface.onnx": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx", }, ) self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) diff --git a/frigate/embeddings/onnx/lpr_embedding.py b/frigate/embeddings/onnx/lpr_embedding.py index ac981da8d..35ff5ceee 100644 --- a/frigate/embeddings/onnx/lpr_embedding.py +++ b/frigate/embeddings/onnx/lpr_embedding.py @@ -34,11 +34,12 @@ class PaddleOCRDetection(BaseEmbedding): model_file = ( "detection-large.onnx" if model_size == "large" else "detection-small.onnx" ) + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") super().__init__( model_name="paddleocr-onnx", model_file=model_file, download_urls={ - model_file: f"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{model_file}" + model_file: f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{model_file}" }, ) self.requestor = requestor @@ -94,11 +95,12 @@ class PaddleOCRClassification(BaseEmbedding): requestor: InterProcessRequestor, device: str = "AUTO", ): + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") super().__init__( model_name="paddleocr-onnx", model_file="classification.onnx", download_urls={ - "classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx" + "classification.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx" }, ) self.requestor = requestor @@ -154,11 +156,12 @@ class PaddleOCRRecognition(BaseEmbedding): requestor: InterProcessRequestor, device: str = "AUTO", ): + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") super().__init__( model_name="paddleocr-onnx", model_file="recognition.onnx", download_urls={ - "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" + "recognition.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" }, ) self.requestor = requestor @@ -214,11 +217,12 @@ class LicensePlateDetector(BaseEmbedding): requestor: InterProcessRequestor, device: str = "AUTO", ): + GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com") super().__init__( model_name="yolov9_license_plate", model_file="yolov9-256-license-plates.onnx", download_urls={ - "yolov9-256-license-plates.onnx": 
"https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx" + "yolov9-256-license-plates.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx" }, ) From bafdab9d67be1d7455c95583b65982a855f266db Mon Sep 17 00:00:00 2001 From: GuoQing Liu <842607283@qq.com> Date: Tue, 16 Sep 2025 20:14:27 +0800 Subject: [PATCH 16/17] feat: add robots.txt (#20093) --- web/public/robots.txt | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 web/public/robots.txt diff --git a/web/public/robots.txt b/web/public/robots.txt new file mode 100644 index 000000000..77470cb39 --- /dev/null +++ b/web/public/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: / \ No newline at end of file From 4914029a504914eb50a957dc35f3808ec31801e5 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Tue, 16 Sep 2025 12:03:36 -0500 Subject: [PATCH 17/17] Add average_estimated_speed to mqtt docs (#20101) --- docs/docs/integrations/mqtt.md | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md index afbc78e99..3ad435b81 100644 --- a/docs/docs/integrations/mqtt.md +++ b/docs/docs/integrations/mqtt.md @@ -29,12 +29,12 @@ Message published for each changed tracked object. The first message is publishe "camera": "front_door", "frame_time": 1607123961.837752, "snapshot": { - "frame_time": 1607123965.975463, - "box": [415, 489, 528, 700], - "area": 12728, - "region": [260, 446, 660, 846], - "score": 0.77546, - "attributes": [], + "frame_time": 1607123965.975463, + "box": [415, 489, 528, 700], + "area": 12728, + "region": [260, 446, 660, 846], + "score": 0.77546, + "attributes": [] }, "label": "person", "sub_label": null, @@ -61,6 +61,7 @@ Message published for each changed tracked object. The first message is publishe }, // attributes with top score that have been identified on the object at any point "current_attributes": [], // detailed data about the current attributes in this frame "current_estimated_speed": 0.71, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled + "average_estimated_speed": 14.3, // average estimated speed (mph or kph) for objects moving through zones with speed estimation enabled "velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled "recognized_license_plate": "ABC12345", // a recognized license plate for car objects "recognized_license_plate_score": 0.933451 @@ -70,12 +71,12 @@ Message published for each changed tracked object. The first message is publishe "camera": "front_door", "frame_time": 1607123962.082975, "snapshot": { - "frame_time": 1607123965.975463, - "box": [415, 489, 528, 700], - "area": 12728, - "region": [260, 446, 660, 846], - "score": 0.77546, - "attributes": [], + "frame_time": 1607123965.975463, + "box": [415, 489, 528, 700], + "area": 12728, + "region": [260, 446, 660, 846], + "score": 0.77546, + "attributes": [] }, "label": "person", "sub_label": ["John Smith", 0.79], @@ -109,6 +110,7 @@ Message published for each changed tracked object. 
The first message is publishe } ], "current_estimated_speed": 0.77, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled + "average_estimated_speed": 14.31, // average estimated speed (mph or kph) for objects moving through zones with speed estimation enabled "velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled "recognized_license_plate": "ABC12345", // a recognized license plate for car objects "recognized_license_plate_score": 0.933451 @@ -139,7 +141,7 @@ Message published for updates to tracked object metadata, for example: "name": "John", "score": 0.95, "camera": "front_door_cam", - "timestamp": 1607123958.748393, + "timestamp": 1607123958.748393 } ``` @@ -153,7 +155,7 @@ Message published for updates to tracked object metadata, for example: "plate": "123ABC", "score": 0.95, "camera": "driveway_cam", - "timestamp": 1607123958.748393, + "timestamp": 1607123958.748393 } ```
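Since the `frigate/events` payload now includes `average_estimated_speed`, a minimal consumer sketch is shown below. Assumptions: the `paho-mqtt` package (1.x callback API), a broker at the placeholder hostname `mqtt-broker.local`, and Frigate's default `frigate` topic prefix.

```python
import json

import paho.mqtt.client as mqtt  # assumption: paho-mqtt 1.x; 2.x requires a CallbackAPIVersion argument


def on_message(client, userdata, message):
    # frigate/events carries "before"/"after" snapshots of the tracked object
    payload = json.loads(message.payload)
    after = payload.get("after") or {}
    speed = after.get("average_estimated_speed")
    if speed:
        # Units are mph or kph, matching the camera's speed estimation config
        print(f"{after.get('label')} on {after.get('camera')}: average speed {speed}")


client = mqtt.Client()
client.on_message = on_message
client.connect("mqtt-broker.local", 1883)  # placeholder broker hostname
client.subscribe("frigate/events")
client.loop_forever()
```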