Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-10-04 07:06:58 +08:00)

Compare commits: zmq-model-...dependabot (65 commits)
Commits (SHA1):

b652ce6bb3, 41e5c12e5b, 8307fe31aa, 1f061a8e73, 55d6383234, caa187e4ed, 4331ed0d7b, 08309793d4, c7a4e6bcc4, c94446a472,
17b6128314, 117a878533, ff5ebcf94d, 24c519f032, 90fbb77ee0, 9f1d8b0e31, 875d20b195, 48056ac15c, 993459152b, 8430fbc705,
f7c4ff12f7, 8f0be18422, 28e3aa39f0, 16c88fa8ac, 1b6c246a44, e8b2828ca0, 923412ec1c, 8b85cd816e, bebe99d9b8, a08fda62f8,
9fdce80729, 12f8c3feac, b6552987b0, c207009d8a, e6cbc93703, b8b07ee6e1, 082867447b, 8b293449f9, 2f209b2cf4, 9a22404015,
2c4a043dbb, b23355da53, 90db2d57b3, 652fdc6a38, 7e2f5a3017, 2f99a17e64, 2bc92cce81, 7f7eefef7f, 4914029a50, bafdab9d67,
b08db4913f, 7c7ff49b90, 037c4d1cc0, 1613499218, 205fdf3ae3, f46f8a2160, 880902cdd7, c5ed95ec52, 751de141d5, 0eb441fe50,
7566aecb0b, 60714a733e, d7f7cd7be1, 6591210050, 7e7b3288a8
`.github/workflows/ci.yml` (vendored, 25 changed lines)

@@ -173,6 +173,31 @@ jobs:
          set: |
            rk.tags=${{ steps.setup.outputs.image-name }}-rk
            *.cache-from=type=gha
  synaptics_build:
    runs-on: ubuntu-22.04-arm
    name: Synaptics Build
    needs:
      - arm64_build
    steps:
      - name: Check out code
        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Synaptics build
        uses: docker/bake-action@v6
        with:
          source: .
          push: true
          targets: synaptics
          files: docker/synaptics/synaptics.hcl
          set: |
            synaptics.tags=${{ steps.setup.outputs.image-name }}-synaptics
            *.cache-from=type=gha
  # The majority of users running arm64 are rpi users, so the rpi
  # build should be the primary arm64 image
  assemble_default_build:
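For reference, the bake step this job runs is roughly equivalent to the local command below (a sketch; the tag is an assumption, since CI derives it from the setup action's `image-name` output):

```sh
# Build the Synaptics variant locally using the same bake file and target as the workflow
docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
  --set synaptics.tags=frigate:dev-synaptics \
  --load
```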
`.github/workflows/pull_request.yml` (vendored, 55 changed lines)

@@ -4,38 +4,14 @@ on:
  pull_request:
    paths-ignore:
      - "docs/**"
      - ".github/**"
      - ".github/*.yml"
      - ".github/DISCUSSION_TEMPLATE/**"
      - ".github/ISSUE_TEMPLATE/**"

env:
  DEFAULT_PYTHON: 3.11

jobs:
  build_devcontainer:
    runs-on: ubuntu-latest
    name: Build Devcontainer
    # The Dockerfile contains features that requires buildkit, and since the
    # devcontainer cli uses docker-compose to build the image, the only way to
    # ensure docker-compose uses buildkit is to explicitly enable it.
    env:
      DOCKER_BUILDKIT: "1"
    steps:
      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 20.x
      - name: Install devcontainer cli
        run: npm install --global @devcontainers/cli
      - name: Build devcontainer
        run: devcontainer build --workspace-folder .
      # It would be nice to also test the following commands, but for some
      # reason they don't work even though in VS Code devcontainer works.
      # - name: Start devcontainer
      #   run: devcontainer up --workspace-folder .
      # - name: Run devcontainer scripts
      #   run: devcontainer run-user-commands --workspace-folder .

  web_lint:
    name: Web - Lint
    runs-on: ubuntu-latest

@@ -102,13 +78,18 @@ jobs:
        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build
        run: make debug
      - name: Run mypy
        run: docker run --rm --entrypoint=python3 frigate:latest -u -m mypy --config-file frigate/mypy.ini frigate
      - name: Run tests
        run: docker run --rm --entrypoint=python3 frigate:latest -u -m unittest
      - uses: actions/setup-node@master
        with:
          node-version: 20.x
      - name: Install devcontainer cli
        run: npm install --global @devcontainers/cli
      - name: Build devcontainer
        env:
          DOCKER_BUILDKIT: "1"
        run: devcontainer build --workspace-folder .
      - name: Start devcontainer
        run: devcontainer up --workspace-folder .
      - name: Run mypy in devcontainer
        run: devcontainer exec --workspace-folder . bash -lc "python3 -u -m mypy --config-file frigate/mypy.ini frigate"
      - name: Run unit tests in devcontainer
        run: devcontainer exec --workspace-folder . bash -lc "python3 -u -m unittest"
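The same checks can be reproduced locally with the devcontainer CLI, using exactly the commands from the workflow above (assumes Docker and Node.js are already installed):

```sh
npm install --global @devcontainers/cli
devcontainer build --workspace-folder .
devcontainer up --workspace-folder .
devcontainer exec --workspace-folder . bash -lc "python3 -u -m mypy --config-file frigate/mypy.ini frigate"
devcontainer exec --workspace-folder . bash -lc "python3 -u -m unittest"
```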
@@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.9/go2rtc_linux_${TARGETARCH}" go2rtc
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc

FROM wget AS tempio
ARG TARGETARCH
@@ -50,6 +50,38 @@ function set_libva_version() {
    export LIBAVFORMAT_VERSION_MAJOR
}

function setup_homekit_config() {
    local config_path="$1"

    if [[ ! -f "${config_path}" ]]; then
        echo "[INFO] Creating empty HomeKit config file..."
        echo '{}' > "${config_path}"
    fi

    # Convert YAML to JSON for jq processing
    local temp_json="/tmp/cache/homekit_config.json"
    yq eval -o=json "${config_path}" > "${temp_json}" 2>/dev/null || {
        echo "[WARNING] Failed to convert HomeKit config to JSON, skipping cleanup"
        return 0
    }

    # Use jq to filter and keep only the homekit section
    local cleaned_json="/tmp/cache/homekit_cleaned.json"
    jq '
        # Keep only the homekit section if it exists, otherwise empty object
        if has("homekit") then {homekit: .homekit} else {homekit: {}} end
    ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || echo '{"homekit": {}}' > "${cleaned_json}"

    # Convert back to YAML and write to the config file
    yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
        echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
        echo '{"homekit": {}}' > "${config_path}"
    }

    # Clean up temp files
    rm -f "${temp_json}" "${cleaned_json}"
}

set_libva_version

if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
@@ -70,6 +102,10 @@ else
    echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi

# HomeKit configuration persistence setup
readonly homekit_config_path="/config/go2rtc_homekit.yml"
setup_homekit_config "${homekit_config_path}"

readonly config_path="/config"

if [[ -x "${config_path}/go2rtc" ]]; then
@@ -82,5 +118,7 @@ fi
echo "[INFO] Starting go2rtc..."

# Replace the bash process with the go2rtc process, redirecting stderr to stdout
# Use HomeKit config as the primary config so writebacks go there
# The main config from Frigate will be loaded as a secondary config
exec 2>&1
exec "${binary_path}" -config=/dev/shm/go2rtc.yaml
exec "${binary_path}" -config="${homekit_config_path}" -config=/dev/shm/go2rtc.yaml
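As a rough illustration of what the jq filter in `setup_homekit_config` does (sample data only, not taken from the diff), any keys written back alongside the HomeKit section are stripped, leaving only the persisted pairing state:

```sh
# Hypothetical writeback file containing an unrelated "streams" key
echo '{"homekit": {"cam1": {"pin": "123-45-678"}}, "streams": {"cam1": "rtsp://example"}}' \
  | jq 'if has("homekit") then {homekit: .homekit} else {homekit: {}} end'
# Output retains only the "homekit" section
```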
@@ -17,7 +17,9 @@ http {

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
                    '"$http_user_agent" "$http_x_forwarded_for" '
                    'request_time="$request_time" upstream_response_time="$upstream_response_time"';

    access_log /dev/stdout main;
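With the two added fields, an access log entry would look roughly like the line below (values are invented for illustration):

```
192.168.1.50 - - [04/Oct/2025:07:06:58 +0800] "GET /api/stats HTTP/1.1" 200 512 "-" "Mozilla/5.0" "-" request_time="0.012" upstream_response_time="0.010"
```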
@@ -15,14 +15,14 @@ ARG AMDGPU

RUN apt update -qq && \
    apt install -y wget gpg && \
    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/6.4.1/ubuntu/jammy/amdgpu-install_6.4.60401-1_all.deb && \
    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.0.1/ubuntu/jammy/amdgpu-install_7.0.1.70001-1_all.deb && \
    apt install -y ./rocm.deb && \
    apt update && \
    apt install -qq -y rocm

RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && \
    cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
    cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* librocroller.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
    mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \
    cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm

@@ -64,11 +64,10 @@ COPY --from=rocm /opt/rocm-dist/ /

#######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0
ENV HSA_ENABLE_SDMA=0
ENV TF_ROCM_USE_IMMEDIATE_MODE=1

# avoid kernel crashes
ENV HIP_FORCE_DEV_KERNARG=1
ENV MIGRAPHX_DISABLE_MIOPEN_FUSION=1
ENV MIGRAPHX_DISABLE_SCHEDULE_PASS=1
ENV MIGRAPHX_DISABLE_REDUCE_FUSION=1
ENV MIGRAPHX_ENABLE_HIPRTC_WORKAROUNDS=1

COPY --from=rocm-dist / /
@@ -1 +1 @@
onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.4.1/onnxruntime_rocm-1.21.1-cp311-cp311-linux_x86_64.whl
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.0.1/onnxruntime_migraphx-1.23.0-cp311-cp311-linux_x86_64.whl
@@ -2,7 +2,7 @@ variable "AMDGPU" {
  default = "gfx900"
}
variable "ROCM" {
  default = "6.4.1"
  default = "7.0.1"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
  default = ""
`docker/synaptics/Dockerfile` (new file, 28 lines)

@@ -0,0 +1,28 @@
# syntax=docker/dockerfile:1.6

# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1

FROM wheels AS synap1680-wheels
ARG TARGETARCH

# Install dependencies
RUN wget -qO- "https://github.com/GaryHuang-ASUS/synaptics_astra_sdk/releases/download/v1.5.0/Synaptics-SL1680-v1.5.0-rt.tar" | tar -C / -xzf -
RUN wget -P /wheels/ "https://github.com/synaptics-synap/synap-python/releases/download/v0.0.4-preview/synap_python-0.0.4-cp311-cp311-manylinux_2_35_aarch64.whl"

FROM deps AS synap1680-deps
ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES

RUN --mount=type=bind,from=synap1680-wheels,source=/wheels,target=/deps/synap-wheels \
    pip3 install --no-deps -U /deps/synap-wheels/*.whl

WORKDIR /opt/frigate/
COPY --from=rootfs / /

COPY --from=synap1680-wheels /rootfs/usr/local/lib/*.so /usr/lib

ADD https://raw.githubusercontent.com/synaptics-astra/synap-release/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80/model.synap /synaptics/mobilenet.synap
`docker/synaptics/synaptics.hcl` (new file, 27 lines)

@@ -0,0 +1,27 @@
target wheels {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "wheels"
}

target deps {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "deps"
}

target rootfs {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "rootfs"
}

target synaptics {
  dockerfile = "docker/synaptics/Dockerfile"
  contexts = {
    wheels = "target:wheels",
    deps = "target:deps",
    rootfs = "target:rootfs"
  }
  platforms = ["linux/arm64"]
}
`docker/synaptics/synaptics.mk` (new file, 15 lines)

@@ -0,0 +1,15 @@
BOARDS += synaptics

local-synaptics: version
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=frigate:latest-synaptics \
		--load

build-synaptics: version
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics

push-synaptics: build-synaptics
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics \
		--push
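Assuming the main Makefile includes the per-board `.mk` files (as the `BOARDS +=` pattern suggests), a local Synaptics image build would then be invoked as:

```sh
# Produces frigate:latest-synaptics loaded into the local Docker daemon
make local-synaptics
```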
@@ -177,9 +177,11 @@ listen [::]:5000 ipv6only=off;

By default, Frigate runs at the root path (`/`). However, some setups require running Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing.

### Set Base Path via HTTP Header

The preferred way to configure the base path is through the `X-Ingress-Path` HTTP header, which needs to be set to the desired base path in an upstream reverse proxy.

For example, in Nginx:

```
location /frigate {
    proxy_set_header X-Ingress-Path /frigate;
@@ -188,9 +190,11 @@ location /frigate {
```

### Set Base Path via Environment Variable

When it is not feasible to set the base path via an HTTP header, it can also be set via the `FRIGATE_BASE_PATH` environment variable in the Docker Compose file.

For example:

```
services:
  frigate:
@@ -200,6 +204,7 @@ services:
```
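The compose snippet above is truncated by the diff hunk; purely as an illustration (image tag and layout are assumptions, only the `FRIGATE_BASE_PATH` variable comes from the text), a complete snippet might look like:

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    environment:
      FRIGATE_BASE_PATH: /frigate
```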
This can be used, for example, to access Frigate via a Tailscale agent (https) by simply forwarding all requests to the base path (http):

```
tailscale serve --https=443 --bg --set-path /frigate http://localhost:5000/frigate
```
@@ -218,7 +223,7 @@ To do this:

### Custom go2rtc version

Frigate currently includes go2rtc v1.9.9; there may be certain cases where you want to run a different version of go2rtc.
Frigate currently includes go2rtc v1.9.10; there may be certain cases where you want to run a different version of go2rtc.

To do this:
@@ -147,7 +147,7 @@ WEB Digest Algorithm - MD5

Reolink has many different camera models with inconsistently supported features and behavior. The table below shows a summary of various features and recommendations.

| Camera Resolution | Camera Generation         | Recommended Stream Type           | Additional Notes                                                        |
| ----------------- | ------------------------- | --------------------------------- | ----------------------------------------------------------------------- |
| 5MP or lower      | All                       | http-flv                          | Stream is h264                                                          |
| 6MP or higher     | Latest (ex: Duo3, CX-8##) | http-flv with ffmpeg 8.0, or rtsp | This uses the new http-flv-enhanced over H265 which requires ffmpeg 8.0 |
| 6MP or higher     | Older (ex: RLC-8##)       | rtsp                              |                                                                         |

@@ -231,7 +231,7 @@ go2rtc:
      - rtspx://192.168.1.1:7441/abcdefghijk
```

[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-rtsp)
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)

In the Unifi 2.0 update, Unifi Protect cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with Unifi Protect.

@@ -250,6 +250,7 @@ TP-Link VIGI cameras need some adjustments to the main stream settings on the ca

To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:

- Preparation outside of Frigate:

  - Get USB camera path. Run `v4l2-ctl --list-devices` to get a listing of locally-connected cameras available. (You may need to install `v4l-utils` in a way appropriate for your Linux distribution). In the sample configuration below, we use `video=0` to correlate with a detected device path of `/dev/video0`
  - Get USB camera formats & resolutions. Run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to get an idea of what formats and resolutions the USB Camera supports. In the sample configuration below, we use a width of 1024 and height of 576 in the stream and detection settings based on what was reported back.
  - If using Frigate in a container (e.g. Docker on TrueNAS), ensure you have USB Passthrough support enabled, along with a specific Host Device (`/dev/video0`) + Container Device (`/dev/video0`) listed.

@@ -277,5 +278,3 @@ cameras:
        width: 1024
        height: 576
```
@@ -0,0 +1,73 @@
---
id: object_classification
title: Object Classification
---

Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object.

## Minimum System Requirements

Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.

Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.

### Sub label vs Attribute

- **Sub label**:

  - Applied to the object’s `sub_label` field.
  - Ideal for a single, more specific identity or type.
  - Example: `cat` → `Leo`, `Charlie`, `None`.

- **Attribute**:
  - Added as metadata to the object (visible in /events): `<model_name>: <predicted_value>`.
  - Ideal when multiple attributes can coexist independently.
  - Example: Detecting if a `person` in a construction yard is wearing a helmet or not.

## Example use cases

### Sub label

- **Known pet vs unknown**: For `dog` objects, set sub label to your pet’s name (e.g., `buddy`) or `none` for others.
- **Mail truck vs normal car**: For `car`, classify as `mail_truck` vs `car` to filter important arrivals.
- **Delivery vs non-delivery person**: For `person`, classify `delivery` vs `visitor` based on uniform/props.

### Attributes

- **Backpack**: For `person`, add attribute `backpack: yes/no`.
- **Helmet**: For `person` (worksite), add `helmet: yes/no`.
- **Leash**: For `dog`, add `leash: yes/no` (useful for park or yard rules).
- **Ladder rack**: For `truck`, add `ladder_rack: yes/no` to flag service vehicles.

## Configuration

Object classification is configured as a custom classification model. Each model has its own name and settings. You must list which object labels should be classified.

```yaml
classification:
  custom:
    dog:
      threshold: 0.8
      object_config:
        objects: [dog] # object labels to classify
        classification_type: sub_label # or: attribute
```
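For an attribute-type model (such as the helmet example above), the same schema applies; this is a sketch assuming the model name and its classes are defined in the UI:

```yaml
classification:
  custom:
    helmet:
      threshold: 0.8
      object_config:
        objects: [person]
        classification_type: attribute
```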
## Training the model

Creating and training the model is done within the Frigate UI using the `Classification` page.

### Getting Started

When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.

// TODO add this section once UI is implemented. Explain process of selecting objects and curating training examples.

### Improving the Model

- **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
- **Data collection**: Use the model’s Train tab to gather balanced examples across times of day, weather, and distances.
- **Preprocessing**: Ensure examples reflect object crops similar to Frigate’s boxes; keep the subject centered.
- **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
- **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.
@@ -0,0 +1,52 @@
---
id: state_classification
title: State Classification
---

State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region.

## Minimum System Requirements

State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.

Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.

## Example use cases

- **Door state**: Detect if a garage or front door is open vs closed.
- **Gate state**: Track if a driveway gate is open or closed.
- **Trash day**: Bins at curb vs no bins present.
- **Pool cover**: Cover on vs off.

## Configuration

State classification is configured as a custom classification model. Each model has its own name and settings. You must provide at least one camera crop under `state_config.cameras`.

```yaml
classification:
  custom:
    front_door:
      threshold: 0.8
      state_config:
        motion: true # run when motion overlaps the crop
        interval: 10 # also run every N seconds (optional)
        cameras:
          front:
            crop: [0, 180, 220, 400]
```

## Training the model

Creating and training the model is done within the Frigate UI using the `Classification` page.

### Getting Started

When choosing a portion of the camera frame for state classification, it is important to make the crop tight around the area of interest to avoid extra signals unrelated to what is being classified.

// TODO add this section once UI is implemented. Explain process of selecting a crop.

### Improving the Model

- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
- **Data collection**: Use the model’s Train tab to gather balanced examples across times of day and weather.
@@ -27,13 +27,26 @@ Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_

You must use a vision-capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config; Ollama will try to download the model, but it may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
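For example, to pre-pull one of the models recommended below before enabling it in Frigate (the tag here is just an illustration):

```sh
ollama pull qwen2.5vl:3b
```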
:::info

Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger sizes are more capable of complex tasks and understanding of situations, but require more memory and computational resources. It is recommended to try multiple models and experiment to see which performs best.

:::

:::tip

If you are trying to use a single model for Frigate and Home Assistant, it will need to support vision and tool calling. https://github.com/skye-harris/ollama-modelfiles contains optimized model configs for this task.

:::

The following models are recommended:

| Model             | Size   | Notes                                                       |
| ----------------- | ------ | ----------------------------------------------------------- |
| `gemma3:4b`       | 3.3 GB | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5vl:3b`    | 3.2 GB | Fast but capable model with good vision comprehension       |
| `llava-phi3:3.8b` | 2.9 GB | Lightweight and fast model with vision comprehension        |

| Model             | Notes                                                       |
| ----------------- | ----------------------------------------------------------- |
| `Intern3.5VL`     | Relatively fast with good vision comprehension              |
| `gemma3`          | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5vl`       | Fast but capable model with good vision comprehension       |
| `llava-phi3`      | Lightweight and fast model with vision comprehension        |

:::note
@@ -50,6 +63,8 @@ genai:
  model: minicpm-v:8b
  provider_options: # other Ollama client options can be defined
    keep_alive: -1
    options:
      num_ctx: 8192 # make sure the context matches other services that are using ollama
```

## Google Gemini

@@ -124,4 +139,4 @@ genai:
  provider: azure_openai
  base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
  api_key: "{FRIGATE_OPENAI_API_KEY}"
```
@@ -27,6 +27,18 @@ Threat-level definitions:

This will show in the UI as a list of concerns that each review item has along with the general description.

### Defining Typical Activity

Each installation, and even each camera, can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, which allows you to define more specifically what should be considered normal activity. It is important that this is not overly specific, as it can sway the output of the response. The default `activity_context_prompt` is below:

```
- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
```

### Additional Concerns

Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:
@@ -427,3 +427,29 @@ cameras:
```

:::

## Synaptics

Hardware accelerated video de-/encoding is supported on Synaptics SL-series SoCs.

### Prerequisites

Make sure to follow the [Synaptics specific installation instructions](/frigate/installation#synaptics).

### Configuration

Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing:

```yaml
ffmpeg:
  hwaccel_args: -c:v h264_v4l2m2m
  input_args: preset-rtsp-restream
  output_args:
    record: preset-record-generic-audio-aac
```

:::warning

Make sure that your SoC supports hardware acceleration for your input stream and that your input stream is h264 encoded. For example, if your camera streams with h264 encoding, your SoC must be able to de- and encode with it. If you are unsure whether your SoC meets the requirements, take a look at the datasheet.

:::
@@ -176,7 +176,7 @@ For devices that support two way talk, Frigate can be configured to use the feat

To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)

As a starting point to check compatibility for your camera, view the list of cameras supported for two-way talk on the [go2rtc repository](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#two-way-audio). For cameras in the category `ONVIF Profile T`, you can use the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/)'s FeatureList to check for the presence of `AudioOutput`. A camera that supports `ONVIF Profile T` _usually_ supports this, but due to inconsistent support, a camera that explicitly lists this feature may still not work. If no entry for your camera exists on the database, it is recommended not to buy it or to consult with the manufacturer's support on the feature availability.

### Streaming options on camera group dashboards
@@ -230,7 +230,26 @@ Note that disabling a camera through the config file (`enabled: False`) removes

   If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again.

   If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the [recommendations above](#camera_settings_recommendations).
   Errors in stream playback (e.g., connection failures, codec issues, or buffering timeouts) that cause the fallback to low bandwidth mode (jsmpeg) are logged to the browser console for easier debugging. These errors may include:

   - Network issues (e.g., MSE or WebRTC network connection problems).
   - Unsupported codecs or stream formats (e.g., H.265 in WebRTC, which is not supported in some browsers).
   - Buffering timeouts or low bandwidth conditions causing fallback to jsmpeg.
   - Browser compatibility problems (e.g., iOS Safari limitations with MSE).

   To view browser console logs:

   1. Open the Frigate Live View in your browser.
   2. Open the browser's Developer Tools (F12 or right-click > Inspect > Console tab).
   3. Reproduce the error (e.g., load a problematic stream or simulate network issues).
   4. Look for messages prefixed with the camera name.

   These logs help identify if the issue is player-specific (MSE vs. WebRTC) or related to camera configuration (e.g., go2rtc streams, codecs). If you see frequent errors:

   - Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera_settings_recommendations)).
   - Check go2rtc configuration for transcoding (e.g., audio to AAC/OPUS).
   - Test with a different stream via the UI dropdown (if `live -> streams` is configured).
   - For WebRTC-specific issues, ensure port 8555 is forwarded and candidates are set (see [WebRTC Extra Configuration](#webrtc-extra-configuration)).

3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?**
@@ -253,3 +272,7 @@ Note that disabling a camera through the config file (`enabled: False`) removes

6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?**

   If your camera is streaming (as indicated by a red dot in the upper right, or if it has been set to continuous streaming mode), your browser may be blocking audio until you interact with the page. This is an intentional browser limitation. See [this article](https://developer.mozilla.org/en-US/docs/Web/Media/Autoplay_guide#autoplay_availability). Many browsers have a whitelist feature to change this behavior.

7. **My camera streams have lots of visual artifacts / distortion.**

   Some cameras don't include the hardware to support multiple connections to the high resolution stream, and this can cause unexpected behavior. In this case it is recommended to [restream](./restream.md) the high resolution stream so that it can be used for live view and recordings.
@@ -35,6 +35,7 @@ Frigate supports multiple different detectors that work on different types of ha
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.

**Nvidia Jetson**

- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt-jp6` Frigate image when a supported ONNX model is configured.

@@ -42,6 +43,10 @@ Frigate supports multiple different detectors that work on different types of ha

- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.

**Synaptics**

- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs.

**For Testing**

- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
@@ -331,6 +336,12 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv

:::

:::warning

If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

:::

After placing the downloaded onnx model in your config folder, you can use the following configuration:

```yaml
@@ -442,12 +453,13 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv

:::

After placing the downloaded onnx model in your config folder, you can use the following configuration:
When Frigate is started with the following config, it will connect to the detector client and transfer the model automatically:

```yaml
detectors:
  onnx:
    type: onnx
  apple-silicon:
    type: zmq
    endpoint: tcp://host.docker.internal:5555

model:
  model_type: yolo-generic
@@ -543,6 +555,17 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/

### ROCm Supported Models

:::tip

The AMD GPU kernel is known to be problematic, especially when converting models to mxr format. The recommended approach is:

1. Disable object detection in the config.
2. Start up Frigate with the onnx detector configured; the main object detection model will be converted to mxr format and cached in the config directory.
3. Once this is finished as indicated by the logs, enable object detection in the UI and confirm that it is working correctly.
4. Re-enable object detection in the config.

:::
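Step 1 of the tip above amounts to temporarily toggling detection off in the config (a sketch using Frigate's standard `detect` option; re-enable it again in step 4):

```yaml
detect:
  enabled: False
```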
See [ONNX supported models](#supported-models) for supported models; there are some caveats:

- D-FINE models are not supported
@@ -592,6 +615,12 @@ There is no default model provided, the following formats are supported:

[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.

:::warning

If you are using a Frigate+ YOLO-NAS model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

:::

After placing the downloaded onnx model in your config folder, you can use the following configuration:

```yaml
@@ -619,6 +648,12 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv

:::

:::warning

If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

:::

After placing the downloaded onnx model in your config folder, you can use the following configuration:

```yaml
@@ -757,19 +792,19 @@ To verify that the integration is working correctly, start Frigate and observe t

# Community Supported Detectors

## MemryX MX3

This detector is available for use with the MemryX MX3 accelerator M.2 module. Frigate supports the MX3 on compatible hardware platforms, providing efficient and high-performance object detection.

See the [installation docs](../frigate/installation.md#memryx-mx3) for information on configuring the MemryX hardware.

To configure a MemryX detector, simply set the `type` attribute to `memryx` and follow the configuration guide below.

### Configuration

To configure the MemryX detector, use the following example configuration:

#### Single PCIe MemryX MX3

```yaml
detectors:
@@ -795,7 +830,7 @@ detectors:
    device: PCIe:2
```

### Supported Models

MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the container at `/memryx_models/model_folder/`.
@@ -809,9 +844,9 @@ The input size for **YOLO-NAS** can be set to either **320x320** (default) or **

- The default size of **320x320** is optimized for lower CPU usage and faster inference times.

##### Configuration

Below is the recommended configuration for using the **YOLO-NAS** (small) model with the MemryX detector:

```yaml
detectors:
@@ -833,13 +868,13 @@ model:
    # └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network)
```

#### YOLOv9

The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).

##### Configuration

Below is the recommended configuration for using the **YOLOv9** (small) model with the MemryX detector:

```yaml
detectors:
@@ -848,7 +883,7 @@ detectors:
    device: PCIe:0

model:
  model_type: yolo-generic
  width: 320 # (Can be set to 640 for higher resolution)
  height: 320 # (Can be set to 640 for higher resolution)
  input_tensor: nchw
@@ -861,13 +896,13 @@ model:
    # └── yolov9_post.onnx (optional; only if the model includes a cropped post-processing network)
```

#### YOLOX

The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/opencv_zoo) and precompiled to DFP.

##### Configuration

Below is the recommended configuration for using the **YOLOX** (small) model with the MemryX detector:

```yaml
detectors:
@@ -888,13 +923,13 @@ model:
    # ├── yolox.dfp (a file ending with .dfp)
```

#### SSDLite MobileNet v2

The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmmlab.com/model/mmdet-det/ssdlite-e8679f.onnx) and has been converted to DFP.

##### Configuration

Below is the recommended configuration for using the **SSDLite MobileNet v2** model with the MemryX detector:

```yaml
detectors:
@@ -1029,6 +1064,41 @@ model:
  height: 320 # MUST match the chosen model i.e yolov7-320 -> 320 yolov4-416 -> 416
```

## Synaptics

Hardware accelerated object detection is supported on the following SoCs:

- SL1680

This implementation uses the [Synaptics model conversion](https://synaptics-synap.github.io/doc/v/latest/docs/manual/introduction.html#offline-model-conversion), version v3.1.0.

This implementation is based on SDK `v1.5.0`.

See the [installation docs](../frigate/installation.md#synaptics) for information on configuring the SL-series NPU hardware.

### Configuration

When configuring the Synap detector, you have to specify the model: a local **path**.

#### SSD Mobilenet

A synap model is provided in the container at /synaptics/mobilenet.synap and is used by this detector type by default. The model comes from the [Synap-release GitHub](https://github.com/synaptics-astra/synap-release/tree/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80).

Use the model configuration shown below when using the synaptics detector with the default synap model:

```yaml
detectors: # required
  synap_npu: # required
    type: synaptics # required

model: # required
  path: /synaptics/mobilenet.synap # required
  width: 224 # required
  height: 224 # required
  tensor_format: nhwc # default value (optional. If you change the model, it is required)
  labelmap_path: /labelmap/coco-80.txt # required
```

## Rockchip platform

Hardware accelerated object detection is supported on the following SoCs:
@@ -1303,26 +1373,29 @@ Here are some tips for getting different model types

### Downloading D-FINE Model

To export as ONNX:

1. Clone: https://github.com/Peterande/D-FINE and install all dependencies.
2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE).
3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)`
4. Run the export, making sure you select the right config for your checkpoint.

Example:
D-FINE can be exported as ONNX by running the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=s` in the first line to the `s`, `m`, or `l` size.

```sh
docker build . --build-arg MODEL_SIZE=s --output . -f- <<'EOF'
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /dfine
RUN git clone https://github.com/Peterande/D-FINE.git .
RUN uv pip install --system -r requirements.txt
RUN uv pip install --system onnx onnxruntime onnxsim
# Create output directory and download checkpoint
RUN mkdir -p output
ARG MODEL_SIZE
RUN wget https://github.com/Peterande/storage/releases/download/dfinev1.0/dfine_${MODEL_SIZE}_obj2coco.pth -O output/dfine_${MODEL_SIZE}_obj2coco.pth
# Modify line 58 of export_onnx.py to change batch size to 1
RUN sed -i '58s/data = torch.rand(.*)/data = torch.rand(1, 3, 640, 640)/' tools/deployment/export_onnx.py
RUN python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_${MODEL_SIZE}_obj2coco.yml -r output/dfine_${MODEL_SIZE}_obj2coco.pth
FROM scratch
ARG MODEL_SIZE
COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL_SIZE}.onnx
EOF
```

```
python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth
```

:::tip

Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually.

Make sure you change the batch size to 1 before exporting.

:::
@@ -1374,23 +1447,25 @@ python3 yolo_to_onnx.py -m yolov7-320

#### YOLOv9

YOLOv9 models can be exported as ONNX using the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=t` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available sizes are `t`, `s`, `m`, `c`, and `e`).
YOLOv9 models can be exported as ONNX using the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=t` and `IMG_SIZE=320` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available model sizes are `t`, `s`, `m`, `c`, and `e`; common image sizes are `320` and `640`).

```sh
docker build . --build-arg MODEL_SIZE=t --output . -f- <<'EOF'
docker build . --build-arg MODEL_SIZE=t --build-arg IMG_SIZE=320 --output . -f- <<'EOF'
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /yolov9
ADD https://github.com/WongKinYiu/yolov9.git .
RUN uv pip install --system -r requirements.txt
RUN uv pip install --system onnx onnxruntime onnx-simplifier>=0.4.1
RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1
ARG MODEL_SIZE
ARG IMG_SIZE
ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt
RUN sed -i "s/ckpt = torch.load(attempt_download(w), map_location='cpu')/ckpt = torch.load(attempt_download(w), map_location='cpu', weights_only=False)/g" models/experimental.py
RUN python3 export.py --weights ./yolov9-${MODEL_SIZE}.pt --imgsz 320 --simplify --include onnx
RUN python3 export.py --weights ./yolov9-${MODEL_SIZE}.pt --imgsz ${IMG_SIZE} --simplify --include onnx
FROM scratch
ARG MODEL_SIZE
COPY --from=build /yolov9/yolov9-${MODEL_SIZE}.onnx /
ARG IMG_SIZE
COPY --from=build /yolov9/yolov9-${MODEL_SIZE}.onnx /yolov9-${MODEL_SIZE}-${IMG_SIZE}.onnx
EOF
```
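Once exported, the resulting file (e.g. `yolov9-t-320.onnx`) can be referenced from the model config in the same way as the other ONNX YOLO examples in this document (a sketch; the `model_cache` path is an assumption, place the file wherever your config folder is mounted):

```yaml
model:
  model_type: yolo-generic
  width: 320
  height: 320
  input_tensor: nchw
  path: /config/model_cache/yolov9-t-320.onnx
```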
@@ -287,6 +287,9 @@ detect:
  max_disappeared: 25
  # Optional: Configuration for stationary object tracking
  stationary:
    # Optional: Stationary classifier that uses visual characteristics to determine if an object
    # is stationary even if the box changes enough to be considered motion (default: shown below).
    classifier: True
    # Optional: Frequency for confirming stationary objects (default: same as threshold)
    # When set to 1, object detection will run to confirm the object still exists on every frame.
    # If set to 10, object detection will run to confirm the object still exists on every 10th frame.
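Since the new option defaults to `True`, the main reason to set it explicitly is to turn it off; a minimal sketch:

```yaml
detect:
  stationary:
    classifier: False
```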
@@ -697,7 +700,7 @@ audio_transcription:
  language: en

# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.9)
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:
@@ -7,7 +7,7 @@ title: Restream

Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.

Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.9) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` section in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration) for more advanced configurations and features.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` section in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features.

:::note

@@ -156,7 +156,7 @@ See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-22

## Advanced Restream Configurations

The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

NOTE: The output will need to be passed with two curly braces `{{output}}`
@@ -56,6 +56,7 @@ Frigate supports multiple different detectors that work on different types of ha
  - Runs best with tiny or small size models

- [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.

  - [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)

- [MemryX](#memryx-mx3): The MX3 M.2 accelerator module is available in m.2 format allowing for a wide range of compatibility with devices.
@@ -94,8 +95,21 @@ Frigate supports multiple different detectors that work on different types of ha
  - Runs best with tiny or small size models
  - Runs efficiently on low power hardware

**Synaptics**

- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs to provide efficient object detection.

:::

### Synaptics

- **Synaptics** Default model is **mobilenet**

| Name          | Synaptics SL1680 Inference Time |
| ------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms                         |
| yolov5m       | ~ 118 ms                        |

### Hailo-8

Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms, including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn’t provided.
@@ -110,6 +124,7 @@ In real-world deployments, even with multiple cameras running concurrently, Frig
| Name             | Hailo‑8 Inference Time | Hailo‑8L Inference Time |
| ---------------- | ---------------------- | ----------------------- |
| ssd mobilenet v1 | ~ 6 ms                 | ~ 10 ms                 |
| yolov9-tiny      |                        | 320: 18ms               |
| yolov6n          | ~ 7 ms                 | ~ 11 ms                 |

### Google Coral TPU
@@ -142,17 +157,19 @@ More information is available [in the detector docs](/configuration/object_detec
|
||||
|
||||
Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below:
|
||||
|
||||
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
|
||||
| -------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- |
|
||||
| Intel HD 530 | 15 - 35 ms | | | Can only run one detector instance |
|
||||
| Intel HD 620 | 15 - 25 ms | 320: ~ 35 ms | | |
|
||||
| Intel HD 630 | ~ 15 ms | 320: ~ 30 ms | | |
|
||||
| Intel UHD 730 | ~ 10 ms | 320: ~ 19 ms 640: ~ 54 ms | | |
|
||||
| Intel UHD 770 | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
|
||||
| Intel N100 | ~ 15 ms | 320: ~ 25 ms | | Can only run one detector instance |
|
||||
| Intel Iris XE | ~ 10 ms | 320: ~ 18 ms 640: ~ 50 ms | | |
|
||||
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
|
||||
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | |
|
||||
| Name | MobileNetV2 Inference Time | YOLOv9 | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
|
||||
| -------------- | -------------------------- | ------------------------------------------------- | ------------------------- | ---------------------- | ---------------------------------- |
|
||||
| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance |
|
||||
| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | |
|
||||
| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | |
|
||||
| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | |
|
||||
| Intel UHD 770 | ~ 15 ms | t-320: ~ 16 ms s-320: ~ 20 ms s-640: ~ 40 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
|
||||
| Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance |
|
||||
| Intel N150 | ~ 15 ms | t-320: 16 ms s-320: 24 ms | | | |
|
||||
| Intel Iris XE | ~ 10 ms | s-320: 12 ms s-640: 30 ms | 320: ~ 18 ms 640: ~ 50 ms | | |
|
||||
| Intel Arc A310 | ~ 5 ms | t-320: 7 ms t-640: 11 ms s-320: 8 ms s-640: 15 ms | 320: ~ 8 ms 640: ~ 14 ms | | |
|
||||
| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
|
||||
| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | |
|
||||
|
||||
### TensorRT - Nvidia GPU
|
||||
|
||||
@@ -160,7 +177,7 @@ Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA
|
||||
|
||||
#### Minimum Hardware Support
|
||||
|
||||
12.x series of CUDA libraries are used which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
|
||||
12.x series of CUDA libraries are used which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
|
||||
|
||||
Make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
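As a hedged sketch, passing the GPU through with Docker Compose typically looks like the following (assuming the NVIDIA Container Toolkit is installed on the host; the image tag is only an example):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable-tensorrt
    deploy:
      resources:
        reservations:
          devices:
            # expose one NVIDIA GPU to the container
            - driver: nvidia
              count: 1
              capabilities: [gpu]
```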
|
||||
|
||||
@@ -180,12 +197,13 @@ Inference speeds will vary greatly depending on the GPU and the model used.
|
||||
✅ - Accelerated with CUDA Graphs
|
||||
❌ - Not accelerated with CUDA Graphs
|
||||
|
||||
| Name | ✅ YOLOv9 Inference Time | ✅ RF-DETR Inference Time | ❌ YOLO-NAS Inference Time
|
||||
| --------------- | ------------------------ | ------------------------- | -------------------------- |
|
||||
| RTX 3050 | t-320: 8 ms s-320: 10 ms | Nano-320: ~ 12 ms | 320: ~ 10 ms 640: ~ 16 ms |
|
||||
| RTX 3070 | t-320: 6 ms s-320: 8 ms | Nano-320: ~ 9 ms | 320: ~ 8 ms 640: ~ 14 ms |
|
||||
| RTX A4000 | | | 320: ~ 15 ms |
|
||||
| Tesla P40 | | | 320: ~ 105 ms |
|
||||
| Name | ✅ YOLOv9 Inference Time | ✅ RF-DETR Inference Time | ❌ YOLO-NAS Inference Time |
|
||||
| --------- | ------------------------------------- | ------------------------- | -------------------------- |
|
||||
| GTX 1070 | s-320: 16 ms | | 320: 14 ms |
|
||||
| RTX 3050 | t-320: 8 ms s-320: 10 ms s-640: 28 ms | Nano-320: ~ 12 ms | 320: ~ 10 ms 640: ~ 16 ms |
|
||||
| RTX 3070 | t-320: 6 ms s-320: 8 ms s-640: 25 ms | Nano-320: ~ 9 ms | 320: ~ 8 ms 640: ~ 14 ms |
|
||||
| RTX A4000 | | | 320: ~ 15 ms |
|
||||
| Tesla P40 | | | 320: ~ 105 ms |
|
||||
|
||||
### Apple Silicon
|
||||
|
||||
@@ -197,18 +215,20 @@ Apple Silicon can not run within a container, so a ZMQ proxy is utilized to comm
|
||||
|
||||
:::
|
||||
|
||||
| Name | YOLOv9 Inference Time |
|
||||
| --------- | ---------------------- |
|
||||
| M3 Pro | t-320: 6 ms s-320: 8ms |
|
||||
| M1 | s-320: 9ms |
|
||||
| Name | YOLOv9 Inference Time |
|
||||
| ------ | ------------------------------------ |
|
||||
| M4 | s-320: 10 ms |
|
||||
| M3 Pro | t-320: 6 ms s-320: 8 ms s-640: 20 ms |
|
||||
| M1 | s-320: 9ms |
|
||||
|
||||
### ROCm - AMD GPU
|
||||
|
||||
With the [ROCm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
|
||||
|
||||
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
|
||||
| --------- | ------------------------- | ------------------------- |
|
||||
| AMD 780M | t-320: 14 ms s-320: 20 ms | 320: ~ 25 ms 640: ~ 50 ms |
|
||||
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
|
||||
| --------- | --------------------------- | ------------------------- |
|
||||
| AMD 780M | t-320: ~ 14 ms s-320: 20 ms | 320: ~ 25 ms 640: ~ 50 ms |
|
||||
| AMD 8700G | | 320: ~ 20 ms 640: ~ 40 ms |
|
||||
|
||||
## Community Supported Detectors
|
||||
|
||||
@@ -227,14 +247,14 @@ Detailed information is available [in the detector docs](/configuration/object_d
|
||||
The MX3 is a pipelined architecture, so the maximum frames per second it supports (and thus the number of supported cameras) cannot be calculated as `1/latency` (1/"Inference Time") and is measured separately. When estimating how many camera streams your configuration may support, use the **MX3 Total FPS** column as an approximation of the detector's limit, not the Inference Time.
|
||||
|
||||
| Model | Input Size | MX3 Inference Time | MX3 Total FPS |
|
||||
|----------------------|------------|--------------------|---------------|
|
||||
| -------------------- | ---------- | ------------------ | ------------- |
|
||||
| YOLO-NAS-Small | 320 | ~ 9 ms | ~ 378 |
|
||||
| YOLO-NAS-Small | 640 | ~ 21 ms | ~ 138 |
|
||||
| YOLOv9s | 320 | ~ 16 ms | ~ 382 |
|
||||
| YOLOv9s | 640 | ~ 41 ms | ~ 110 |
|
||||
| YOLOX-Small | 640 | ~ 16 ms | ~ 263 |
|
||||
| SSDlite MobileNet v2 | 320 | ~ 5 ms | ~ 1056 |
|
||||
|
||||
|
||||
Inference speeds may vary depending on the host platform. The above data was measured on an **Intel 13700 CPU**. Platforms like Raspberry Pi, Orange Pi, and other ARM-based SBCs have different levels of processing capability, which may limit total FPS.
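As a rough worked example: YOLOv9s at 320 tops out around 382 total FPS, so with each camera running detection at, say, 5 FPS, the module could in principle keep up with roughly 382 / 5 ≈ 76 detection streams; host-side decoding and motion-detection overhead will lower that limit in practice.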
|
||||
|
||||
### Nvidia Jetson
|
||||
|
@@ -256,6 +256,37 @@ or add these options to your `docker run` command:
|
||||
|
||||
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform).
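For reference, a minimal detector sketch for the Rockchip NPU might look like this (the core count is an assumption for an RK3588-class SoC; see the detector docs linked above for the options that apply to your hardware):

```yaml
detectors:
  rknn:
    type: rknn
    num_cores: 3
```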
|
||||
|
||||
### Synaptics
|
||||
|
||||
- SL1680
|
||||
|
||||
#### Setup
|
||||
|
||||
Follow Frigate's default installation instructions, but use a docker image with the `-synaptics` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-synaptics`.
|
||||
|
||||
Next, you need to grant docker permissions to access your hardware:
|
||||
|
||||
- During the configuration process, you should run docker in privileged mode to avoid any errors due to insufficient permissions. To do so, add `privileged: true` to your `docker-compose.yml` file or the `--privileged` flag to your docker run command.
|
||||
|
||||
```yaml
|
||||
devices:
|
||||
- /dev/synap
|
||||
- /dev/video0
|
||||
- /dev/video1
|
||||
```
|
||||
|
||||
or add these options to your `docker run` command:
|
||||
|
||||
```
|
||||
--device /dev/synap \
|
||||
--device /dev/video0 \
|
||||
--device /dev/video1
|
||||
```
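Putting these pieces together, a hedged Docker Compose sketch for the Synaptics image could look like this:

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable-synaptics
    privileged: true
    devices:
      - /dev/synap
      - /dev/video0
      - /dev/video1
```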
|
||||
|
||||
#### Configuration
|
||||
|
||||
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
|
||||
|
||||
## Docker
|
||||
|
||||
Running through Docker with Docker Compose is the recommended install method.
|
||||
|
@@ -5,7 +5,7 @@ title: Updating
|
||||
|
||||
# Updating Frigate
|
||||
|
||||
The current stable version of Frigate is **0.16.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.0).
|
||||
The current stable version of Frigate is **0.16.1**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.1).
|
||||
|
||||
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
|
||||
|
||||
@@ -33,21 +33,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
2. **Update and Pull the Latest Image**:
|
||||
|
||||
- If using Docker Compose:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.0` instead of `0.15.2`). For example:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.1` instead of `0.15.2`). For example:
|
||||
```yaml
|
||||
services:
|
||||
frigate:
|
||||
image: ghcr.io/blakeblackshear/frigate:0.16.0
|
||||
image: ghcr.io/blakeblackshear/frigate:0.16.1
|
||||
```
|
||||
- Then pull the image:
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.0
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
|
||||
```
|
||||
- **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually; pulling the image again will always fetch the latest stable release.
|
||||
- If using `docker run`:
|
||||
- Pull the image with the appropriate tag (e.g., `0.16.0`, `0.16.0-tensorrt`, or `stable`):
|
||||
- Pull the image with the appropriate tag (e.g., `0.16.1`, `0.16.1-tensorrt`, or `stable`):
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.0
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
|
||||
```
|
||||
|
||||
3. **Start the Container**:
|
||||
|
@@ -3,17 +3,15 @@ id: configuring_go2rtc
|
||||
title: Configuring go2rtc
|
||||
---
|
||||
|
||||
# Configuring go2rtc
|
||||
|
||||
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
|
||||
|
||||
- WebRTC or MSE for live viewing with audio and with higher resolutions and frame rates than the jsmpeg stream, which is limited to the detect stream and does not support audio
|
||||
- Live stream support for cameras in Home Assistant Integration
|
||||
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
|
||||
|
||||
# Setup a go2rtc stream
|
||||
## Setup a go2rtc stream
|
||||
|
||||
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#module-streams), not just rtsp.
|
||||
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
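For instance, a minimal stream entry might look like the following (the camera name and RTSP URL are placeholders for your own):

```yaml
go2rtc:
  streams:
    back_yard:
      - rtsp://user:password@192.168.1.10:554/stream1
```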
|
||||
|
||||
:::tip
|
||||
|
||||
@@ -49,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
|
||||
- Check Video Codec:
|
||||
|
||||
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
|
||||
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#codecs-madness) in go2rtc documentation.
|
||||
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
|
||||
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
|
||||
- If you are unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource-intensive task, and you may be better off using the built-in jsmpeg view.
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
@@ -111,11 +109,11 @@ section.
|
||||
|
||||
:::
|
||||
|
||||
## Next steps
|
||||
### Next steps
|
||||
|
||||
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
|
||||
2. You can [set up WebRTC](/configuration/live#webrtc-extra-configuration) if your camera supports two-way talk. Note that WebRTC only supports specific audio formats and may require opening ports on your router.
|
||||
|
||||
## Important considerations
|
||||
## HomeKit Configuration
|
||||
|
||||
If you are configuring go2rtc to publish HomeKit camera streams, on pairing the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container. These changes must be manually copied across to the `go2rtc` section of your Frigate configuration in order to persist through restarts.
|
||||
To add camera streams to HomeKit, Frigate must be configured in Docker to use `host` networking mode. Once that is done, you can use the go2rtc WebUI (accessed via port 1984, which is disabled by default) to share/export a camera to HomeKit. Any changes made will automatically be saved to `/config/go2rtc_homekit.yml`.
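A minimal sketch of the corresponding Docker Compose change for the host networking requirement above:

```yaml
services:
  frigate:
    network_mode: host
```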
|
@@ -185,6 +185,26 @@ For clips to be castable to media devices, audio is required and may need to be
|
||||
|
||||
<a name="api"></a>
|
||||
|
||||
## Camera API
|
||||
|
||||
To disable a camera dynamically, call the `camera.turn_off` action:
|
||||
|
||||
```yaml
|
||||
action: camera.turn_off
|
||||
data: {}
|
||||
target:
|
||||
entity_id: camera.back_deck_cam # your Frigate camera entity ID
|
||||
```
|
||||
|
||||
To re-enable a camera that has been disabled dynamically, call the `camera.turn_on` action:
|
||||
|
||||
```yaml
|
||||
action: camera.turn_on
|
||||
data: {}
|
||||
target:
|
||||
entity_id: camera.back_deck_cam # your Frigate camera entity ID
|
||||
```
|
||||
|
||||
## Notification API
|
||||
|
||||
Many people do not want to expose Frigate to the web, so the integration creates some public API endpoints that can be used for notifications.
|
||||
|
@@ -29,12 +29,12 @@ Message published for each changed tracked object. The first message is publishe
|
||||
"camera": "front_door",
|
||||
"frame_time": 1607123961.837752,
|
||||
"snapshot": {
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": [],
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": []
|
||||
},
|
||||
"label": "person",
|
||||
"sub_label": null,
|
||||
@@ -61,6 +61,7 @@ Message published for each changed tracked object. The first message is publishe
|
||||
}, // attributes with top score that have been identified on the object at any point
|
||||
"current_attributes": [], // detailed data about the current attributes in this frame
|
||||
"current_estimated_speed": 0.71, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
|
||||
"average_estimated_speed": 14.3, // average estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
|
||||
"velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
|
||||
"recognized_license_plate": "ABC12345", // a recognized license plate for car objects
|
||||
"recognized_license_plate_score": 0.933451
|
||||
@@ -70,12 +71,12 @@ Message published for each changed tracked object. The first message is publishe
|
||||
"camera": "front_door",
|
||||
"frame_time": 1607123962.082975,
|
||||
"snapshot": {
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": [],
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": []
|
||||
},
|
||||
"label": "person",
|
||||
"sub_label": ["John Smith", 0.79],
|
||||
@@ -109,6 +110,7 @@ Message published for each changed tracked object. The first message is publishe
|
||||
}
|
||||
],
|
||||
"current_estimated_speed": 0.77, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
|
||||
"average_estimated_speed": 14.31, // average estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
|
||||
"velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
|
||||
"recognized_license_plate": "ABC12345", // a recognized license plate for car objects
|
||||
"recognized_license_plate_score": 0.933451
|
||||
|
@@ -34,6 +34,12 @@ Model IDs are not secret values and can be shared freely. Access to your model i
|
||||
|
||||
:::
|
||||
|
||||
:::tip
|
||||
|
||||
When setting the Frigate+ model ID, all other model fields should be removed, as these are configured automatically by the Frigate+ model config.
|
||||
|
||||
:::
|
||||
|
||||
## Step 4: Adjust your object filters for higher scores
|
||||
|
||||
Frigate+ models generally have much higher scores than the default model provided in Frigate. You will likely need to increase your `threshold` and `min_score` values. Here is an example of how these values can be refined, but you should expect these to evolve as your model improves. For more information about how `threshold` and `min_score` are related, see the docs on [object filters](../configuration/object_filters.md#object-scores).
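A hedged illustration of such a refinement (the numbers are placeholders to show the shape of the config, not recommended values):

```yaml
objects:
  filters:
    person:
      min_score: 0.6
      threshold: 0.75
```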
|
||||
|
@@ -11,34 +11,51 @@ Information on how to integrate Frigate+ with Frigate can be found in the [integ
|
||||
|
||||
## Available model types
|
||||
|
||||
There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
|
||||
There are three model types offered in Frigate+: `mobiledet`, `yolonas`, and `yolov9`. All of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
|
||||
|
||||
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.
|
||||
|
||||
| Model Type | Description |
|
||||
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
|
||||
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
|
||||
| Model Type | Description |
|
||||
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
|
||||
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
|
||||
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, Apple Silicon\*, and Rockchip NPUs. |
|
||||
|
||||
_\* Support coming in 0.17_
|
||||
|
||||
### YOLOv9 Details
|
||||
|
||||
YOLOv9 models are available in `s` and `t` sizes. When requesting a `yolov9` model, you will be prompted to choose a size. If you are unsure what size to choose, you should perform some tests with the base models to find the performance level that suits you. The `s` size is most similar to the current `yolonas` models in terms of inference times and accuracy, and a good place to start is the `320x320` resolution model for `yolov9s`.
|
||||
|
||||
:::info
|
||||
|
||||
When switching to YOLOv9, you may need to adjust your thresholds for some objects.
|
||||
|
||||
:::
|
||||
|
||||
#### Hailo Support
|
||||
|
||||
If you have a Hailo device, you will need to specify which hardware you have when submitting a model request, because the resulting models are not cross-compatible. Please test using the available base models before submitting your model request.
|
||||
|
||||
#### Rockchip (RKNN) Support
|
||||
|
||||
For 0.16, YOLOv9 ONNX models will need to be manually converted. First, configure Frigate to use the model ID for your YOLOv9 ONNX model so that it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is coming in 0.17.
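As a sketch, pointing Frigate at the Frigate+ model so that it is downloaded into `model_cache` looks roughly like this (the ID is a placeholder):

```yaml
model:
  path: plus://<your_model_id>
```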
|
||||
|
||||
## Supported detector types
|
||||
|
||||
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), and ONNX (`onnx`) detectors.
|
||||
|
||||
:::warning
|
||||
|
||||
Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later.
|
||||
|
||||
:::
|
||||
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), Hailo (`hailo8l`), and Rockchip\* (`rknn`) detectors.
|
||||
|
||||
| Hardware | Recommended Detector Type | Recommended Model Type |
|
||||
| -------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
|
||||
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
|
||||
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
|
||||
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
|
||||
| [NVidia GPU](/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
|
||||
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` |
|
||||
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolov9` |
|
||||
| [NVidia GPU](/configuration/object_detectors#onnx) | `onnx` | `yolov9` |
|
||||
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector) | `onnx` | `yolov9` |
|
||||
| [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` |
|
||||
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` |
|
||||
|
||||
_\* Requires Frigate 0.15_
|
||||
_\* Requires manual conversion in 0.16. Automatic conversion coming in 0.17._
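As a hedged illustration of one row in the table above, pairing an Intel iGPU with the `openvino` detector might look like this (the detector name and `GPU` device value are assumptions for a typical iGPU; see the detector docs for your hardware):

```yaml
detectors:
  ov:
    type: openvino
    device: GPU
```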
|
||||
|
||||
## Improving your model
|
||||
|
||||
|
@@ -5,14 +5,14 @@ import frigateHttpApiSidebar from "./docs/integrations/api/sidebar";
|
||||
const sidebars: SidebarsConfig = {
|
||||
docs: {
|
||||
Frigate: [
|
||||
'frigate/index',
|
||||
'frigate/hardware',
|
||||
'frigate/planning_setup',
|
||||
'frigate/installation',
|
||||
'frigate/updating',
|
||||
'frigate/camera_setup',
|
||||
'frigate/video_pipeline',
|
||||
'frigate/glossary',
|
||||
"frigate/index",
|
||||
"frigate/hardware",
|
||||
"frigate/planning_setup",
|
||||
"frigate/installation",
|
||||
"frigate/updating",
|
||||
"frigate/camera_setup",
|
||||
"frigate/video_pipeline",
|
||||
"frigate/glossary",
|
||||
],
|
||||
Guides: [
|
||||
"guides/getting_started",
|
||||
@@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
|
||||
{
|
||||
type: "link",
|
||||
label: "Go2RTC Configuration Reference",
|
||||
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration",
|
||||
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
|
||||
} as PropSidebarItemLink,
|
||||
],
|
||||
Detectors: [
|
||||
@@ -40,6 +40,19 @@ const sidebars: SidebarsConfig = {
|
||||
"configuration/face_recognition",
|
||||
"configuration/license_plate_recognition",
|
||||
"configuration/bird_classification",
|
||||
{
|
||||
type: "category",
|
||||
label: "Custom Classification",
|
||||
link: {
|
||||
type: "generated-index",
|
||||
title: "Custom Classification",
|
||||
description: "Configuration for custom classification models",
|
||||
},
|
||||
items: [
|
||||
"configuration/custom_classification/state_classification",
|
||||
"configuration/custom_classification/object_classification",
|
||||
],
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "Generative AI",
|
||||
@@ -106,11 +119,11 @@ const sidebars: SidebarsConfig = {
|
||||
"configuration/metrics",
|
||||
"integrations/third_party_extensions",
|
||||
],
|
||||
'Frigate+': [
|
||||
'plus/index',
|
||||
'plus/annotating',
|
||||
'plus/first_model',
|
||||
'plus/faq',
|
||||
"Frigate+": [
|
||||
"plus/index",
|
||||
"plus/annotating",
|
||||
"plus/first_model",
|
||||
"plus/faq",
|
||||
],
|
||||
Troubleshooting: [
|
||||
"troubleshooting/faqs",
|
||||
|
@@ -822,9 +822,9 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
|
||||
)
|
||||
def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
|
||||
async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
|
||||
"""VOD for specific hour. Uses the default timezone (UTC)."""
|
||||
return vod_hour(
|
||||
return await vod_hour(
|
||||
year_month, day, hour, camera_name, get_localzone_name().replace("/", ",")
|
||||
)
|
||||
|
||||
@@ -834,7 +834,9 @@ def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str)
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
|
||||
)
|
||||
def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: str):
|
||||
async def vod_hour(
|
||||
year_month: str, day: int, hour: int, camera_name: str, tz_name: str
|
||||
):
|
||||
parts = year_month.split("-")
|
||||
start_date = (
|
||||
datetime(int(parts[0]), int(parts[1]), day, hour, tzinfo=timezone.utc)
|
||||
@@ -844,7 +846,7 @@ def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: st
|
||||
start_ts = start_date.timestamp()
|
||||
end_ts = end_date.timestamp()
|
||||
|
||||
return vod_ts(camera_name, start_ts, end_ts)
|
||||
return await vod_ts(camera_name, start_ts, end_ts)
|
||||
|
||||
|
||||
@router.get(
|
||||
@@ -875,7 +877,7 @@ async def vod_event(
|
||||
if event.end_time is None
|
||||
else (event.end_time + padding)
|
||||
)
|
||||
vod_response = vod_ts(event.camera, event.start_time - padding, end_ts)
|
||||
vod_response = await vod_ts(event.camera, event.start_time - padding, end_ts)
|
||||
|
||||
# If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false
|
||||
if (
|
||||
@@ -1248,7 +1250,7 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
|
||||
|
||||
|
||||
@router.get("/events/{event_id}/clip.mp4")
|
||||
def event_clip(
|
||||
async def event_clip(
|
||||
request: Request,
|
||||
event_id: str,
|
||||
padding: int = Query(0, description="Padding to apply to clip."),
|
||||
@@ -1270,7 +1272,9 @@ def event_clip(
|
||||
if event.end_time is None
|
||||
else event.end_time + padding
|
||||
)
|
||||
return recording_clip(request, event.camera, event.start_time - padding, end_ts)
|
||||
return await recording_clip(
|
||||
request, event.camera, event.start_time - padding, end_ts
|
||||
)
|
||||
|
||||
|
||||
@router.get("/events/{event_id}/preview.gif")
|
||||
@@ -1698,7 +1702,7 @@ def preview_thumbnail(file_name: str):
|
||||
"/{camera_name}/{label}/thumbnail.jpg",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
)
|
||||
def label_thumbnail(request: Request, camera_name: str, label: str):
|
||||
async def label_thumbnail(request: Request, camera_name: str, label: str):
|
||||
label = unquote(label)
|
||||
event_query = Event.select(fn.MAX(Event.id)).where(Event.camera == camera_name)
|
||||
if label != "any":
|
||||
@@ -1707,7 +1711,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
|
||||
try:
|
||||
event_id = event_query.scalar()
|
||||
|
||||
return event_thumbnail(request, event_id, 60)
|
||||
return await event_thumbnail(request, event_id, Extension.jpg, 60)
|
||||
except DoesNotExist:
|
||||
frame = np.zeros((175, 175, 3), np.uint8)
|
||||
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
|
||||
@@ -1722,7 +1726,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
|
||||
@router.get(
|
||||
"/{camera_name}/{label}/clip.mp4", dependencies=[Depends(require_camera_access)]
|
||||
)
|
||||
def label_clip(request: Request, camera_name: str, label: str):
|
||||
async def label_clip(request: Request, camera_name: str, label: str):
|
||||
label = unquote(label)
|
||||
event_query = Event.select(fn.MAX(Event.id)).where(
|
||||
Event.camera == camera_name, Event.has_clip == True
|
||||
@@ -1733,7 +1737,7 @@ def label_clip(request: Request, camera_name: str, label: str):
|
||||
try:
|
||||
event = event_query.get()
|
||||
|
||||
return event_clip(request, event.id)
|
||||
return await event_clip(request, event.id)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Event not found"}, status_code=404
|
||||
@@ -1743,7 +1747,7 @@ def label_clip(request: Request, camera_name: str, label: str):
|
||||
@router.get(
|
||||
"/{camera_name}/{label}/snapshot.jpg", dependencies=[Depends(require_camera_access)]
|
||||
)
|
||||
def label_snapshot(request: Request, camera_name: str, label: str):
|
||||
async def label_snapshot(request: Request, camera_name: str, label: str):
|
||||
"""Returns the snapshot image from the latest event for the given camera and label combo"""
|
||||
label = unquote(label)
|
||||
if label == "any":
|
||||
@@ -1764,7 +1768,7 @@ def label_snapshot(request: Request, camera_name: str, label: str):
|
||||
|
||||
try:
|
||||
event: Event = event_query.get()
|
||||
return event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
|
||||
return await event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
|
||||
except DoesNotExist:
|
||||
frame = np.zeros((720, 1280, 3), np.uint8)
|
||||
_, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
|
||||
|
@@ -28,9 +28,6 @@ from frigate.comms.object_detector_signaler import DetectorProxy
|
||||
from frigate.comms.webpush import WebPushClient
|
||||
from frigate.comms.ws import WebSocketClient
|
||||
from frigate.comms.zmq_proxy import ZmqProxy
|
||||
from frigate.comms.zmq_req_router_broker import (
|
||||
ZmqReqRouterBroker,
|
||||
)
|
||||
from frigate.config.camera.updater import CameraConfigUpdatePublisher
|
||||
from frigate.config.config import FrigateConfig
|
||||
from frigate.const import (
|
||||
@@ -310,14 +307,6 @@ class FrigateApp:
|
||||
self.event_metadata_updater = EventMetadataPublisher()
|
||||
self.inter_zmq_proxy = ZmqProxy()
|
||||
self.detection_proxy = DetectorProxy()
|
||||
self.zmq_router_broker: ZmqReqRouterBroker | None = None
|
||||
|
||||
zmq_detectors = [
|
||||
det for det in self.config.detectors.values() if det.type == "zmq"
|
||||
]
|
||||
if any(zmq_detectors):
|
||||
backend_endpoint = zmq_detectors[0].endpoint
|
||||
self.zmq_router_broker = ZmqReqRouterBroker(backend_endpoint)
|
||||
|
||||
def init_onvif(self) -> None:
|
||||
self.onvif_controller = OnvifController(self.config, self.ptz_metrics)
|
||||
@@ -655,9 +644,6 @@ class FrigateApp:
|
||||
self.inter_zmq_proxy.stop()
|
||||
self.detection_proxy.stop()
|
||||
|
||||
if self.zmq_router_broker:
|
||||
self.zmq_router_broker.stop()
|
||||
|
||||
while len(self.detection_shms) > 0:
|
||||
shm = self.detection_shms.pop()
|
||||
shm.close()
|
||||
|
@@ -2,6 +2,7 @@
|
||||
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
from .zmq_proxy import Publisher, Subscriber
|
||||
|
||||
@@ -10,18 +11,21 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class RecordingsDataTypeEnum(str, Enum):
|
||||
all = ""
|
||||
recordings_available_through = "recordings_available_through"
|
||||
saved = "saved" # segment has been saved to db
|
||||
latest = "latest" # segment is in cache
|
||||
valid = "valid" # segment is valid
|
||||
invalid = "invalid" # segment is invalid
|
||||
|
||||
|
||||
class RecordingsDataPublisher(Publisher[tuple[str, float]]):
|
||||
class RecordingsDataPublisher(Publisher[Any]):
|
||||
"""Publishes latest recording data."""
|
||||
|
||||
topic_base = "recordings/"
|
||||
|
||||
def __init__(self, topic: RecordingsDataTypeEnum) -> None:
|
||||
super().__init__(topic.value)
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
def publish(self, payload: tuple[str, float], sub_topic: str = "") -> None:
|
||||
def publish(self, payload: Any, sub_topic: str = "") -> None:
|
||||
super().publish(payload, sub_topic)
|
||||
|
||||
|
||||
@@ -32,3 +36,11 @@ class RecordingsDataSubscriber(Subscriber):
|
||||
|
||||
def __init__(self, topic: RecordingsDataTypeEnum) -> None:
|
||||
super().__init__(topic.value)
|
||||
|
||||
def _return_object(
|
||||
self, topic: str, payload: tuple | None
|
||||
) -> tuple[str, Any] | tuple[None, None]:
|
||||
if payload is None:
|
||||
return (None, None)
|
||||
|
||||
return (topic, payload)
|
||||
|
@@ -1,61 +0,0 @@
|
||||
"""ZMQ REQ/ROUTER front-end to DEALER/REP back-end broker.
|
||||
|
||||
This module provides a small proxy that:
|
||||
- Binds a ROUTER socket on a fixed local endpoint for REQ clients
|
||||
- Connects a DEALER socket to the user-configured backend endpoint (REP servers)
|
||||
|
||||
Pattern: REQ -> ROUTER === proxy === DEALER -> REP
|
||||
|
||||
The goal is to allow multiple REQ clients and/or multiple backend workers
|
||||
to share a single configured connection, enabling multiple models/runners
|
||||
behind the same broker while keeping local clients stable via constants.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import threading
|
||||
|
||||
import zmq
|
||||
|
||||
REQ_ROUTER_ENDPOINT = "ipc:///tmp/cache/zmq_detector_router"
|
||||
|
||||
|
||||
class _RouterDealerRunner(threading.Thread):
|
||||
def __init__(self, context: zmq.Context[zmq.Socket], backend_endpoint: str) -> None:
|
||||
super().__init__(name="zmq_router_dealer_broker", daemon=True)
|
||||
self.context = context
|
||||
self.backend_endpoint = backend_endpoint
|
||||
|
||||
def run(self) -> None:
|
||||
frontend = self.context.socket(zmq.ROUTER)
|
||||
frontend.bind(REQ_ROUTER_ENDPOINT)
|
||||
|
||||
backend = self.context.socket(zmq.DEALER)
|
||||
backend.bind(self.backend_endpoint)
|
||||
|
||||
try:
|
||||
zmq.proxy(frontend, backend)
|
||||
except zmq.ZMQError:
|
||||
# Unblocked when context is destroyed in the controller
|
||||
pass
|
||||
|
||||
|
||||
class ZmqReqRouterBroker:
|
||||
"""Starts a ROUTER/DEALER proxy bridging local REQ clients to backend REP.
|
||||
|
||||
- ROUTER binds to REQ_ROUTER_ENDPOINT (constant, local)
|
||||
- DEALER connects to the provided backend_endpoint (user-configured)
|
||||
"""
|
||||
|
||||
def __init__(self, backend_endpoint: str) -> None:
|
||||
self.backend_endpoint = backend_endpoint
|
||||
self.context = zmq.Context()
|
||||
self.runner = _RouterDealerRunner(self.context, backend_endpoint)
|
||||
self.runner.start()
|
||||
|
||||
def stop(self) -> None:
|
||||
# Destroying the context signals the proxy to stop
|
||||
try:
|
||||
self.context.destroy()
|
||||
finally:
|
||||
self.runner.join()
|
@@ -29,6 +29,10 @@ class StationaryConfig(FrigateBaseModel):
|
||||
default_factory=StationaryMaxFramesConfig,
|
||||
title="Max frames for stationary objects.",
|
||||
)
|
||||
classifier: bool = Field(
|
||||
default=True,
|
||||
title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary.",
|
||||
)
|
||||
|
||||
|
||||
class DetectConfig(FrigateBaseModel):
|
||||
|
@@ -92,6 +92,15 @@ class GenAIReviewConfig(FrigateBaseModel):
|
||||
title="Preferred language for GenAI Response",
|
||||
default=None,
|
||||
)
|
||||
activity_context_prompt: str = Field(
|
||||
default="""- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
|
||||
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
|
||||
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
|
||||
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
|
||||
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
|
||||
""",
|
||||
title="Custom activity context prompt defining normal activity patterns for this property.",
|
||||
)
|
||||
|
||||
|
||||
class ReviewConfig(FrigateBaseModel):
|
||||
|
@@ -93,7 +93,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
|
||||
if camera_config.review.genai.debug_save_thumbnails:
|
||||
id = data["after"]["id"]
|
||||
Path(os.path.join(CLIPS_DIR, f"genai-requests/{id}")).mkdir(
|
||||
Path(os.path.join(CLIPS_DIR, "genai-requests", f"{id}")).mkdir(
|
||||
parents=True, exist_ok=True
|
||||
)
|
||||
shutil.copy(
|
||||
@@ -124,6 +124,9 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
if topic == EmbeddingsRequestEnum.summarize_review.value:
|
||||
start_ts = request_data["start_ts"]
|
||||
end_ts = request_data["end_ts"]
|
||||
logger.debug(
|
||||
f"Found GenAI Review Summary request for {start_ts} to {end_ts}"
|
||||
)
|
||||
items: list[dict[str, Any]] = [
|
||||
r["data"]["metadata"]
|
||||
for r in (
|
||||
@@ -141,7 +144,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
|
||||
if len(items) == 0:
|
||||
logger.debug("No review items with metadata found during time period")
|
||||
return None
|
||||
return "No activity was found during this time."
|
||||
|
||||
important_items = list(
|
||||
filter(
|
||||
@@ -154,8 +157,16 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
if not important_items:
|
||||
return "No concerns were found during this time period."
|
||||
|
||||
if self.config.review.genai.debug_save_thumbnails:
|
||||
Path(
|
||||
os.path.join(CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}")
|
||||
).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
return self.genai_client.generate_review_summary(
|
||||
start_ts, end_ts, important_items
|
||||
start_ts,
|
||||
end_ts,
|
||||
important_items,
|
||||
self.config.review.genai.debug_save_thumbnails,
|
||||
)
|
||||
else:
|
||||
return None
|
||||
@@ -248,6 +259,7 @@ def run_analysis(
|
||||
genai_config.additional_concerns,
|
||||
genai_config.preferred_language,
|
||||
genai_config.debug_save_thumbnails,
|
||||
genai_config.activity_context_prompt,
|
||||
)
|
||||
review_inference_speed.update(datetime.datetime.now().timestamp() - start)
|
||||
|
||||
|
@@ -19,3 +19,4 @@ class ReviewMetadata(BaseModel):
|
||||
default=None,
|
||||
description="Other concerns highlighted by the user that are observed.",
|
||||
)
|
||||
time: str | None = Field(default=None, description="Time of activity.")
|
||||
|
@@ -42,10 +42,13 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
|
||||
self.detected_birds: dict[str, float] = {}
|
||||
self.labelmap: dict[int, str] = {}
|
||||
|
||||
GITHUB_RAW_ENDPOINT = os.environ.get(
|
||||
"GITHUB_RAW_ENDPOINT", "https://raw.githubusercontent.com"
|
||||
)
|
||||
download_path = os.path.join(MODEL_CACHE_DIR, "bird")
|
||||
self.model_files = {
|
||||
"bird.tflite": "https://raw.githubusercontent.com/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite",
|
||||
"birdmap.txt": "https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt",
|
||||
"bird.tflite": f"{GITHUB_RAW_ENDPOINT}/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite",
|
||||
"birdmap.txt": f"{GITHUB_RAW_ENDPOINT}/google-coral/test_data/master/inat_bird_labels.txt",
|
||||
}
|
||||
|
||||
if not all(
|
||||
|
@@ -48,9 +48,9 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
self.requestor = requestor
|
||||
self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
|
||||
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
|
||||
self.interpreter: Interpreter = None
|
||||
self.tensor_input_details: dict[str, Any] = None
|
||||
self.tensor_output_details: dict[str, Any] = None
|
||||
self.interpreter: Interpreter | None = None
|
||||
self.tensor_input_details: dict[str, Any] | None = None
|
||||
self.tensor_output_details: dict[str, Any] | None = None
|
||||
self.labelmap: dict[int, str] = {}
|
||||
self.classifications_per_second = EventsPerSecond()
|
||||
self.inference_speed = InferenceSpeed(
|
||||
@@ -61,17 +61,24 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
|
||||
@redirect_output_to_logger(logger, logging.DEBUG)
|
||||
def __build_detector(self) -> None:
|
||||
model_path = os.path.join(self.model_dir, "model.tflite")
|
||||
labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
|
||||
|
||||
if not os.path.exists(model_path) or not os.path.exists(labelmap_path):
|
||||
self.interpreter = None
|
||||
self.tensor_input_details = None
|
||||
self.tensor_output_details = None
|
||||
self.labelmap = {}
|
||||
return
|
||||
|
||||
self.interpreter = Interpreter(
|
||||
model_path=os.path.join(self.model_dir, "model.tflite"),
|
||||
model_path=model_path,
|
||||
num_threads=2,
|
||||
)
|
||||
self.interpreter.allocate_tensors()
|
||||
self.tensor_input_details = self.interpreter.get_input_details()
|
||||
self.tensor_output_details = self.interpreter.get_output_details()
|
||||
self.labelmap = load_labels(
|
||||
os.path.join(self.model_dir, "labelmap.txt"),
|
||||
prefill=0,
|
||||
)
|
||||
self.labelmap = load_labels(labelmap_path, prefill=0)
|
||||
self.classifications_per_second.start()
|
||||
|
||||
def __update_metrics(self, duration: float) -> None:
|
||||
@@ -140,6 +147,16 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
logger.warning("Failed to resize image for state classification")
|
||||
return
|
||||
|
||||
if self.interpreter is None:
|
||||
write_classification_attempt(
|
||||
self.train_dir,
|
||||
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
|
||||
now,
|
||||
"unknown",
|
||||
0.0,
|
||||
)
|
||||
return
|
||||
|
||||
input = np.expand_dims(frame, axis=0)
|
||||
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
|
||||
self.interpreter.invoke()
|
||||
@@ -197,10 +214,10 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
self.model_config = model_config
|
||||
self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
|
||||
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
|
||||
self.interpreter: Interpreter = None
|
||||
self.interpreter: Interpreter | None = None
|
||||
self.sub_label_publisher = sub_label_publisher
|
||||
self.tensor_input_details: dict[str, Any] = None
|
||||
self.tensor_output_details: dict[str, Any] = None
|
||||
self.tensor_input_details: dict[str, Any] | None = None
|
||||
self.tensor_output_details: dict[str, Any] | None = None
|
||||
self.detected_objects: dict[str, float] = {}
|
||||
self.labelmap: dict[int, str] = {}
|
||||
self.classifications_per_second = EventsPerSecond()
|
||||
@@ -211,17 +228,24 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
|
||||
@redirect_output_to_logger(logger, logging.DEBUG)
|
||||
def __build_detector(self) -> None:
|
||||
model_path = os.path.join(self.model_dir, "model.tflite")
|
||||
labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
|
||||
|
||||
if not os.path.exists(model_path) or not os.path.exists(labelmap_path):
|
||||
self.interpreter = None
|
||||
self.tensor_input_details = None
|
||||
self.tensor_output_details = None
|
||||
self.labelmap = {}
|
||||
return
|
||||
|
||||
self.interpreter = Interpreter(
|
||||
model_path=os.path.join(self.model_dir, "model.tflite"),
|
||||
model_path=model_path,
|
||||
num_threads=2,
|
||||
)
|
||||
self.interpreter.allocate_tensors()
|
||||
self.tensor_input_details = self.interpreter.get_input_details()
|
||||
self.tensor_output_details = self.interpreter.get_output_details()
|
||||
self.labelmap = load_labels(
|
||||
os.path.join(self.model_dir, "labelmap.txt"),
|
||||
prefill=0,
|
||||
)
|
||||
self.labelmap = load_labels(labelmap_path, prefill=0)
|
||||
|
||||
def __update_metrics(self, duration: float) -> None:
|
||||
self.classifications_per_second.update()
|
||||
@@ -265,6 +289,16 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
logger.warning("Failed to resize image for state classification")
|
||||
return
|
||||
|
||||
if self.interpreter is None:
|
||||
write_classification_attempt(
|
||||
self.train_dir,
|
||||
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
|
||||
now,
|
||||
"unknown",
|
||||
0.0,
|
||||
)
|
||||
return
|
||||
|
||||
input = np.expand_dims(crop, axis=0)
|
||||
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
|
||||
self.interpreter.invoke()
|
||||
|
@@ -60,10 +60,12 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
self.faces_per_second = EventsPerSecond()
|
||||
self.inference_speed = InferenceSpeed(self.metrics.face_rec_speed)
|
||||
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
|
||||
download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
|
||||
self.model_files = {
|
||||
"facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
|
||||
"landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
|
||||
"facedet.onnx": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
|
||||
"landmarkdet.yaml": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
|
||||
}
|
||||
|
||||
if not all(
|
||||
|
@@ -1,17 +1,13 @@
|
||||
"""Base runner implementation for ONNX models."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any
|
||||
|
||||
import numpy as np
|
||||
import onnxruntime as ort
|
||||
import zmq
|
||||
|
||||
from frigate.comms.zmq_req_router_broker import REQ_ROUTER_ENDPOINT
|
||||
from frigate.util.model import get_ort_providers
|
||||
from frigate.util.rknn_converter import auto_convert_model, is_rknn_compatible
|
||||
|
||||
@@ -82,6 +78,21 @@ class BaseModelRunner(ABC):
|
||||
class ONNXModelRunner(BaseModelRunner):
|
||||
"""Run ONNX models using ONNX Runtime."""
|
||||
|
||||
@staticmethod
|
||||
def is_migraphx_complex_model(model_type: str) -> bool:
|
||||
# Import here to avoid circular imports
|
||||
from frigate.detectors.detector_config import ModelTypeEnum
|
||||
from frigate.embeddings.types import EnrichmentModelTypeEnum
|
||||
|
||||
return model_type in [
|
||||
EnrichmentModelTypeEnum.paddleocr.value,
|
||||
EnrichmentModelTypeEnum.jina_v1.value,
|
||||
EnrichmentModelTypeEnum.jina_v2.value,
|
||||
EnrichmentModelTypeEnum.facenet.value,
|
||||
ModelTypeEnum.rfdetr.value,
|
||||
ModelTypeEnum.dfine.value,
|
||||
]
|
||||
|
||||
def __init__(self, ort: ort.InferenceSession):
|
||||
self.ort = ort
|
||||
|
||||
@@ -305,187 +316,6 @@ class OpenVINOModelRunner(BaseModelRunner):
|
||||
return outputs
|
||||
|
||||
|
||||
class ZmqIpcRunner(BaseModelRunner):
|
||||
"""Runner that forwards inference over ZMQ REQ/ROUTER to backend workers.
|
||||
|
||||
This allows reusing the same interface as local runners while delegating
|
||||
inference to the external ZMQ workers.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model_path: str,
|
||||
model_type: str,
|
||||
request_timeout_ms: int = 200,
|
||||
linger_ms: int = 0,
|
||||
endpoint: str = REQ_ROUTER_ENDPOINT,
|
||||
):
|
||||
self.model_type = model_type
|
||||
self.model_path = model_path
|
||||
self.model_name = os.path.basename(model_path)
|
||||
self._endpoint = endpoint
|
||||
self._context = zmq.Context()
|
||||
self._socket = self._context.socket(zmq.REQ)
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, request_timeout_ms)
|
||||
self._socket.setsockopt(zmq.SNDTIMEO, request_timeout_ms)
|
||||
self._socket.setsockopt(zmq.LINGER, linger_ms)
|
||||
self._socket.connect(self._endpoint)
|
||||
self._model_ready = False
|
||||
self._io_lock = threading.Lock()
|
||||
|
||||
@staticmethod
|
||||
def is_complex_model(model_type: str) -> bool:
|
||||
# Import here to avoid circular imports
|
||||
from frigate.detectors.detector_config import ModelTypeEnum
|
||||
from frigate.embeddings.types import EnrichmentModelTypeEnum
|
||||
|
||||
return model_type in [
|
||||
ModelTypeEnum.yolonas.value,
|
||||
EnrichmentModelTypeEnum.paddleocr.value,
|
||||
EnrichmentModelTypeEnum.jina_v1.value,
|
||||
EnrichmentModelTypeEnum.jina_v2.value,
|
||||
]
|
||||
|
||||
def get_input_names(self) -> list[str]:
|
||||
if "vision" in self.model_name:
|
||||
return ["pixel_values"]
|
||||
elif "arcface" in self.model_name:
|
||||
return ["data"]
|
||||
else:
|
||||
return ["input"]
|
||||
|
||||
def get_input_width(self) -> int:
|
||||
# Not known/required for ZMQ forwarding
|
||||
return -1
|
||||
|
||||
def _build_header(self, tensor_input: np.ndarray) -> bytes:
|
||||
header: dict[str, object] = {
|
||||
"shape": list(tensor_input.shape),
|
||||
"dtype": str(tensor_input.dtype.name),
|
||||
"model_type": self.model_type,
|
||||
"model_name": self.model_name,
|
||||
}
|
||||
return json.dumps(header).encode("utf-8")
|
||||
|
||||
def _decode_response(self, frames: list[bytes]) -> np.ndarray:
|
||||
if len(frames) == 1:
|
||||
buf = frames[0]
|
||||
if len(buf) != 20 * 6 * 4:
|
||||
raise ValueError(f"Unexpected payload size: {len(buf)}")
|
||||
return np.frombuffer(buf, dtype=np.float32).reshape((20, 6))
|
||||
|
||||
if len(frames) >= 2:
|
||||
header = json.loads(frames[0].decode("utf-8"))
|
||||
shape = tuple(header.get("shape", []))
|
||||
dtype = np.dtype(header.get("dtype", "float32"))
|
||||
return np.frombuffer(frames[1], dtype=dtype).reshape(shape)
|
||||
|
||||
raise ValueError("Empty or malformed reply from ZMQ detector")
|
||||
|
||||
def run(self, input: dict[str, np.ndarray]) -> np.ndarray | None:
|
||||
if not self._model_ready:
|
||||
if not self.ensure_model_ready(self.model_path):
|
||||
raise TimeoutError("ZMQ detector model is not ready after transfer")
|
||||
self._model_ready = True
|
||||
|
||||
input_name = next(iter(input))
|
||||
tensor = input[input_name]
|
||||
header = self._build_header(tensor)
|
||||
payload = memoryview(tensor.tobytes(order="C"))
|
||||
try:
|
||||
with self._io_lock:
|
||||
self._socket.send_multipart([header, payload])
|
||||
frames = self._socket.recv_multipart()
|
||||
except zmq.Again as e:
|
||||
raise TimeoutError("ZMQ detector request timed out") from e
|
||||
except zmq.ZMQError as e:
|
||||
raise RuntimeError(f"ZMQ error: {e}") from e
|
||||
|
||||
return self._decode_response(frames)
|
||||
|
||||
def ensure_model_ready(self, model_path: str) -> bool:
|
||||
"""Ensure the remote has the model and it is loaded.
|
||||
|
||||
1) Send model_request with model_name
|
||||
2) If not available, send model_data with the file contents
|
||||
3) Wait for loaded confirmation
|
||||
Returns True on success.
|
||||
"""
|
||||
# Check model availability
|
||||
req = {"model_request": True, "model_name": self.model_name}
|
||||
with self._io_lock:
|
||||
self._socket.send_multipart([json.dumps(req).encode("utf-8")])
|
||||
|
||||
# Temporarily extend timeout for model ops
|
||||
original_rcv = self._socket.getsockopt(zmq.RCVTIMEO)
|
||||
try:
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, max(30000, int(original_rcv or 0)))
|
||||
resp_frames = self._socket.recv_multipart()
|
||||
except zmq.Again:
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, original_rcv)
|
||||
return False
|
||||
finally:
|
||||
try:
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, original_rcv)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
if len(resp_frames) != 1:
|
||||
return False
|
||||
resp = json.loads(resp_frames[0].decode("utf-8"))
|
||||
if resp.get("model_available") and resp.get("model_loaded"):
|
||||
logger.info(f"ZMQ detector model {self.model_name} is ready")
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
try:
|
||||
with open(model_path, "rb") as f:
|
||||
model_bytes = f.read()
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
header = {"model_data": True, "model_name": self.model_name}
|
||||
with self._io_lock:
|
||||
self._socket.send_multipart(
|
||||
[json.dumps(header).encode("utf-8"), model_bytes]
|
||||
)
|
||||
|
||||
original_rcv2 = self._socket.getsockopt(zmq.RCVTIMEO)
|
||||
try:
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, max(30000, int(original_rcv2 or 0)))
|
||||
resp2 = self._socket.recv_multipart()
|
||||
except zmq.Again:
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, original_rcv2)
|
||||
return False
|
||||
finally:
|
||||
try:
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, original_rcv2)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
if len(resp2) != 1:
|
||||
return False
|
||||
j = json.loads(resp2[0].decode("utf-8"))
|
||||
return bool(j.get("model_saved") and j.get("model_loaded"))
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def __del__(self) -> None:
|
||||
try:
|
||||
if self._socket is not None:
|
||||
self._socket.close()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
if self._context is not None:
|
||||
self._context.term()
|
||||
except Exception:
|
||||
pass
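# A minimal usage sketch for ZmqIpcRunner, assuming a broker/worker is already
# listening on the configured endpoint; the endpoint, timeouts, model path, and
# tensor shape below are placeholders rather than values from Frigate's config.
#
#   runner = ZmqIpcRunner(
#       model_path="/config/model_cache/yolov9.onnx",
#       model_type="yolo-generic",
#       request_timeout_ms=1000,
#       linger_ms=0,
#       endpoint="tcp://127.0.0.1:5555",
#   )
#   tensor = np.zeros((1, 320, 320, 3), dtype=np.uint8)
#   detections = runner.run({"input": tensor})  # -> (20, 6) float32 array or None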
|
||||
|
||||
|
||||
class RKNNModelRunner(BaseModelRunner):
|
||||
"""Run RKNN models for embeddings."""
|
||||
|
||||
@@ -594,16 +424,13 @@ def get_optimized_runner(
|
||||
) -> BaseModelRunner:
|
||||
"""Get an optimized runner for the hardware."""
|
||||
device = device or "AUTO"
|
||||
if is_rknn_compatible(model_path):
|
||||
|
||||
if device != "CPU" and is_rknn_compatible(model_path):
|
||||
rknn_path = auto_convert_model(model_path)
|
||||
|
||||
if rknn_path:
|
||||
return RKNNModelRunner(rknn_path)
|
||||
|
||||
if device == "ZMQ" and not ZmqIpcRunner.is_complex_model(model_type):
|
||||
logger.info(f"Using ZMQ detector model {model_path}")
|
||||
return ZmqIpcRunner(model_path, model_type, **kwargs)
|
||||
|
||||
providers, options = get_ort_providers(device == "CPU", device, **kwargs)
|
||||
|
||||
if providers[0] == "CPUExecutionProvider":
|
||||
@@ -630,6 +457,15 @@ def get_optimized_runner(
|
||||
options[0]["device_id"],
|
||||
)
|
||||
|
||||
if (
|
||||
providers
|
||||
and providers[0] == "MIGraphXExecutionProvider"
|
||||
and ONNXModelRunner.is_migraphx_complex_model(model_type)
|
||||
):
|
||||
# Don't use MIGraphX for models that are not supported
|
||||
providers.pop(0)
|
||||
options.pop(0)
|
||||
|
||||
return ONNXModelRunner(
|
||||
ort.InferenceSession(
|
||||
model_path,
|
||||
|
@@ -161,6 +161,10 @@ class ModelConfig(BaseModel):
|
||||
if model_info.get("inputDataType"):
|
||||
self.input_dtype = InputDTypeEnum(model_info["inputDataType"])
|
||||
|
||||
# RKNN always uses NHWC
|
||||
if detector == "rknn":
|
||||
self.input_tensor = InputTensorEnum.nhwc
|
||||
|
||||
# generate list of attribute labels
|
||||
self.attributes_map = {
|
||||
**model_info.get("attributes", DEFAULT_ATTRIBUTE_LABEL_MAP),
|
||||
|
@@ -33,10 +33,6 @@ def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarr
|
||||
image = image[0]
|
||||
|
||||
h, w = image.shape[:2]
|
||||
|
||||
if (w, h) == (320, 320) and (model_w, model_h) == (640, 640):
|
||||
return cv2.resize(image, (model_w, model_h), interpolation=cv2.INTER_LINEAR)
|
||||
|
||||
scale = min(model_w / w, model_h / h)
|
||||
new_w, new_h = int(w * scale), int(h * scale)
|
||||
resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
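# Worked example of the scaling above (illustrative numbers): a 1280x720 frame
# targeted at a 640x640 model gives
#   scale = min(640 / 1280, 640 / 720) = 0.5
# so new_w, new_h = 640, 360 and the image is resized with INTER_CUBIC.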
|
||||
|
@@ -165,8 +165,9 @@ class Rknn(DetectionApi):
|
||||
if not os.path.isdir(model_cache_dir):
|
||||
os.mkdir(model_cache_dir)
|
||||
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
urllib.request.urlretrieve(
|
||||
f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.2-2/{filename}",
|
||||
f"{GITHUB_ENDPOINT}/MarcA711/rknn-models/releases/download/v2.3.2-2/{filename}",
|
||||
model_cache_dir + filename,
|
||||
)
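# Hedged illustration of the override above: if GITHUB_ENDPOINT is set (the
# mirror URL below is a placeholder), the release asset URL resolves against
# that host; with the variable unset, the default https://github.com is used.
#
#   os.environ["GITHUB_ENDPOINT"] = "https://ghproxy.example.com"  # hypothetical mirror
#   # -> f"{GITHUB_ENDPOINT}/MarcA711/rknn-models/releases/download/v2.3.2-2/{filename}"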
|
||||
|
||||
|
frigate/detectors/plugins/synaptics.py (new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
import logging
import os

import numpy as np
from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
    InputTensorEnum,
    ModelTypeEnum,
)

try:
    from synap import Network
    from synap.postprocessor import Detector
    from synap.preprocessor import Preprocessor
    from synap.types import Layout, Shape

    SYNAP_SUPPORT = True
except ImportError:
    SYNAP_SUPPORT = False

logger = logging.getLogger(__name__)

DETECTOR_KEY = "synaptics"


class SynapDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class SynapDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: SynapDetectorConfig):
        if not SYNAP_SUPPORT:
            logger.error(
                "Error importing Synaptics SDK modules. You must use the -synaptics Docker image variant for Synaptics detector support."
            )
            return

        try:
            _, ext = os.path.splitext(detector_config.model.path)
            if ext and ext != ".synap":
                raise ValueError("Model path config for Synap1680 is incorrect.")

            synap_network = Network(detector_config.model.path)
            logger.info(f"Synap NPU loaded model: {detector_config.model.path}")
        except ValueError as ve:
            logger.error(f"Synap1680 setup failed: {ve}")
            raise
        except Exception as e:
            logger.error(f"Failed to init Synap NPU: {e}")
            raise

        self.width = detector_config.model.width
        self.height = detector_config.model.height
        self.model_type = detector_config.model.model_type
        self.network = synap_network
        self.network_input_details = self.network.inputs[0]
        self.input_tensor_layout = detector_config.model.input_tensor

        # Create Inference Engine
        self.preprocessor = Preprocessor()
        self.detector = Detector(score_threshold=0.4, iou_threshold=0.4)

    def detect_raw(self, tensor_input: np.ndarray):
        # Currently only tested with a pre-converted mobilenet80 .tflite -> .synap model
        layout = Layout.nhwc  # default layout
        detections = np.zeros((20, 6), np.float32)

        if self.input_tensor_layout == InputTensorEnum.nhwc:
            layout = Layout.nhwc

        postprocess_data = self.preprocessor.assign(
            self.network.inputs, tensor_input, Shape(tensor_input.shape), layout
        )
        output_tensor_obj = self.network.predict()
        output = self.detector.process(output_tensor_obj, postprocess_data)

        if self.model_type == ModelTypeEnum.ssd:
            for i, item in enumerate(output.items):
                if i == 20:
                    break

                bb = item.bounding_box
                # Convert corner coordinates to normalized [0,1] range
                x1 = bb.origin.x / self.width  # Top-left X
                y1 = bb.origin.y / self.height  # Top-left Y
                x2 = (bb.origin.x + bb.size.x) / self.width  # Bottom-right X
                y2 = (bb.origin.y + bb.size.y) / self.height  # Bottom-right Y
                detections[i] = [
                    item.class_index,
                    float(item.confidence),
                    y1,
                    x1,
                    y2,
                    x2,
                ]
        else:
            logger.error(f"Unsupported model type: {self.model_type}")
        return detections
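# Illustrative consumption sketch (not part of synaptics.py): each populated row
# of the (20, 6) array returned above is [class_index, confidence, y1, x1, y2, x2]
# with coordinates normalized to [0, 1]; trailing rows stay zero-filled.
#
#   for class_id, score, y1, x1, y2, x2 in detector.detect_raw(frame_tensor):
#       if score == 0.0:
#           break  # remaining rows are padding
#       print(int(class_id), round(float(score), 2), (x1, y1, x2, y2))
#
# Here `detector` and `frame_tensor` are placeholders for a configured
# SynapDetector instance and a preprocessed input tensor.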
|
@@ -8,9 +8,7 @@ import zmq
|
||||
from pydantic import Field
|
||||
from typing_extensions import Literal
|
||||
|
||||
from frigate.comms.zmq_req_router_broker import REQ_ROUTER_ENDPOINT
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detection_runners import ZmqIpcRunner
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -51,7 +49,9 @@ class ZmqIpcDetector(DetectionApi):
|
||||
On any error or timeout, this detector returns a zero array of shape (20, 6).
|
||||
|
||||
Model Management:
|
||||
- Model transfer/availability is handled by the runner automatically
|
||||
- On initialization, sends model request to check if model is available
|
||||
- If model not available, sends model data via ZMQ
|
||||
- Only starts inference after model is ready
|
||||
"""
|
||||
|
||||
type_key = DETECTOR_KEY
|
||||
@@ -60,23 +60,21 @@ class ZmqIpcDetector(DetectionApi):
|
||||
super().__init__(detector_config)
|
||||
|
||||
self._context = zmq.Context()
|
||||
self._endpoint = REQ_ROUTER_ENDPOINT
|
||||
self._endpoint = detector_config.endpoint
|
||||
self._request_timeout_ms = detector_config.request_timeout_ms
|
||||
self._linger_ms = detector_config.linger_ms
|
||||
self._socket = None
|
||||
self._create_socket()
|
||||
|
||||
# Model management
|
||||
self._model_ready = False
|
||||
self._model_name = self._get_model_name()
|
||||
|
||||
# Initialize model if needed
|
||||
self._initialize_model()
|
||||
|
||||
# Preallocate zero result for error paths
|
||||
self._zero_result = np.zeros((20, 6), np.float32)
|
||||
self._runner = ZmqIpcRunner(
|
||||
model_path=self.detector_config.model.path,
|
||||
model_type=str(self.detector_config.model.model_type.value),
|
||||
request_timeout_ms=self._request_timeout_ms,
|
||||
linger_ms=self._linger_ms,
|
||||
endpoint=self._endpoint,
|
||||
)
|
||||
|
||||
def _create_socket(self) -> None:
|
||||
if self._socket is not None:
|
||||
@@ -98,12 +96,167 @@ class ZmqIpcDetector(DetectionApi):
|
||||
model_path = self.detector_config.model.path
|
||||
return os.path.basename(model_path)
|
||||
|
||||
def _initialize_model(self) -> None:
|
||||
"""Initialize the model by checking availability and transferring if needed."""
|
||||
try:
|
||||
logger.info(f"Initializing model: {self._model_name}")
|
||||
|
||||
# Check if model is available and transfer if needed
|
||||
if self._check_and_transfer_model():
|
||||
logger.info(f"Model {self._model_name} is ready")
|
||||
self._model_ready = True
|
||||
else:
|
||||
logger.error(f"Failed to initialize model {self._model_name}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize model: {e}")
|
||||
|
||||
def _check_and_transfer_model(self) -> bool:
|
||||
"""Check if model is available and transfer if needed in one atomic operation."""
|
||||
try:
|
||||
# Send model availability request
|
||||
header = {"model_request": True, "model_name": self._model_name}
|
||||
header_bytes = json.dumps(header).encode("utf-8")
|
||||
|
||||
self._socket.send_multipart([header_bytes])
|
||||
|
||||
# Temporarily increase timeout for model operations
|
||||
original_timeout = self._socket.getsockopt(zmq.RCVTIMEO)
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, 30000)
|
||||
|
||||
try:
|
||||
response_frames = self._socket.recv_multipart()
|
||||
finally:
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, original_timeout)
|
||||
|
||||
if len(response_frames) == 1:
|
||||
try:
|
||||
response = json.loads(response_frames[0].decode("utf-8"))
|
||||
model_available = response.get("model_available", False)
|
||||
model_loaded = response.get("model_loaded", False)
|
||||
|
||||
if model_available and model_loaded:
|
||||
return True
|
||||
elif model_available and not model_loaded:
|
||||
logger.error("Model exists but failed to load")
|
||||
return False
|
||||
else:
|
||||
return self._send_model_data()
|
||||
|
||||
except json.JSONDecodeError:
|
||||
logger.warning(
|
||||
"Received non-JSON response for model availability check"
|
||||
)
|
||||
return False
|
||||
else:
|
||||
logger.warning(
|
||||
"Received unexpected response format for model availability check"
|
||||
)
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to check and transfer model: {e}")
|
||||
return False
|
||||
|
||||
def _check_model_availability(self) -> bool:
|
||||
"""Check if the model is available on the detector."""
|
||||
try:
|
||||
# Send model availability request
|
||||
header = {"model_request": True, "model_name": self._model_name}
|
||||
header_bytes = json.dumps(header).encode("utf-8")
|
||||
|
||||
self._socket.send_multipart([header_bytes])
|
||||
|
||||
# Receive response
|
||||
response_frames = self._socket.recv_multipart()
|
||||
|
||||
# Check if this is a JSON response (model management)
|
||||
if len(response_frames) == 1:
|
||||
try:
|
||||
response = json.loads(response_frames[0].decode("utf-8"))
|
||||
model_available = response.get("model_available", False)
|
||||
model_loaded = response.get("model_loaded", False)
|
||||
logger.debug(
|
||||
f"Model availability check: available={model_available}, loaded={model_loaded}"
|
||||
)
|
||||
return model_available and model_loaded
|
||||
except json.JSONDecodeError:
|
||||
logger.warning(
|
||||
"Received non-JSON response for model availability check"
|
||||
)
|
||||
return False
|
||||
else:
|
||||
logger.warning(
|
||||
"Received unexpected response format for model availability check"
|
||||
)
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to check model availability: {e}")
|
||||
return False
|
||||
|
||||
def _send_model_data(self) -> bool:
|
||||
"""Send model data to the detector."""
|
||||
try:
|
||||
model_path = self.detector_config.model.path
|
||||
|
||||
if not os.path.exists(model_path):
|
||||
logger.error(f"Model file not found: {model_path}")
|
||||
return False
|
||||
|
||||
logger.info(f"Transferring model to detector: {self._model_name}")
|
||||
with open(model_path, "rb") as f:
|
||||
model_data = f.read()
|
||||
|
||||
header = {"model_data": True, "model_name": self._model_name}
|
||||
header_bytes = json.dumps(header).encode("utf-8")
|
||||
|
||||
self._socket.send_multipart([header_bytes, model_data])
|
||||
|
||||
# Temporarily increase timeout for model loading (can take several seconds)
|
||||
original_timeout = self._socket.getsockopt(zmq.RCVTIMEO)
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, 30000)
|
||||
|
||||
try:
|
||||
# Receive response
|
||||
response_frames = self._socket.recv_multipart()
|
||||
finally:
|
||||
# Restore original timeout
|
||||
self._socket.setsockopt(zmq.RCVTIMEO, original_timeout)
|
||||
|
||||
# Check if this is a JSON response (model management)
|
||||
if len(response_frames) == 1:
|
||||
try:
|
||||
response = json.loads(response_frames[0].decode("utf-8"))
|
||||
model_saved = response.get("model_saved", False)
|
||||
model_loaded = response.get("model_loaded", False)
|
||||
if model_saved and model_loaded:
|
||||
logger.info(
|
||||
f"Model {self._model_name} transferred and loaded successfully"
|
||||
)
|
||||
else:
|
||||
logger.error(
|
||||
f"Model transfer failed: saved={model_saved}, loaded={model_loaded}"
|
||||
)
|
||||
return model_saved and model_loaded
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("Received non-JSON response for model data transfer")
|
||||
return False
|
||||
else:
|
||||
logger.warning(
|
||||
"Received unexpected response format for model data transfer"
|
||||
)
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to send model data: {e}")
|
||||
return False
|
||||
|
||||
def _build_header(self, tensor_input: np.ndarray) -> bytes:
|
||||
header: dict[str, Any] = {
|
||||
"shape": list(tensor_input.shape),
|
||||
"dtype": str(tensor_input.dtype.name),
|
||||
"model_type": str(self.detector_config.model.model_type.name),
|
||||
"model_name": self._model_name,
|
||||
}
|
||||
return json.dumps(header).encode("utf-8")
|
||||
|
||||
@@ -132,11 +285,42 @@ class ZmqIpcDetector(DetectionApi):
|
||||
return self._zero_result
|
||||
|
||||
def detect_raw(self, tensor_input: np.ndarray) -> np.ndarray:
|
||||
if not self._model_ready:
|
||||
logger.warning("Model not ready, returning zero detections")
|
||||
return self._zero_result
|
||||
|
||||
try:
|
||||
result = self._runner.run({"input": tensor_input})
|
||||
return result if isinstance(result, np.ndarray) else self._zero_result
|
||||
header_bytes = self._build_header(tensor_input)
|
||||
payload_bytes = memoryview(tensor_input.tobytes(order="C"))
|
||||
|
||||
# Send request
|
||||
self._socket.send_multipart([header_bytes, payload_bytes])
|
||||
|
||||
# Receive reply
|
||||
reply_frames = self._socket.recv_multipart()
|
||||
detections = self._decode_response(reply_frames)
|
||||
|
||||
# Ensure output shape and dtype are exactly as expected
|
||||
return detections
|
||||
except zmq.Again:
|
||||
# Timeout
|
||||
logger.debug("ZMQ detector request timed out; resetting socket")
|
||||
try:
|
||||
self._create_socket()
|
||||
self._initialize_model()
|
||||
except Exception:
|
||||
pass
|
||||
return self._zero_result
|
||||
except zmq.ZMQError as exc:
|
||||
logger.error(f"ZMQ detector ZMQError: {exc}; resetting socket")
|
||||
try:
|
||||
self._create_socket()
|
||||
self._initialize_model()
|
||||
except Exception:
|
||||
pass
|
||||
return self._zero_result
|
||||
except Exception as exc: # noqa: BLE001
|
||||
logger.error(f"ZMQ IPC runner error: {exc}")
|
||||
logger.error(f"ZMQ detector unexpected error: {exc}")
|
||||
return self._zero_result
|
||||
|
||||
def __del__(self) -> None: # pragma: no cover - best-effort cleanup
|
||||
|
@@ -1,838 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ZMQ TCP ONNX Runtime Client
|
||||
|
||||
This client connects to the ZMQ TCP proxy, accepts tensor inputs,
|
||||
runs inference via ONNX Runtime, and returns detection results.
|
||||
|
||||
Protocol:
|
||||
- Receives multipart messages: [header_json_bytes, tensor_bytes]
|
||||
- Header contains shape and dtype information
|
||||
- Runs ONNX inference on the tensor
|
||||
- Returns results in the expected format: [20, 6] float32 array
|
||||
|
||||
Note: Timeouts are normal when Frigate has no motion to detect.
|
||||
The server will continue running and waiting for requests.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import numpy as np
|
||||
import onnxruntime as ort
|
||||
import zmq
|
||||
from model_util import post_process_dfine, post_process_rfdetr, post_process_yolo
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ZmqOnnxWorker(threading.Thread):
|
||||
"""
|
||||
A worker thread that connects a REP socket to the endpoint and processes
|
||||
requests using a shared model session map. This mirrors the single-runner
|
||||
logic, but the ONNX Runtime session is fetched from the shared map, and
|
||||
created on-demand if missing.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
worker_id: int,
|
||||
context: zmq.Context,
|
||||
endpoint: str,
|
||||
models_dir: str,
|
||||
model_sessions: Dict[str, ort.InferenceSession],
|
||||
model_lock: threading.Lock,
|
||||
providers: Optional[List[str]],
|
||||
zero_result: np.ndarray,
|
||||
) -> None:
|
||||
super().__init__(name=f"onnx_worker_{worker_id}", daemon=True)
|
||||
self.worker_id = worker_id
|
||||
self.context = context
|
||||
self.endpoint = self._normalize_endpoint(endpoint)
|
||||
self.models_dir = models_dir
|
||||
self.model_sessions = model_sessions
|
||||
self.model_lock = model_lock
|
||||
self.providers = providers
|
||||
self.zero_result = zero_result
|
||||
self.socket: Optional[zmq.Socket] = None
|
||||
|
||||
def _normalize_endpoint(self, endpoint: str) -> str:
|
||||
if endpoint.startswith("tcp://*:"):
|
||||
port = endpoint.split(":", 2)[-1]
|
||||
return f"tcp://127.0.0.1:{port}"
|
||||
return endpoint
|
||||
|
||||
# --- ZMQ helpers ---
|
||||
def _create_socket(self) -> zmq.Socket:
|
||||
sock = self.context.socket(zmq.REP)
|
||||
sock.setsockopt(zmq.RCVTIMEO, 5000)
|
||||
sock.setsockopt(zmq.SNDTIMEO, 5000)
|
||||
sock.setsockopt(zmq.LINGER, 0)
|
||||
sock.connect(self.endpoint)
|
||||
return sock
|
||||
|
||||
def _decode_request(self, frames: List[bytes]) -> Tuple[Optional[np.ndarray], dict]:
|
||||
if len(frames) < 1:
|
||||
raise ValueError(f"Expected at least 1 frame, got {len(frames)}")
|
||||
|
||||
header_bytes = frames[0]
|
||||
header = json.loads(header_bytes.decode("utf-8"))
|
||||
|
||||
if "model_request" in header:
|
||||
return None, header
|
||||
if "model_data" in header:
|
||||
return None, header
|
||||
if len(frames) < 2:
|
||||
raise ValueError(f"Tensor request expected 2 frames, got {len(frames)}")
|
||||
|
||||
tensor_bytes = frames[1]
|
||||
shape = tuple(header.get("shape", []))
|
||||
dtype_str = header.get("dtype", "uint8")
|
||||
|
||||
dtype = np.dtype(dtype_str)
|
||||
tensor = np.frombuffer(tensor_bytes, dtype=dtype).reshape(shape)
|
||||
return tensor, header
|
||||
|
||||
def _build_response(self, result: np.ndarray) -> List[bytes]:
|
||||
header = {
|
||||
"shape": list(result.shape),
|
||||
"dtype": str(result.dtype.name),
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
return [json.dumps(header).encode("utf-8"), result.tobytes(order="C")]
|
||||
|
||||
def _build_error_response(self, error_msg: str) -> List[bytes]:
|
||||
error_header = {"shape": [20, 6], "dtype": "float32", "error": error_msg}
|
||||
return [
|
||||
json.dumps(error_header).encode("utf-8"),
|
||||
self.zero_result.tobytes(order="C"),
|
||||
]
|
||||
|
||||
# --- Model/session helpers ---
|
||||
def _check_model_exists(self, model_name: str) -> bool:
|
||||
return os.path.exists(os.path.join(self.models_dir, model_name))
|
||||
|
||||
def _save_model(self, model_name: str, model_data: bytes) -> bool:
|
||||
try:
|
||||
os.makedirs(self.models_dir, exist_ok=True)
|
||||
with open(os.path.join(self.models_dir, model_name), "wb") as f:
|
||||
f.write(model_data)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Worker {self.worker_id} failed to save model {model_name}: {e}"
|
||||
)
|
||||
return False
|
||||
|
||||
def _get_or_create_session(self, model_name: str) -> Optional[ort.InferenceSession]:
|
||||
with self.model_lock:
|
||||
session = self.model_sessions.get(model_name)
|
||||
if session is not None:
|
||||
return session
|
||||
try:
|
||||
providers = self.providers or ["CoreMLExecutionProvider"]
|
||||
session = ort.InferenceSession(
|
||||
os.path.join(self.models_dir, model_name), providers=providers
|
||||
)
|
||||
self.model_sessions[model_name] = session
|
||||
return session
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Worker {self.worker_id} failed to load model {model_name}: {e}"
|
||||
)
|
||||
return None
|
||||
|
||||
# --- Inference helpers ---
|
||||
def _extract_input_hw(self, header: dict) -> Tuple[int, int]:
|
||||
try:
|
||||
if "width" in header and "height" in header:
|
||||
return int(header["width"]), int(header["height"])
|
||||
shape = tuple(header.get("shape", []))
|
||||
layout = header.get("layout") or header.get("order")
|
||||
if layout and shape:
|
||||
layout = str(layout).upper()
|
||||
if len(shape) == 4:
|
||||
if layout == "NCHW":
|
||||
return int(shape[3]), int(shape[2])
|
||||
if layout == "NHWC":
|
||||
return int(shape[2]), int(shape[1])
|
||||
if len(shape) == 3:
|
||||
if layout == "CHW":
|
||||
return int(shape[2]), int(shape[1])
|
||||
if layout == "HWC":
|
||||
return int(shape[1]), int(shape[0])
|
||||
if shape:
|
||||
if len(shape) == 4:
|
||||
_, d1, d2, d3 = shape
|
||||
if d1 in (1, 3):
|
||||
return int(d3), int(d2)
|
||||
if d3 in (1, 3):
|
||||
return int(d2), int(d1)
|
||||
return int(d2), int(d1)
|
||||
if len(shape) == 3:
|
||||
d0, d1, d2 = shape
|
||||
if d0 in (1, 3):
|
||||
return int(d2), int(d1)
|
||||
if d2 in (1, 3):
|
||||
return int(d1), int(d0)
|
||||
return int(d1), int(d0)
|
||||
if len(shape) == 2:
|
||||
h, w = shape
|
||||
return int(w), int(h)
|
||||
except Exception:
|
||||
pass
|
||||
return 320, 320
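# Worked examples (illustrative) of the heuristic above:
#   {"shape": [1, 3, 320, 320], "layout": "NCHW"} -> (320, 320)
#   {"shape": [1, 416, 416, 3]} (no layout)       -> (416, 416)  # channels-last guess
#   {} or an unparseable header                   -> (320, 320)  # default fallback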
|
||||
|
||||
def _run_inference(
|
||||
self, session: ort.InferenceSession, tensor: np.ndarray, header: dict
|
||||
) -> np.ndarray:
|
||||
try:
|
||||
model_type = header.get("model_type")
|
||||
width, height = self._extract_input_hw(header)
|
||||
|
||||
if model_type == "dfine":
|
||||
input_data = {
|
||||
"images": tensor.astype(np.float32),
|
||||
"orig_target_sizes": np.array([[height, width]], dtype=np.int64),
|
||||
}
|
||||
else:
|
||||
input_name = session.get_inputs()[0].name
|
||||
input_data = {input_name: tensor}
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
t_start = time.perf_counter()
|
||||
|
||||
outputs = session.run(None, input_data)
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
t_after_onnx = time.perf_counter()
|
||||
|
||||
if model_type == "yolo-generic" or model_type == "yologeneric":
|
||||
result = post_process_yolo(outputs, width, height)
|
||||
elif model_type == "dfine":
|
||||
result = post_process_dfine(outputs, width, height)
|
||||
elif model_type == "rfdetr":
|
||||
result = post_process_rfdetr(outputs)
|
||||
else:
|
||||
result = np.zeros((20, 6), dtype=np.float32)
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
t_after_post = time.perf_counter()
|
||||
onnx_ms = (t_after_onnx - t_start) * 1000.0
|
||||
post_ms = (t_after_post - t_after_onnx) * 1000.0
|
||||
total_ms = (t_after_post - t_start) * 1000.0
|
||||
logger.debug(
|
||||
f"Worker {self.worker_id} timing: onnx={onnx_ms:.2f}ms, post={post_ms:.2f}ms, total={total_ms:.2f}ms"
|
||||
)
|
||||
|
||||
return result.astype(np.float32)
|
||||
except Exception as e:
|
||||
logger.error(f"Worker {self.worker_id} ONNX inference failed: {e}")
|
||||
return self.zero_result
|
||||
|
||||
# --- Message handlers ---
|
||||
def _handle_model_request(self, header: dict) -> List[bytes]:
|
||||
model_name = header.get("model_name")
|
||||
if not model_name:
|
||||
return self._build_error_response("Model request missing model_name")
|
||||
if self._check_model_exists(model_name):
|
||||
# Ensure session exists
|
||||
if self._get_or_create_session(model_name) is not None:
|
||||
response_header = {
|
||||
"model_available": True,
|
||||
"model_loaded": True,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} loaded successfully",
|
||||
}
|
||||
else:
|
||||
response_header = {
|
||||
"model_available": True,
|
||||
"model_loaded": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} exists but failed to load",
|
||||
}
|
||||
else:
|
||||
response_header = {
|
||||
"model_available": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} not found, please send model data",
|
||||
}
|
||||
return [json.dumps(response_header).encode("utf-8")]
|
||||
|
||||
def _handle_model_data(self, header: dict, model_data: bytes) -> List[bytes]:
|
||||
model_name = header.get("model_name")
|
||||
if not model_name:
|
||||
return self._build_error_response("Model data missing model_name")
|
||||
if self._save_model(model_name, model_data):
|
||||
# Ensure session is created
|
||||
if self._get_or_create_session(model_name) is not None:
|
||||
response_header = {
|
||||
"model_saved": True,
|
||||
"model_loaded": True,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} saved and loaded successfully",
|
||||
}
|
||||
else:
|
||||
response_header = {
|
||||
"model_saved": True,
|
||||
"model_loaded": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} saved but failed to load",
|
||||
}
|
||||
else:
|
||||
response_header = {
|
||||
"model_saved": False,
|
||||
"model_loaded": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Failed to save model {model_name}",
|
||||
}
|
||||
return [json.dumps(response_header).encode("utf-8")]
|
||||
|
||||
# --- Thread run ---
|
||||
def run(self) -> None: # pragma: no cover - runtime loop
|
||||
try:
|
||||
self.socket = self._create_socket()
|
||||
logger.info(
|
||||
f"Worker {self.worker_id} connected REP to endpoint: {self.endpoint}"
|
||||
)
|
||||
while True:
|
||||
try:
|
||||
frames = self.socket.recv_multipart()
|
||||
tensor, header = self._decode_request(frames)
|
||||
|
||||
if "model_request" in header:
|
||||
response = self._handle_model_request(header)
|
||||
self.socket.send_multipart(response)
|
||||
continue
|
||||
if "model_data" in header and len(frames) >= 2:
|
||||
model_data = frames[1]
|
||||
response = self._handle_model_data(header, model_data)
|
||||
self.socket.send_multipart(response)
|
||||
continue
|
||||
if tensor is not None:
|
||||
model_name = header.get("model_name")
|
||||
session = None
|
||||
if model_name:
|
||||
session = self._get_or_create_session(model_name)
|
||||
if session is None:
|
||||
result = self.zero_result
|
||||
else:
|
||||
result = self._run_inference(session, tensor, header)
|
||||
self.socket.send_multipart(self._build_response(result))
|
||||
continue
|
||||
|
||||
# Unknown message: reply with zeros
|
||||
self.socket.send_multipart(self._build_response(self.zero_result))
|
||||
except zmq.Again:
|
||||
continue
|
||||
except zmq.ZMQError as e:
|
||||
logger.error(f"Worker {self.worker_id} ZMQ error: {e}")
|
||||
# Recreate socket on transient errors
|
||||
try:
|
||||
if self.socket:
|
||||
self.socket.close(linger=0)
|
||||
finally:
|
||||
self.socket = self._create_socket()
|
||||
except Exception as e:
|
||||
logger.error(f"Worker {self.worker_id} unexpected error: {e}")
|
||||
try:
|
||||
self.socket.send_multipart(self._build_error_response(str(e)))
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
try:
|
||||
if self.socket:
|
||||
self.socket.close(linger=0)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
class ZmqOnnxClient:
|
||||
"""
|
||||
ZMQ TCP client that runs ONNX inference on received tensors.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
endpoint: str = "tcp://*:5555",
|
||||
model_path: Optional[str] = "AUTO",
|
||||
providers: Optional[List[str]] = None,
|
||||
session_options: Optional[ort.SessionOptions] = None,
|
||||
):
|
||||
"""
|
||||
Initialize the ZMQ ONNX client.
|
||||
|
||||
Args:
|
||||
endpoint: ZMQ IPC endpoint to bind to
|
||||
model_path: Path to ONNX model file or "AUTO" for automatic model management
|
||||
providers: ONNX Runtime execution providers
|
||||
session_options: ONNX Runtime session options
|
||||
"""
|
||||
self.endpoint = endpoint
|
||||
self.model_path = model_path
|
||||
self.current_model = None
|
||||
self.model_ready = False
|
||||
self.models_dir = os.path.join(
|
||||
os.path.dirname(os.path.dirname(__file__)), "models"
|
||||
)
|
||||
|
||||
# Shared ZMQ context and shared session map across workers
|
||||
self.context = zmq.Context()
|
||||
self.model_sessions: Dict[str, ort.InferenceSession] = {}
|
||||
self.model_lock = threading.Lock()
|
||||
self.providers = providers
|
||||
|
||||
# Preallocate zero result for error cases
|
||||
self.zero_result = np.zeros((20, 6), dtype=np.float32)
|
||||
|
||||
logger.info(f"ZMQ ONNX client will start workers on endpoint: {endpoint}")
|
||||
|
||||
def start_server(self, num_workers: int = 4) -> None:
|
||||
workers: list[ZmqOnnxWorker] = []
|
||||
for i in range(num_workers):
|
||||
w = ZmqOnnxWorker(
|
||||
worker_id=i,
|
||||
context=self.context,
|
||||
endpoint=self.endpoint,
|
||||
models_dir=self.models_dir,
|
||||
model_sessions=self.model_sessions,
|
||||
model_lock=self.model_lock,
|
||||
providers=self.providers,
|
||||
zero_result=self.zero_result,
|
||||
)
|
||||
w.start()
|
||||
workers.append(w)
|
||||
logger.info(f"Started {num_workers} ZMQ REP workers on backend {self.endpoint}")
|
||||
try:
|
||||
for w in workers:
|
||||
w.join()
|
||||
except KeyboardInterrupt:
|
||||
logger.info("Shutting down workers...")
|
||||
|
||||
def _check_model_exists(self, model_name: str) -> bool:
|
||||
"""
|
||||
Check if a model exists in the models directory.
|
||||
|
||||
Args:
|
||||
model_name: Name of the model file to check
|
||||
|
||||
Returns:
|
||||
True if model exists, False otherwise
|
||||
"""
|
||||
model_path = os.path.join(self.models_dir, model_name)
|
||||
return os.path.exists(model_path)
|
||||
|
||||
# These methods remain for compatibility but are unused in worker mode
|
||||
|
||||
def _save_model(self, model_name: str, model_data: bytes) -> bool:
|
||||
"""
|
||||
Save model data to the models directory.
|
||||
|
||||
Args:
|
||||
model_name: Name of the model file to save
|
||||
model_data: Binary model data
|
||||
|
||||
Returns:
|
||||
True if model saved successfully, False otherwise
|
||||
"""
|
||||
try:
|
||||
# Ensure models directory exists
|
||||
os.makedirs(self.models_dir, exist_ok=True)
|
||||
|
||||
model_path = os.path.join(self.models_dir, model_name)
|
||||
logger.info(f"Saving model to: {model_path}")
|
||||
|
||||
with open(model_path, "wb") as f:
|
||||
f.write(model_data)
|
||||
|
||||
logger.info(f"Model saved successfully: {model_name}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save model {model_name}: {e}")
|
||||
return False
|
||||
|
||||
def _decode_request(self, frames: List[bytes]) -> Tuple[np.ndarray, dict]:
|
||||
"""
|
||||
Decode the incoming request frames.
|
||||
|
||||
Args:
|
||||
frames: List of message frames
|
||||
|
||||
Returns:
|
||||
Tuple of (tensor, header_dict)
|
||||
"""
|
||||
try:
|
||||
if len(frames) < 1:
|
||||
raise ValueError(f"Expected at least 1 frame, got {len(frames)}")
|
||||
|
||||
# Parse header
|
||||
header_bytes = frames[0]
|
||||
header = json.loads(header_bytes.decode("utf-8"))
|
||||
|
||||
if "model_request" in header:
|
||||
return None, header
|
||||
|
||||
if "model_data" in header:
|
||||
if len(frames) < 2:
|
||||
raise ValueError(
|
||||
f"Model data request expected 2 frames, got {len(frames)}"
|
||||
)
|
||||
return None, header
|
||||
|
||||
if len(frames) < 2:
|
||||
raise ValueError(f"Tensor request expected 2 frames, got {len(frames)}")
|
||||
|
||||
tensor_bytes = frames[1]
|
||||
shape = tuple(header.get("shape", []))
|
||||
dtype_str = header.get("dtype", "uint8")
|
||||
|
||||
dtype = np.dtype(dtype_str)
|
||||
tensor = np.frombuffer(tensor_bytes, dtype=dtype).reshape(shape)
|
||||
return tensor, header
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to decode request: {e}")
|
||||
raise
|
||||
|
||||
def _run_inference(self, tensor: np.ndarray, header: dict) -> np.ndarray:
|
||||
"""
|
||||
Run ONNX inference on the input tensor.
|
||||
|
||||
Args:
|
||||
tensor: Input tensor
|
||||
header: Request header containing metadata (e.g., shape, layout)
|
||||
|
||||
Returns:
|
||||
Detection results as numpy array
|
||||
|
||||
Raises:
|
||||
RuntimeError: If no ONNX session is available or inference fails
|
||||
"""
|
||||
if self.session is None:
|
||||
logger.warning("No ONNX session available, returning zero results")
|
||||
return self.zero_result
|
||||
|
||||
try:
|
||||
# Prepare input for ONNX Runtime
|
||||
# Determine input spatial size (W, H) from header/shape/layout
|
||||
model_type = header.get("model_type")
|
||||
width, height = self._extract_input_hw(header)
|
||||
|
||||
if model_type == "dfine":
|
||||
# DFine model requires both images and orig_target_sizes inputs
|
||||
input_data = {
|
||||
"images": tensor.astype(np.float32),
|
||||
"orig_target_sizes": np.array([[height, width]], dtype=np.int64),
|
||||
}
|
||||
else:
|
||||
# Other models use single input
|
||||
input_name = self.session.get_inputs()[0].name
|
||||
input_data = {input_name: tensor}
|
||||
|
||||
# Run inference
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
t_start = time.perf_counter()
|
||||
|
||||
outputs = self.session.run(None, input_data)
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
t_after_onnx = time.perf_counter()
|
||||
|
||||
if model_type == "yolo-generic" or model_type == "yologeneric":
|
||||
result = post_process_yolo(outputs, width, height)
|
||||
elif model_type == "dfine":
|
||||
result = post_process_dfine(outputs, width, height)
|
||||
elif model_type == "rfdetr":
|
||||
result = post_process_rfdetr(outputs)
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
t_after_post = time.perf_counter()
|
||||
onnx_ms = (t_after_onnx - t_start) * 1000.0
|
||||
post_ms = (t_after_post - t_after_onnx) * 1000.0
|
||||
total_ms = (t_after_post - t_start) * 1000.0
|
||||
logger.debug(
|
||||
f"Inference timing: onnx={onnx_ms:.2f}ms, post={post_ms:.2f}ms, total={total_ms:.2f}ms"
|
||||
)
|
||||
|
||||
# Ensure float32 dtype
|
||||
result = result.astype(np.float32)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"ONNX inference failed: {e}")
|
||||
return self.zero_result
|
||||
|
||||
def _extract_input_hw(self, header: dict) -> Tuple[int, int]:
|
||||
"""
|
||||
Extract (width, height) from the header and/or tensor shape, supporting
|
||||
NHWC/NCHW as well as 3D/4D inputs. Falls back to 320x320 if unknown.
|
||||
|
||||
Preference order:
|
||||
1) Explicit header keys: width/height
|
||||
2) Use provided layout to interpret shape
|
||||
3) Heuristics on shape
|
||||
"""
|
||||
try:
|
||||
if "width" in header and "height" in header:
|
||||
return int(header["width"]), int(header["height"])
|
||||
|
||||
shape = tuple(header.get("shape", []))
|
||||
layout = header.get("layout") or header.get("order")
|
||||
|
||||
if layout and shape:
|
||||
layout = str(layout).upper()
|
||||
if len(shape) == 4:
|
||||
if layout == "NCHW":
|
||||
return int(shape[3]), int(shape[2])
|
||||
if layout == "NHWC":
|
||||
return int(shape[2]), int(shape[1])
|
||||
if len(shape) == 3:
|
||||
if layout == "CHW":
|
||||
return int(shape[2]), int(shape[1])
|
||||
if layout == "HWC":
|
||||
return int(shape[1]), int(shape[0])
|
||||
|
||||
if shape:
|
||||
if len(shape) == 4:
|
||||
_, d1, d2, d3 = shape
|
||||
if d1 in (1, 3):
|
||||
return int(d3), int(d2)
|
||||
if d3 in (1, 3):
|
||||
return int(d2), int(d1)
|
||||
return int(d2), int(d1)
|
||||
if len(shape) == 3:
|
||||
d0, d1, d2 = shape
|
||||
if d0 in (1, 3):
|
||||
return int(d2), int(d1)
|
||||
if d2 in (1, 3):
|
||||
return int(d1), int(d0)
|
||||
return int(d1), int(d0)
|
||||
if len(shape) == 2:
|
||||
h, w = shape
|
||||
return int(w), int(h)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to extract input size from header: {e}")
|
||||
|
||||
logger.debug("Falling back to default input size (320x320)")
|
||||
return 320, 320
|
||||
|
||||
def _build_response(self, result: np.ndarray) -> List[bytes]:
|
||||
"""
|
||||
Build the response message.
|
||||
|
||||
Args:
|
||||
result: Detection results
|
||||
|
||||
Returns:
|
||||
List of response frames
|
||||
"""
|
||||
try:
|
||||
# Build header
|
||||
header = {
|
||||
"shape": list(result.shape),
|
||||
"dtype": str(result.dtype.name),
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
header_bytes = json.dumps(header).encode("utf-8")
|
||||
|
||||
# Convert result to bytes
|
||||
result_bytes = result.tobytes(order="C")
|
||||
|
||||
return [header_bytes, result_bytes]
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to build response: {e}")
|
||||
# Return zero result as fallback
|
||||
header = {
|
||||
"shape": [20, 6],
|
||||
"dtype": "float32",
|
||||
"error": "Failed to build response",
|
||||
}
|
||||
header_bytes = json.dumps(header).encode("utf-8")
|
||||
result_bytes = self.zero_result.tobytes(order="C")
|
||||
return [header_bytes, result_bytes]
|
||||
|
||||
def _handle_model_request(self, header: dict) -> List[bytes]:
|
||||
"""
|
||||
Handle model availability request.
|
||||
|
||||
Args:
|
||||
header: Request header containing model information
|
||||
|
||||
Returns:
|
||||
Response message indicating model availability
|
||||
"""
|
||||
model_name = header.get("model_name")
|
||||
|
||||
if not model_name:
|
||||
logger.error("Model request missing model_name")
|
||||
return self._build_error_response("Model request missing model_name")
|
||||
|
||||
logger.info(f"Model availability request for: {model_name}")
|
||||
|
||||
if self._check_model_exists(model_name):
|
||||
logger.info(f"Model {model_name} exists locally")
|
||||
# Try to load the model
|
||||
if self._load_model(model_name):
|
||||
response_header = {
|
||||
"model_available": True,
|
||||
"model_loaded": True,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} loaded successfully",
|
||||
}
|
||||
else:
|
||||
response_header = {
|
||||
"model_available": True,
|
||||
"model_loaded": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} exists but failed to load",
|
||||
}
|
||||
else:
|
||||
logger.info(f"Model {model_name} not found, requesting transfer")
|
||||
response_header = {
|
||||
"model_available": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} not found, please send model data",
|
||||
}
|
||||
|
||||
return [json.dumps(response_header).encode("utf-8")]
|
||||
|
||||
def _handle_model_data(self, header: dict, model_data: bytes) -> List[bytes]:
|
||||
"""
|
||||
Handle model data transfer.
|
||||
|
||||
Args:
|
||||
header: Request header containing model information
|
||||
model_data: Binary model data
|
||||
|
||||
Returns:
|
||||
Response message indicating save success/failure
|
||||
"""
|
||||
model_name = header.get("model_name")
|
||||
|
||||
if not model_name:
|
||||
logger.error("Model data missing model_name")
|
||||
return self._build_error_response("Model data missing model_name")
|
||||
|
||||
logger.info(f"Received model data for: {model_name}")
|
||||
|
||||
if self._save_model(model_name, model_data):
|
||||
# Try to load the model
|
||||
if self._load_model(model_name):
|
||||
response_header = {
|
||||
"model_saved": True,
|
||||
"model_loaded": True,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} saved and loaded successfully",
|
||||
}
|
||||
else:
|
||||
response_header = {
|
||||
"model_saved": True,
|
||||
"model_loaded": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Model {model_name} saved but failed to load",
|
||||
}
|
||||
else:
|
||||
response_header = {
|
||||
"model_saved": False,
|
||||
"model_loaded": False,
|
||||
"model_name": model_name,
|
||||
"message": f"Failed to save model {model_name}",
|
||||
}
|
||||
|
||||
return [json.dumps(response_header).encode("utf-8")]
|
||||
|
||||
def _build_error_response(self, error_msg: str) -> List[bytes]:
|
||||
"""Build an error response message."""
|
||||
error_header = {"error": error_msg}
|
||||
return [json.dumps(error_header).encode("utf-8")]
|
||||
|
||||
# Removed legacy single-thread start_server implementation in favor of worker pool
|
||||
|
||||
def _send_error_response(self, error_msg: str):
|
||||
"""Send an error response to the client."""
|
||||
try:
|
||||
error_header = {"shape": [20, 6], "dtype": "float32", "error": error_msg}
|
||||
error_response = [
|
||||
json.dumps(error_header).encode("utf-8"),
|
||||
self.zero_result.tobytes(order="C"),
|
||||
]
|
||||
self.socket.send_multipart(error_response)
|
||||
except Exception as send_error:
|
||||
logger.error(f"Failed to send error response: {send_error}")
|
||||
|
||||
def cleanup(self):
|
||||
"""Clean up resources."""
|
||||
try:
|
||||
if self.socket:
|
||||
self.socket.close()
|
||||
self.socket = None
|
||||
if self.context:
|
||||
self.context.term()
|
||||
self.context = None
|
||||
logger.info("Cleanup completed")
|
||||
except Exception as e:
|
||||
logger.error(f"Cleanup error: {e}")
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function to run the ZMQ ONNX client."""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(description="ZMQ TCP ONNX Runtime Client")
|
||||
parser.add_argument(
|
||||
"--endpoint",
|
||||
default="tcp://*:5555",
|
||||
help="ZMQ TCP endpoint (default: tcp://*:5555)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--model",
|
||||
default="AUTO",
|
||||
help="Path to ONNX model file or AUTO for automatic model management",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--providers",
|
||||
nargs="+",
|
||||
default=["CoreMLExecutionProvider"],
|
||||
help="ONNX Runtime execution providers",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--workers",
|
||||
type=int,
|
||||
default=4,
|
||||
help="Number of REP worker threads",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--verbose", "-v", action="store_true", help="Enable verbose logging"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.verbose:
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
|
||||
# Create and start client
|
||||
client = ZmqOnnxClient(
|
||||
endpoint=args.endpoint, model_path=args.model, providers=args.providers
|
||||
)
|
||||
|
||||
try:
|
||||
client.start_server(num_workers=args.workers)
|
||||
except KeyboardInterrupt:
|
||||
logger.info("Interrupted by user")
|
||||
finally:
|
||||
client.cleanup()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@@ -144,7 +144,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
EventMetadataTypeEnum.regenerate_description
|
||||
)
|
||||
self.recordings_subscriber = RecordingsDataSubscriber(
|
||||
RecordingsDataTypeEnum.recordings_available_through
|
||||
RecordingsDataTypeEnum.saved
|
||||
)
|
||||
self.review_subscriber = ReviewDataSubscriber("")
|
||||
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video.value)
|
||||
@@ -313,6 +313,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
if resp is not None:
|
||||
return resp
|
||||
|
||||
logger.error(f"No processor handled the topic {topic}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Unable to handle embeddings request {e}", exc_info=True)
|
||||
@@ -524,20 +525,28 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
def _process_recordings_updates(self) -> None:
|
||||
"""Process recordings updates."""
|
||||
while True:
|
||||
recordings_data = self.recordings_subscriber.check_for_update()
|
||||
update = self.recordings_subscriber.check_for_update()
|
||||
|
||||
if recordings_data == None:
|
||||
if not update:
|
||||
break
|
||||
|
||||
camera, recordings_available_through_timestamp = recordings_data
|
||||
(raw_topic, payload) = update
|
||||
|
||||
self.recordings_available_through[camera] = (
|
||||
recordings_available_through_timestamp
|
||||
)
|
||||
if not raw_topic or not payload:
|
||||
break
|
||||
|
||||
logger.debug(
|
||||
f"{camera} now has recordings available through {recordings_available_through_timestamp}"
|
||||
)
|
||||
topic = str(raw_topic)
|
||||
|
||||
if topic.endswith(RecordingsDataTypeEnum.saved.value):
|
||||
camera, recordings_available_through_timestamp, _ = payload
|
||||
|
||||
self.recordings_available_through[camera] = (
|
||||
recordings_available_through_timestamp
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
f"{camera} now has recordings available through {recordings_available_through_timestamp}"
|
||||
)
|
||||
|
||||
def _process_review_updates(self) -> None:
|
||||
"""Process review updates."""
|
||||
|
@@ -27,11 +27,12 @@ FACENET_INPUT_SIZE = 160
|
||||
|
||||
class FaceNetEmbedding(BaseEmbedding):
|
||||
def __init__(self):
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
super().__init__(
|
||||
model_name="facedet",
|
||||
model_file="facenet.tflite",
|
||||
download_urls={
|
||||
"facenet.tflite": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite",
|
||||
"facenet.tflite": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite",
|
||||
},
|
||||
)
|
||||
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
|
||||
@@ -114,11 +115,12 @@ class FaceNetEmbedding(BaseEmbedding):
|
||||
|
||||
class ArcfaceEmbedding(BaseEmbedding):
|
||||
def __init__(self, config: FaceRecognitionConfig):
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
super().__init__(
|
||||
model_name="facedet",
|
||||
model_file="arcface.onnx",
|
||||
download_urls={
|
||||
"arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
|
||||
"arcface.onnx": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
|
||||
},
|
||||
)
|
||||
self.config = config
|
||||
|
@@ -37,11 +37,12 @@ class PaddleOCRDetection(BaseEmbedding):
|
||||
if model_size == "large"
|
||||
else "detection_v5-small.onnx"
|
||||
)
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
super().__init__(
|
||||
model_name="paddleocr-onnx",
|
||||
model_file=model_file,
|
||||
download_urls={
|
||||
model_file: f"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{'v3' if model_size == 'large' else 'v5'}/{model_file}"
|
||||
model_file: f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{'v3' if model_size == 'large' else 'v5'}/{model_file}"
|
||||
},
|
||||
)
|
||||
self.requestor = requestor
|
||||
@@ -97,11 +98,12 @@ class PaddleOCRClassification(BaseEmbedding):
|
||||
requestor: InterProcessRequestor,
|
||||
device: str = "AUTO",
|
||||
):
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
super().__init__(
|
||||
model_name="paddleocr-onnx",
|
||||
model_file="classification.onnx",
|
||||
download_urls={
|
||||
"classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
|
||||
"classification.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
|
||||
},
|
||||
)
|
||||
self.requestor = requestor
|
||||
@@ -157,12 +159,13 @@ class PaddleOCRRecognition(BaseEmbedding):
|
||||
requestor: InterProcessRequestor,
|
||||
device: str = "AUTO",
|
||||
):
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
super().__init__(
|
||||
model_name="paddleocr-onnx",
|
||||
model_file="recognition_v4.onnx",
|
||||
download_urls={
|
||||
"recognition_v4.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/recognition_v4.onnx",
|
||||
"ppocr_keys_v1.txt": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/ppocr_keys_v1.txt",
|
||||
"recognition_v4.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/recognition_v4.onnx",
|
||||
"ppocr_keys_v1.txt": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/ppocr_keys_v1.txt",
|
||||
},
|
||||
)
|
||||
self.requestor = requestor
|
||||
@@ -218,11 +221,12 @@ class LicensePlateDetector(BaseEmbedding):
|
||||
requestor: InterProcessRequestor,
|
||||
device: str = "AUTO",
|
||||
):
|
||||
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
|
||||
super().__init__(
|
||||
model_name="yolov9_license_plate",
|
||||
model_file="yolov9-256-license-plates.onnx",
|
||||
download_urls={
|
||||
"yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
|
||||
"yolov9-256-license-plates.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
|
||||
},
|
||||
)
|
||||
|
||||
|
@@ -44,6 +44,7 @@ class GenAIClient:
|
||||
concerns: list[str],
|
||||
preferred_language: str | None,
|
||||
debug_save: bool,
|
||||
activity_context_prompt: str,
|
||||
) -> ReviewMetadata | None:
|
||||
"""Generate a description for the review item activity."""
|
||||
|
||||
@@ -65,29 +66,36 @@ class GenAIClient:
|
||||
context_prompt = f"""
|
||||
Please analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
|
||||
|
||||
Your task is to provide a clear, security-focused description of the scene that:
|
||||
Your task is to provide a clear, accurate description of the scene that:
|
||||
1. States exactly what is happening based on observable actions and movements.
|
||||
2. Identifies and emphasizes behaviors that match patterns of suspicious activity.
|
||||
2. Evaluates whether the observable evidence suggests normal activity for this property or genuine security concerns.
|
||||
3. Assigns a potential_threat_level based on the definitions below, applying them consistently.
|
||||
|
||||
Facts come first, but identifying security risks is the primary goal.
|
||||
Provide an objective assessment. The goal is accuracy—neither missing genuine threats nor over-flagging routine activity for this property.
|
||||
|
||||
When forming your description:
|
||||
- Describe the time, people, and objects exactly as seen. Include any observable environmental changes (e.g., lighting changes triggered by activity).
|
||||
- Time of day should **increase suspicion only when paired with unusual or security-relevant behaviors**. Do not raise the threat level for common residential activities (e.g., residents walking pets, retrieving mail, gardening, playing with pets, supervising children) even at unusual hours, unless other suspicious indicators are present.
|
||||
- Focus on behaviors that are uncharacteristic of innocent activity: loitering without clear purpose, avoiding cameras, inspecting vehicles/doors, changing behavior when lights activate, scanning surroundings without an apparent benign reason.
|
||||
- **Benign context override**: If scanning or looking around is clearly part of an innocent activity (such as playing with a dog, gardening, supervising children, or watching for a pet), do not treat it as suspicious.
|
||||
- **CRITICAL: Only describe objects explicitly listed in "Detected objects" below.** Do not infer or mention additional people, vehicles, or objects not present in the detected objects list, even if visual patterns suggest them. If only a car is detected, do not describe a person interacting with it unless "person" is also in the detected objects list.
|
||||
- **Only describe actions actually visible in the frames.** Do not assume or infer actions that you don't observe happening. If someone walks toward furniture but you never see them sit, do not say they sat. Stick to what you can see across the sequence.
|
||||
- Describe what you observe: actions, movements, interactions with objects and the environment. Include any observable environmental changes (e.g., lighting changes triggered by activity).
|
||||
- Note visible details such as clothing, items being carried or placed, tools or equipment present, and how they interact with the property or objects.
|
||||
- Consider the full sequence chronologically: what happens from start to finish, how duration and actions relate to the location and objects involved.
|
||||
- **Use the actual timestamp provided in "Activity started at"** below for time of day context—do not infer time from image brightness or darkness. Unusual hours (late night/early morning) should increase suspicion when the observable behavior itself appears questionable. However, recognize that some legitimate activities can occur at any hour.
|
||||
- Identify patterns that suggest genuine security concerns: testing doors/windows on vehicles or buildings, accessing unauthorized areas, attempting to conceal actions, extended loitering without apparent purpose, taking items, behavior that clearly doesn't align with the zone context and detected objects.
|
||||
- **Weigh all evidence holistically**: Consider the complete picture including zone, objects, time, and actions together. A single ambiguous action should not override strong contextual evidence of normal activity. The overall pattern determines the threat level.
|
||||
|
||||
**Normal activity patterns for this property:**
|
||||
{activity_context_prompt}
|
||||
|
||||
Your response MUST be a flat JSON object with:
|
||||
- `scene` (string): A full description including setting, entities, actions, and any plausible supported inferences.
|
||||
- `confidence` (float): 0-1 confidence in the analysis.
|
||||
- `potential_threat_level` (integer): 0, 1, or 2 as defined below.
|
||||
- `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
|
||||
- `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
|
||||
- `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
|
||||
{get_concern_prompt()}
|
||||
|
||||
Threat-level definitions:
|
||||
- 0 — Typical or expected activity for this location/time (includes residents, guests, or known animals engaged in normal activities, even if they glance around or scan surroundings).
|
||||
- 1 — Unusual or suspicious activity: At least one security-relevant behavior is present **and not explainable by a normal residential activity**.
|
||||
- 2 — Active or immediate threat: Breaking in, vandalism, aggression, weapon display.
|
||||
- 0 — Normal activity: What you observe is consistent with expected activity for this property type. The observable evidence—considering zone context, detected objects, and timing together—supports a benign explanation. Use this for routine activities even if minor ambiguous elements exist.
|
||||
- 1 — Potentially suspicious: Observable behavior raises genuine security concerns that warrant human review. The evidence doesn't support a routine explanation when you consider the zone, objects, and actions together. Examples: testing doors/windows on vehicles or structures, accessing areas that don't align with the activity, taking items that likely don't belong to them, behavior clearly inconsistent with the zone and context, or activity that lacks any visible legitimate indicators. Reserve this level for situations that actually merit closer attention—not routine activities for this property.
|
||||
- 2 — Immediate threat: Clear evidence of forced entry, break-in, vandalism, aggression, weapons, theft in progress, or active property damage.
|
||||
|
||||
Sequence details:
|
||||
- Frame 1 = earliest, Frame {len(thumbnails)} = latest
|
||||
@@ -98,8 +106,9 @@ Sequence details:
|
||||
|
||||
**IMPORTANT:**
|
||||
- Values must be plain strings, floats, or integers — no nested objects, no extra commentary.
|
||||
- Only describe objects from the "Detected objects" list above. Do not hallucinate additional objects.
|
||||
{get_language_prompt()}
|
||||
"""
|
||||
"""
|
||||
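For clarity, here is a minimal sketch (not part of the diff; all names besides the three response fields are illustrative) of how the flat JSON object described by the prompt above could be validated before being trusted downstream:

```python
# Minimal validation sketch for the flat JSON response described in the prompt.
# The field names come from the prompt above; everything else is illustrative.
import json


def parse_review_metadata(raw: str) -> dict | None:
    try:
        data = json.loads(raw)
    except json.JSONDecodeError:
        return None  # LLMs occasionally ignore the requested output format

    scene = data.get("scene")
    confidence = data.get("confidence")
    threat = data.get("potential_threat_level")

    if (
        not isinstance(scene, str)
        or not isinstance(confidence, (int, float))
        or threat not in (0, 1, 2)
    ):
        return None

    return {
        "scene": scene,
        "confidence": max(0.0, min(1.0, float(confidence))),
        "potential_threat_level": int(threat),
    }
```

This mirrors the defensive handling seen later in the diff, where a malformed response is caught and `None` is returned.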
logger.debug(
|
||||
f"Sending {len(thumbnails)} images to create review description on {review_data['camera']}"
|
||||
)
|
||||
@@ -135,6 +144,7 @@ Sequence details:
|
||||
if review_data["recognized_objects"]:
|
||||
metadata.potential_threat_level = 0
|
||||
|
||||
metadata.time = review_data["start"]
|
||||
return metadata
|
||||
except Exception as e:
|
||||
# rarely LLMs can fail to follow directions on output format
|
||||
@@ -146,34 +156,75 @@ Sequence details:
|
||||
return None
|
||||
|
||||
def generate_review_summary(
|
||||
self, start_ts: float, end_ts: float, segments: list[dict[str, Any]]
|
||||
self,
|
||||
start_ts: float,
|
||||
end_ts: float,
|
||||
segments: list[dict[str, Any]],
|
||||
debug_save: bool,
|
||||
) -> str | None:
|
||||
"""Generate a summary of review item descriptions over a period of time."""
|
||||
time_range = f"{datetime.datetime.fromtimestamp(start_ts).strftime('%I:%M %p')} to {datetime.datetime.fromtimestamp(end_ts).strftime('%I:%M %p')}"
|
||||
time_range = f"{datetime.datetime.fromtimestamp(start_ts).strftime('%B %d, %Y at %I:%M %p')} to {datetime.datetime.fromtimestamp(end_ts).strftime('%B %d, %Y at %I:%M %p')}"
|
||||
timeline_summary_prompt = f"""
|
||||
You are a security officer. Time range: {time_range}.
|
||||
You are a security officer.
|
||||
Time range: {time_range}.
|
||||
Input: JSON list with "scene", "confidence", "potential_threat_level" (1-2), "other_concerns".
|
||||
Write a report:
|
||||
|
||||
Security Summary - {time_range}
|
||||
[One-sentence overview of activity]
|
||||
[Chronological bullet list of events with timestamps if in scene]
|
||||
[Final threat assessment]
|
||||
Task: Write a concise, human-presentable security report in markdown format.
|
||||
|
||||
Rules:
|
||||
- List events in order.
|
||||
- Highlight potential_threat_level ≥ 1 with exact times.
|
||||
- Note any of the additional concerns which are present.
|
||||
- Note unusual activity even if not threats.
|
||||
- If no threats: "Final assessment: Only normal activity observed during this period."
|
||||
- No commentary, questions, or recommendations.
|
||||
- Output only the report.
|
||||
"""
|
||||
Rules for the report:
|
||||
|
||||
- Title & overview
|
||||
- Start with:
|
||||
# Security Summary - {time_range}
|
||||
- Write a 1-2 sentence situational overview capturing the general pattern of the period.
|
||||
|
||||
- Event details
|
||||
- Present events in chronological order as a bullet list.
|
||||
- **If multiple events occur within the same minute or overlapping time range, COMBINE them into a single bullet.**
|
||||
- Summarize the distinct activities as sub-points under the shared timestamp.
|
||||
- If no timestamp is given, preserve order but label as “Time not specified.”
|
||||
- Use bold timestamps for clarity.
|
||||
- Group bullets under subheadings when multiple events fall into the same category (e.g., Vehicle Activity, Porch Activity, Unusual Behavior).
|
||||
|
||||
- Threat levels
|
||||
- Always show (threat level: X) for each event.
|
||||
- If multiple events at the same time share the same threat level, only state it once.
|
||||
|
||||
- Final assessment
|
||||
- End with a Final Assessment section.
|
||||
- If all events are threat level 1 with no escalation:
|
||||
Final assessment: Only normal residential activity observed during this period.
|
||||
- If threat level 2+ events are present, clearly summarize them as Potential concerns requiring review.
|
||||
|
||||
- Conciseness
|
||||
- Do not repeat benign clothing/appearance details unless they distinguish individuals.
|
||||
- Summarize similar routine events instead of restating full scene descriptions.
|
||||
"""
|
||||
|
||||
for item in segments:
|
||||
timeline_summary_prompt += f"\n{item}"
|
||||
|
||||
return self._send(timeline_summary_prompt, [])
|
||||
if debug_save:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}", "prompt.txt"
|
||||
),
|
||||
"w",
|
||||
) as f:
|
||||
f.write(timeline_summary_prompt)
|
||||
|
||||
response = self._send(timeline_summary_prompt, [])
|
||||
|
||||
if debug_save and response:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}", "response.txt"
|
||||
),
|
||||
"w",
|
||||
) as f:
|
||||
f.write(response)
|
||||
|
||||
return response
|
||||
|
||||
def generate_object_description(
|
||||
self,
|
||||
|
@@ -80,9 +80,7 @@ class RecordingMaintainer(threading.Thread):
|
||||
[CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.record],
|
||||
)
|
||||
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all.value)
|
||||
self.recordings_publisher = RecordingsDataPublisher(
|
||||
RecordingsDataTypeEnum.recordings_available_through
|
||||
)
|
||||
self.recordings_publisher = RecordingsDataPublisher()
|
||||
|
||||
self.stop_event = stop_event
|
||||
self.object_recordings_info: dict[str, list] = defaultdict(list)
|
||||
@@ -98,6 +96,41 @@ class RecordingMaintainer(threading.Thread):
|
||||
and not d.startswith("preview_")
|
||||
]
|
||||
|
||||
# publish newest cached segment per camera (including in use files)
|
||||
newest_cache_segments: dict[str, dict[str, Any]] = {}
|
||||
for cache in cache_files:
|
||||
cache_path = os.path.join(CACHE_DIR, cache)
|
||||
basename = os.path.splitext(cache)[0]
|
||||
camera, date = basename.rsplit("@", maxsplit=1)
|
||||
start_time = datetime.datetime.strptime(
|
||||
date, CACHE_SEGMENT_FORMAT
|
||||
).astimezone(datetime.timezone.utc)
|
||||
if (
|
||||
camera not in newest_cache_segments
|
||||
or start_time > newest_cache_segments[camera]["start_time"]
|
||||
):
|
||||
newest_cache_segments[camera] = {
|
||||
"start_time": start_time,
|
||||
"cache_path": cache_path,
|
||||
}
|
||||
|
||||
for camera, newest in newest_cache_segments.items():
|
||||
self.recordings_publisher.publish(
|
||||
(
|
||||
camera,
|
||||
newest["start_time"].timestamp(),
|
||||
newest["cache_path"],
|
||||
),
|
||||
RecordingsDataTypeEnum.latest.value,
|
||||
)
|
||||
# publish None for cameras with no cache files (but only if we know the camera exists)
|
||||
for camera_name in self.config.cameras:
|
||||
if camera_name not in newest_cache_segments:
|
||||
self.recordings_publisher.publish(
|
||||
(camera_name, None, None),
|
||||
RecordingsDataTypeEnum.latest.value,
|
||||
)
|
||||
|
||||
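A small standalone sketch of the filename parsing used above to find the newest cached segment per camera. The strftime pattern assigned to CACHE_SEGMENT_FORMAT here is an assumption for illustration only; the real constant comes from frigate.const.

```python
# Sketch: map a cached segment filename to (camera, start_time).
import datetime
import os

CACHE_SEGMENT_FORMAT = "%Y%m%d%H%M%S"  # assumed strftime pattern, for illustration

cache = "front_door@20240101120000.mp4"  # hypothetical cache file name
basename = os.path.splitext(cache)[0]
camera, date = basename.rsplit("@", maxsplit=1)
start_time = datetime.datetime.strptime(date, CACHE_SEGMENT_FORMAT).astimezone(
    datetime.timezone.utc
)
print(camera, start_time.timestamp())
```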
files_in_use = []
|
||||
for process in psutil.process_iter():
|
||||
try:
|
||||
@@ -111,7 +144,7 @@ class RecordingMaintainer(threading.Thread):
|
||||
except psutil.Error:
|
||||
continue
|
||||
|
||||
# group recordings by camera
|
||||
# group recordings by camera (skip in-use for validation/moving)
|
||||
grouped_recordings: defaultdict[str, list[dict[str, Any]]] = defaultdict(list)
|
||||
for cache in cache_files:
|
||||
# Skip files currently in use
|
||||
@@ -233,7 +266,9 @@ class RecordingMaintainer(threading.Thread):
|
||||
recordings[0]["start_time"].timestamp()
|
||||
if self.config.cameras[camera].record.enabled
|
||||
else None,
|
||||
)
|
||||
None,
|
||||
),
|
||||
RecordingsDataTypeEnum.saved.value,
|
||||
)
|
||||
|
||||
recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)
|
||||
@@ -250,7 +285,7 @@ class RecordingMaintainer(threading.Thread):
|
||||
|
||||
async def validate_and_move_segment(
|
||||
self, camera: str, reviews: list[ReviewSegment], recording: dict[str, Any]
|
||||
) -> None:
|
||||
) -> Optional[Recordings]:
|
||||
cache_path: str = recording["cache_path"]
|
||||
start_time: datetime.datetime = recording["start_time"]
|
||||
record_config = self.config.cameras[camera].record
|
||||
@@ -261,7 +296,7 @@ class RecordingMaintainer(threading.Thread):
|
||||
or not self.config.cameras[camera].record.enabled
|
||||
):
|
||||
self.drop_segment(cache_path)
|
||||
return
|
||||
return None
|
||||
|
||||
if cache_path in self.end_time_cache:
|
||||
end_time, duration = self.end_time_cache[cache_path]
|
||||
@@ -270,10 +305,18 @@ class RecordingMaintainer(threading.Thread):
|
||||
self.config.ffmpeg, cache_path, get_duration=True
|
||||
)
|
||||
|
||||
if segment_info["duration"]:
|
||||
duration = float(segment_info["duration"])
|
||||
else:
|
||||
duration = -1
|
||||
if not segment_info.get("has_valid_video", False):
|
||||
logger.warning(
|
||||
f"Invalid or missing video stream in segment {cache_path}. Discarding."
|
||||
)
|
||||
self.recordings_publisher.publish(
|
||||
(camera, start_time.timestamp(), cache_path),
|
||||
RecordingsDataTypeEnum.invalid.value,
|
||||
)
|
||||
self.drop_segment(cache_path)
|
||||
return None
|
||||
|
||||
duration = float(segment_info.get("duration", -1))
|
||||
|
||||
# ensure duration is within expected length
|
||||
if 0 < duration < MAX_SEGMENT_DURATION:
|
||||
@@ -284,8 +327,18 @@ class RecordingMaintainer(threading.Thread):
|
||||
logger.warning(f"Failed to probe corrupt segment {cache_path}")
|
||||
|
||||
logger.warning(f"Discarding a corrupt recording segment: {cache_path}")
|
||||
Path(cache_path).unlink(missing_ok=True)
|
||||
return
|
||||
self.recordings_publisher.publish(
|
||||
(camera, start_time.timestamp(), cache_path),
|
||||
RecordingsDataTypeEnum.invalid.value,
|
||||
)
|
||||
self.drop_segment(cache_path)
|
||||
return None
|
||||
|
||||
# this segment has a valid duration and has video data, so publish an update
|
||||
self.recordings_publisher.publish(
|
||||
(camera, start_time.timestamp(), cache_path),
|
||||
RecordingsDataTypeEnum.valid.value,
|
||||
)
|
||||
|
||||
record_config = self.config.cameras[camera].record
|
||||
highest = None
|
||||
|
@@ -1,7 +1,7 @@
|
||||
import logging
|
||||
import random
|
||||
import string
|
||||
from typing import Any, Sequence
|
||||
from typing import Any, Sequence, cast
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
@@ -17,6 +17,11 @@ from frigate.camera import PTZMetrics
|
||||
from frigate.config import CameraConfig
|
||||
from frigate.ptz.autotrack import PtzMotionEstimator
|
||||
from frigate.track import ObjectTracker
|
||||
from frigate.track.stationary_classifier import (
|
||||
StationaryMotionClassifier,
|
||||
StationaryThresholds,
|
||||
get_stationary_threshold,
|
||||
)
|
||||
from frigate.util.image import (
|
||||
SharedMemoryFrameManager,
|
||||
get_histogram,
|
||||
@@ -27,12 +32,6 @@ from frigate.util.object import average_boxes, median_of_boxes
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
THRESHOLD_KNOWN_ACTIVE_IOU = 0.2
|
||||
THRESHOLD_STATIONARY_CHECK_IOU = 0.6
|
||||
THRESHOLD_ACTIVE_CHECK_IOU = 0.9
|
||||
MAX_STATIONARY_HISTORY = 10
|
||||
|
||||
|
||||
# Normalizes distance from estimate relative to object size
|
||||
# Other ideas:
|
||||
# - if estimates are inaccurate for first N detections, compare with last_detection (may be fine)
|
||||
@@ -119,6 +118,7 @@ class NorfairTracker(ObjectTracker):
|
||||
self.ptz_motion_estimator: PtzMotionEstimator | None = None
|
||||
self.camera_name = config.name
|
||||
self.track_id_map: dict[str, str] = {}
|
||||
self.stationary_classifier = StationaryMotionClassifier()
|
||||
|
||||
# Define tracker configurations for static camera
|
||||
self.object_type_configs = {
|
||||
@@ -321,23 +321,15 @@ class NorfairTracker(ObjectTracker):
|
||||
|
||||
# tracks the current position of the object based on the last N bounding boxes
|
||||
# returns False if the object has moved outside its previous position
|
||||
def update_position(self, id: str, box: list[int], stationary: bool) -> bool:
|
||||
xmin, ymin, xmax, ymax = box
|
||||
position = self.positions[id]
|
||||
self.stationary_box_history[id].append(box)
|
||||
|
||||
if len(self.stationary_box_history[id]) > MAX_STATIONARY_HISTORY:
|
||||
self.stationary_box_history[id] = self.stationary_box_history[id][
|
||||
-MAX_STATIONARY_HISTORY:
|
||||
]
|
||||
|
||||
avg_iou = intersection_over_union(
|
||||
box, average_boxes(self.stationary_box_history[id])
|
||||
)
|
||||
|
||||
# object has minimal or zero iou
|
||||
# assume object is active
|
||||
if avg_iou < THRESHOLD_KNOWN_ACTIVE_IOU:
|
||||
def update_position(
|
||||
self,
|
||||
id: str,
|
||||
box: list[int],
|
||||
stationary: bool,
|
||||
thresholds: StationaryThresholds,
|
||||
yuv_frame: np.ndarray | None,
|
||||
) -> bool:
|
||||
def reset_position(xmin: int, ymin: int, xmax: int, ymax: int) -> None:
|
||||
self.positions[id] = {
|
||||
"xmins": [xmin],
|
||||
"ymins": [ymin],
|
||||
@@ -348,13 +340,50 @@ class NorfairTracker(ObjectTracker):
|
||||
"xmax": xmax,
|
||||
"ymax": ymax,
|
||||
}
|
||||
return False
|
||||
|
||||
xmin, ymin, xmax, ymax = box
|
||||
position = self.positions[id]
|
||||
self.stationary_box_history[id].append(box)
|
||||
|
||||
if len(self.stationary_box_history[id]) > thresholds.max_stationary_history:
|
||||
self.stationary_box_history[id] = self.stationary_box_history[id][
|
||||
-thresholds.max_stationary_history :
|
||||
]
|
||||
|
||||
avg_box = average_boxes(self.stationary_box_history[id])
|
||||
avg_iou = intersection_over_union(box, avg_box)
|
||||
median_box = median_of_boxes(self.stationary_box_history[id])
|
||||
|
||||
# Establish anchor early when stationary and stable
|
||||
if stationary and yuv_frame is not None:
|
||||
history = self.stationary_box_history[id]
|
||||
if id not in self.stationary_classifier.anchor_crops and len(history) >= 5:
|
||||
stability_iou = intersection_over_union(avg_box, median_box)
|
||||
if stability_iou >= 0.7:
|
||||
self.stationary_classifier.ensure_anchor(
|
||||
id, yuv_frame, cast(tuple[int, int, int, int], median_box)
|
||||
)
|
||||
|
||||
# object has minimal or zero iou
|
||||
# assume object is active
|
||||
if avg_iou < thresholds.known_active_iou:
|
||||
if stationary and yuv_frame is not None:
|
||||
if not self.stationary_classifier.evaluate(
|
||||
id, yuv_frame, cast(tuple[int, int, int, int], tuple(box))
|
||||
):
|
||||
reset_position(xmin, ymin, xmax, ymax)
|
||||
return False
|
||||
else:
|
||||
reset_position(xmin, ymin, xmax, ymax)
|
||||
return False
|
||||
|
||||
threshold = (
|
||||
THRESHOLD_STATIONARY_CHECK_IOU if stationary else THRESHOLD_ACTIVE_CHECK_IOU
|
||||
thresholds.stationary_check_iou
|
||||
if stationary
|
||||
else thresholds.active_check_iou
|
||||
)
|
||||
|
||||
# object has iou below threshold, check median to reduce outliers
|
||||
# object has iou below threshold, check median and optionally crop similarity
|
||||
if avg_iou < threshold:
|
||||
median_iou = intersection_over_union(
|
||||
(
|
||||
@@ -363,27 +392,26 @@ class NorfairTracker(ObjectTracker):
|
||||
position["xmax"],
|
||||
position["ymax"],
|
||||
),
|
||||
median_of_boxes(self.stationary_box_history[id]),
|
||||
median_box,
|
||||
)
|
||||
|
||||
# if the median iou drops below the threshold
|
||||
# assume object is no longer stationary
|
||||
if median_iou < threshold:
|
||||
self.positions[id] = {
|
||||
"xmins": [xmin],
|
||||
"ymins": [ymin],
|
||||
"xmaxs": [xmax],
|
||||
"ymaxs": [ymax],
|
||||
"xmin": xmin,
|
||||
"ymin": ymin,
|
||||
"xmax": xmax,
|
||||
"ymax": ymax,
|
||||
}
|
||||
return False
|
||||
# If a YUV frame is available, check with the classifier before flipping the stationary object to active
|
||||
if stationary and yuv_frame is not None:
|
||||
if not self.stationary_classifier.evaluate(
|
||||
id, yuv_frame, cast(tuple[int, int, int, int], tuple(box))
|
||||
):
|
||||
reset_position(xmin, ymin, xmax, ymax)
|
||||
return False
|
||||
else:
|
||||
reset_position(xmin, ymin, xmax, ymax)
|
||||
return False
|
||||
|
||||
# if there are more than 5 and less than 10 entries for the position, add the bounding box
|
||||
# and recompute the position box
|
||||
if 5 <= len(position["xmins"]) < 10:
|
||||
if len(position["xmins"]) < 10:
|
||||
position["xmins"].append(xmin)
|
||||
position["ymins"].append(ymin)
|
||||
position["xmaxs"].append(xmax)
|
||||
@@ -416,7 +444,13 @@ class NorfairTracker(ObjectTracker):
|
||||
|
||||
return False
|
||||
|
||||
def update(self, track_id: str, obj: dict[str, Any]) -> None:
|
||||
def update(
|
||||
self,
|
||||
track_id: str,
|
||||
obj: dict[str, Any],
|
||||
thresholds: StationaryThresholds,
|
||||
yuv_frame: np.ndarray | None,
|
||||
) -> None:
|
||||
id = self.track_id_map[track_id]
|
||||
self.disappeared[id] = 0
|
||||
stationary = (
|
||||
@@ -424,7 +458,7 @@ class NorfairTracker(ObjectTracker):
|
||||
>= self.detect_config.stationary.threshold
|
||||
)
|
||||
# update the motionless count if the object has not moved to a new position
|
||||
if self.update_position(id, obj["box"], stationary):
|
||||
if self.update_position(id, obj["box"], stationary, thresholds, yuv_frame):
|
||||
self.tracked_objects[id]["motionless_count"] += 1
|
||||
if self.is_expired(id):
|
||||
self.deregister(id, track_id)
|
||||
@@ -440,6 +474,7 @@ class NorfairTracker(ObjectTracker):
|
||||
self.tracked_objects[id]["position_changes"] += 1
|
||||
self.tracked_objects[id]["motionless_count"] = 0
|
||||
self.stationary_box_history[id] = []
|
||||
self.stationary_classifier.on_active(id)
|
||||
|
||||
self.tracked_objects[id].update(obj)
|
||||
|
||||
@@ -467,6 +502,15 @@ class NorfairTracker(ObjectTracker):
|
||||
) -> None:
|
||||
# Group detections by object type
|
||||
detections_by_type: dict[str, list[Detection]] = {}
|
||||
yuv_frame: np.ndarray | None = None
|
||||
|
||||
if (
|
||||
self.ptz_metrics.autotracker_enabled.value
|
||||
or self.detect_config.stationary.classifier
|
||||
):
|
||||
yuv_frame = self.frame_manager.get(
|
||||
frame_name, self.camera_config.frame_shape_yuv
|
||||
)
|
||||
for obj in detections:
|
||||
label = obj[0]
|
||||
if label not in detections_by_type:
|
||||
@@ -481,9 +525,6 @@ class NorfairTracker(ObjectTracker):
|
||||
|
||||
embedding = None
|
||||
if self.ptz_metrics.autotracker_enabled.value:
|
||||
yuv_frame = self.frame_manager.get(
|
||||
frame_name, self.camera_config.frame_shape_yuv
|
||||
)
|
||||
embedding = get_histogram(
|
||||
yuv_frame, obj[2][0], obj[2][1], obj[2][2], obj[2][3]
|
||||
)
|
||||
@@ -575,7 +616,13 @@ class NorfairTracker(ObjectTracker):
|
||||
self.tracked_objects[id]["estimate"] = new_obj["estimate"]
|
||||
# else update it
|
||||
else:
|
||||
self.update(str(t.global_id), new_obj)
|
||||
thresholds = get_stationary_threshold(new_obj["label"])
|
||||
self.update(
|
||||
str(t.global_id),
|
||||
new_obj,
|
||||
thresholds,
|
||||
yuv_frame if thresholds.motion_classifier_enabled else None,
|
||||
)
|
||||
|
||||
# clear expired tracks
|
||||
expired_ids = [k for k in self.track_id_map.keys() if k not in active_ids]
|
||||
|
254 frigate/track/stationary_classifier.py Normal file
@@ -0,0 +1,254 @@
|
||||
"""Tools for determining if an object is stationary."""
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, cast
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from scipy.ndimage import gaussian_filter
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class StationaryThresholds:
|
||||
"""IOU thresholds and history parameters for stationary object classification.
|
||||
|
||||
This allows different sensitivity settings for different object types.
|
||||
"""
|
||||
|
||||
# Objects to apply these thresholds to
|
||||
# If None, apply to all objects
|
||||
objects: list[str] = field(default_factory=list)
|
||||
|
||||
# Threshold of IoU that causes the object to immediately be considered active
|
||||
# Below this threshold, assume object is active
|
||||
known_active_iou: float = 0.2
|
||||
|
||||
# IOU threshold for checking if stationary object has moved
|
||||
# If mean and median IOU drops below this, assume object is no longer stationary
|
||||
stationary_check_iou: float = 0.6
|
||||
|
||||
# IOU threshold for checking if active object has changed position
|
||||
# Higher threshold makes it more difficult for the object to be considered stationary
|
||||
active_check_iou: float = 0.9
|
||||
|
||||
# Maximum number of bounding boxes to keep in stationary history
|
||||
max_stationary_history: int = 10
|
||||
|
||||
# Whether to use the motion classifier
|
||||
motion_classifier_enabled: bool = False
|
||||
|
||||
|
||||
# Thresholds for objects that are expected to be stationary
|
||||
STATIONARY_OBJECT_THRESHOLDS = StationaryThresholds(
|
||||
objects=["bbq_grill", "package", "waste_bin"],
|
||||
known_active_iou=0.0,
|
||||
motion_classifier_enabled=True,
|
||||
)
|
||||
|
||||
# Thresholds for objects that are active but can be stationary for longer periods of time
|
||||
DYNAMIC_OBJECT_THRESHOLDS = StationaryThresholds(
|
||||
objects=["bicycle", "boat", "car", "motorcycle", "tractor", "truck"],
|
||||
active_check_iou=0.75,
|
||||
motion_classifier_enabled=True,
|
||||
)
|
||||
|
||||
|
||||
def get_stationary_threshold(label: str) -> StationaryThresholds:
|
||||
"""Get the stationary thresholds for a given object label."""
|
||||
|
||||
if label in STATIONARY_OBJECT_THRESHOLDS.objects:
|
||||
return STATIONARY_OBJECT_THRESHOLDS
|
||||
|
||||
if label in DYNAMIC_OBJECT_THRESHOLDS.objects:
|
||||
return DYNAMIC_OBJECT_THRESHOLDS
|
||||
|
||||
return StationaryThresholds()
|
||||
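A quick usage sketch of the per-label lookup defined above (the import path follows the new file's location in this diff; the asserted values are the ones declared in the dataclass instances):

```python
# Labels listed in the tuned dataclasses get their thresholds; everything else
# falls back to the defaults.
from frigate.track.stationary_classifier import get_stationary_threshold

car = get_stationary_threshold("car")
assert car.active_check_iou == 0.75 and car.motion_classifier_enabled

package = get_stationary_threshold("package")
assert package.known_active_iou == 0.0

person = get_stationary_threshold("person")
assert person.stationary_check_iou == 0.6 and not person.motion_classifier_enabled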
|
||||
|
||||
class StationaryMotionClassifier:
|
||||
"""Fallback classifier to prevent false flips from stationary to active.
|
||||
|
||||
Uses appearance consistency on a fixed spatial region (historical median box)
|
||||
to detect actual movement, ignoring bounding box detection variations.
|
||||
"""
|
||||
|
||||
CROP_SIZE = 96
|
||||
NCC_KEEP_THRESHOLD = 0.90 # High correlation = keep stationary
|
||||
NCC_ACTIVE_THRESHOLD = 0.85 # Low correlation = consider active
|
||||
SHIFT_KEEP_THRESHOLD = 0.02 # Small shift = keep stationary
|
||||
SHIFT_ACTIVE_THRESHOLD = 0.04 # Large shift = consider active
|
||||
DRIFT_ACTIVE_THRESHOLD = 0.12 # Cumulative drift over 5 frames
|
||||
CHANGED_FRAMES_TO_FLIP = 2
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.anchor_crops: dict[str, np.ndarray] = {}
|
||||
self.anchor_boxes: dict[str, tuple[int, int, int, int]] = {}
|
||||
self.changed_counts: dict[str, int] = {}
|
||||
self.shift_histories: dict[str, list[float]] = {}
|
||||
|
||||
# Pre-compute Hanning window for phase correlation
|
||||
hann = np.hanning(self.CROP_SIZE).astype(np.float64)
|
||||
self._hann2d = np.outer(hann, hann)
|
||||
|
||||
def reset(self, id: str) -> None:
|
||||
logger.debug("StationaryMotionClassifier.reset: id=%s", id)
|
||||
if id in self.anchor_crops:
|
||||
del self.anchor_crops[id]
|
||||
if id in self.anchor_boxes:
|
||||
del self.anchor_boxes[id]
|
||||
self.changed_counts[id] = 0
|
||||
self.shift_histories[id] = []
|
||||
|
||||
def _extract_y_crop(
|
||||
self, yuv_frame: np.ndarray, box: tuple[int, int, int, int]
|
||||
) -> np.ndarray:
|
||||
"""Extract and normalize Y-plane crop from bounding box."""
|
||||
y_height = yuv_frame.shape[0] // 3 * 2
|
||||
width = yuv_frame.shape[1]
|
||||
x1 = max(0, min(width - 1, box[0]))
|
||||
y1 = max(0, min(y_height - 1, box[1]))
|
||||
x2 = max(0, min(width - 1, box[2]))
|
||||
y2 = max(0, min(y_height - 1, box[3]))
|
||||
|
||||
if x2 <= x1:
|
||||
x2 = min(width - 1, x1 + 1)
|
||||
if y2 <= y1:
|
||||
y2 = min(y_height - 1, y1 + 1)
|
||||
|
||||
# Extract Y-plane crop, resize, and blur
|
||||
y_plane = yuv_frame[0:y_height, 0:width]
|
||||
crop = y_plane[y1:y2, x1:x2]
|
||||
crop_resized = cv2.resize(
|
||||
crop, (self.CROP_SIZE, self.CROP_SIZE), interpolation=cv2.INTER_AREA
|
||||
)
|
||||
result = cast(np.ndarray[Any, Any], gaussian_filter(crop_resized, sigma=0.5))
|
||||
logger.debug(
|
||||
"_extract_y_crop: box=%s clamped=(%d,%d,%d,%d) crop_shape=%s",
|
||||
box,
|
||||
x1,
|
||||
y1,
|
||||
x2,
|
||||
y2,
|
||||
crop.shape if "crop" in locals() else None,
|
||||
)
|
||||
return result
|
||||
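The `_extract_y_crop` helper above relies on the I420 layout of the shared frames; a short sketch of that arithmetic (frame dimensions are illustrative):

```python
# I420 layout sketch: a frame of height H and width W is stored as a
# (H * 3 // 2, W) uint8 array, with the Y (luma) plane in the first H rows.
import numpy as np

H, W = 480, 640                            # illustrative frame size
yuv_frame = np.zeros((H * 3 // 2, W), dtype=np.uint8)

y_height = yuv_frame.shape[0] // 3 * 2     # recovers H (480)
y_plane = yuv_frame[0:y_height, 0:W]
assert y_plane.shape == (H, W)
```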
|
||||
def ensure_anchor(
|
||||
self, id: str, yuv_frame: np.ndarray, median_box: tuple[int, int, int, int]
|
||||
) -> None:
|
||||
"""Initialize anchor crop from stable median box when object becomes stationary."""
|
||||
if id not in self.anchor_crops:
|
||||
self.anchor_boxes[id] = median_box
|
||||
self.anchor_crops[id] = self._extract_y_crop(yuv_frame, median_box)
|
||||
self.changed_counts[id] = 0
|
||||
self.shift_histories[id] = []
|
||||
logger.debug(
|
||||
"ensure_anchor: initialized id=%s median_box=%s crop_shape=%s",
|
||||
id,
|
||||
median_box,
|
||||
self.anchor_crops[id].shape,
|
||||
)
|
||||
|
||||
def on_active(self, id: str) -> None:
|
||||
"""Reset state when object becomes active to allow re-anchoring."""
|
||||
logger.debug("on_active: id=%s became active; resetting state", id)
|
||||
self.reset(id)
|
||||
|
||||
def evaluate(
|
||||
self, id: str, yuv_frame: np.ndarray, current_box: tuple[int, int, int, int]
|
||||
) -> bool:
|
||||
"""Return True to keep stationary, False to flip to active.
|
||||
|
||||
Compares the same spatial region (historical median box) across frames
|
||||
to detect actual movement, ignoring bounding box variations.
|
||||
"""
|
||||
|
||||
if id not in self.anchor_crops or id not in self.anchor_boxes:
|
||||
logger.debug("evaluate: id=%s has no anchor; default keep stationary", id)
|
||||
return True
|
||||
|
||||
# Compare same spatial region across frames
|
||||
anchor_box = self.anchor_boxes[id]
|
||||
anchor_crop = self.anchor_crops[id]
|
||||
curr_crop = self._extract_y_crop(yuv_frame, anchor_box)
|
||||
|
||||
# Compute appearance and motion metrics
|
||||
ncc = cv2.matchTemplate(curr_crop, anchor_crop, cv2.TM_CCOEFF_NORMED)[0, 0]
|
||||
a64 = anchor_crop.astype(np.float64) * self._hann2d
|
||||
c64 = curr_crop.astype(np.float64) * self._hann2d
|
||||
(shift_x, shift_y), _ = cv2.phaseCorrelate(a64, c64)
|
||||
shift_norm = float(np.hypot(shift_x, shift_y)) / float(self.CROP_SIZE)
|
||||
|
||||
logger.debug(
|
||||
"evaluate: id=%s metrics ncc=%.4f shift_norm=%.4f (shift_x=%.3f, shift_y=%.3f)",
|
||||
id,
|
||||
float(ncc),
|
||||
shift_norm,
|
||||
float(shift_x),
|
||||
float(shift_y),
|
||||
)
|
||||
|
||||
# Update rolling shift history
|
||||
history = self.shift_histories.get(id, [])
|
||||
history.append(shift_norm)
|
||||
if len(history) > 5:
|
||||
history = history[-5:]
|
||||
self.shift_histories[id] = history
|
||||
drift_sum = float(sum(history))
|
||||
|
||||
logger.debug(
|
||||
"evaluate: id=%s history_len=%d last_shift=%.4f drift_sum=%.4f",
|
||||
id,
|
||||
len(history),
|
||||
history[-1] if history else -1.0,
|
||||
drift_sum,
|
||||
)
|
||||
|
||||
# Early exit for clear stationary case
|
||||
if ncc >= self.NCC_KEEP_THRESHOLD and shift_norm < self.SHIFT_KEEP_THRESHOLD:
|
||||
self.changed_counts[id] = 0
|
||||
logger.debug(
|
||||
"evaluate: id=%s early-stationary keep=True (ncc>=%.2f and shift<%.2f)",
|
||||
id,
|
||||
self.NCC_KEEP_THRESHOLD,
|
||||
self.SHIFT_KEEP_THRESHOLD,
|
||||
)
|
||||
return True
|
||||
|
||||
# Check for movement indicators
|
||||
movement_detected = (
|
||||
ncc < self.NCC_ACTIVE_THRESHOLD
|
||||
or shift_norm >= self.SHIFT_ACTIVE_THRESHOLD
|
||||
or drift_sum >= self.DRIFT_ACTIVE_THRESHOLD
|
||||
)
|
||||
|
||||
if movement_detected:
|
||||
cnt = self.changed_counts.get(id, 0) + 1
|
||||
self.changed_counts[id] = cnt
|
||||
if (
|
||||
cnt >= self.CHANGED_FRAMES_TO_FLIP
|
||||
or drift_sum >= self.DRIFT_ACTIVE_THRESHOLD
|
||||
):
|
||||
logger.debug(
|
||||
"evaluate: id=%s flip_to_active=True cnt=%d drift_sum=%.4f thresholds(changed>=%d drift>=%.2f)",
|
||||
id,
|
||||
cnt,
|
||||
drift_sum,
|
||||
self.CHANGED_FRAMES_TO_FLIP,
|
||||
self.DRIFT_ACTIVE_THRESHOLD,
|
||||
)
|
||||
return False
|
||||
logger.debug(
|
||||
"evaluate: id=%s movement_detected cnt=%d keep_until_cnt>=%d",
|
||||
id,
|
||||
cnt,
|
||||
self.CHANGED_FRAMES_TO_FLIP,
|
||||
)
|
||||
else:
|
||||
self.changed_counts[id] = 0
|
||||
logger.debug("evaluate: id=%s no_movement keep=True", id)
|
||||
|
||||
return True
|
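Distilled into a pure function, the decision rule in `evaluate` above looks roughly like this (thresholds copied from the class constants; the hysteresis counter is passed in explicitly for clarity, so this is a sketch rather than the implementation):

```python
# Decision rule sketch for StationaryMotionClassifier.evaluate.
def keep_stationary(
    ncc: float, shift_norm: float, drift_sum: float, changed: int
) -> tuple[bool, int]:
    if ncc >= 0.90 and shift_norm < 0.02:
        return True, 0                      # clearly unchanged: reset the counter
    moved = ncc < 0.85 or shift_norm >= 0.04 or drift_sum >= 0.12
    if not moved:
        return True, 0
    changed += 1
    if changed >= 2 or drift_sum >= 0.12:
        return False, changed               # flip to active
    return True, changed                    # movement seen once: keep, but remember

# e.g. keep_stationary(0.95, 0.01, 0.0, 0) -> (True, 0)
#      keep_stationary(0.80, 0.05, 0.0, 1) -> (False, 2)
```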
@@ -284,7 +284,9 @@ def post_process_yolox(
|
||||
|
||||
|
||||
def get_ort_providers(
|
||||
force_cpu: bool = False, device: str | None = "AUTO", requires_fp16: bool = False
|
||||
force_cpu: bool = False,
|
||||
device: str | None = "AUTO",
|
||||
requires_fp16: bool = False,
|
||||
) -> tuple[list[str], list[dict[str, Any]]]:
|
||||
if force_cpu:
|
||||
return (
|
||||
@@ -351,12 +353,15 @@ def get_ort_providers(
|
||||
}
|
||||
)
|
||||
elif provider == "MIGraphXExecutionProvider":
|
||||
# MIGraphX uses more CPU than ROCM, while also being the same speed
|
||||
if device == "MIGraphX":
|
||||
providers.append(provider)
|
||||
options.append({})
|
||||
else:
|
||||
continue
|
||||
migraphx_cache_dir = os.path.join(MODEL_CACHE_DIR, "migraphx")
|
||||
os.makedirs(migraphx_cache_dir, exist_ok=True)
|
||||
|
||||
providers.append(provider)
|
||||
options.append(
|
||||
{
|
||||
"migraphx_model_cache_dir": migraphx_cache_dir,
|
||||
}
|
||||
)
|
||||
elif provider == "CPUExecutionProvider":
|
||||
providers.append(provider)
|
||||
options.append(
|
||||
|
@@ -269,7 +269,20 @@ def is_object_filtered(obj, objects_to_track, object_filters):
|
||||
|
||||
def get_min_region_size(model_config: ModelConfig) -> int:
|
||||
"""Get the min region size."""
|
||||
return max(model_config.height, model_config.width)
|
||||
largest_dimension = max(model_config.height, model_config.width)
|
||||
|
||||
if largest_dimension > 320:
|
||||
# We originally tested allowing any model to have a region down to half of the model size
|
||||
# but this led to many false positives. In this case we specifically target larger models
|
||||
# which can benefit from a smaller region in some cases to detect smaller objects.
|
||||
half = int(largest_dimension / 2)
|
||||
|
||||
if half % 4 == 0:
|
||||
return half
|
||||
|
||||
return int((half + 3) / 4) * 4
|
||||
|
||||
return largest_dimension
|
||||
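A standalone worked example of the region-size rule above (this mirrors the diff rather than importing it; model dimensions are illustrative):

```python
# Sketch of get_min_region_size: models over 320px can use half-size regions,
# rounded up to a multiple of 4.
def min_region_size(model_h: int, model_w: int) -> int:
    largest = max(model_h, model_w)
    if largest > 320:
        half = largest // 2
        return half if half % 4 == 0 else ((half + 3) // 4) * 4
    return largest


assert min_region_size(320, 320) == 320   # small models keep the full size
assert min_region_size(640, 640) == 320   # large models drop to half
assert min_region_size(330, 330) == 168   # half (165) rounded up to a multiple of 4
```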
|
||||
|
||||
def create_tensor_input(frame, model_config: ModelConfig, region):
|
||||
|
@@ -303,7 +303,7 @@ def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, s
|
||||
"-o",
|
||||
"-",
|
||||
"-s",
|
||||
"1",
|
||||
"1000", # Intel changed this from seconds to milliseconds in 2024+ versions
|
||||
]
|
||||
|
||||
if intel_gpu_device:
|
||||
@@ -603,87 +603,87 @@ def auto_detect_hwaccel() -> str:
|
||||
async def get_video_properties(
|
||||
ffmpeg, url: str, get_duration: bool = False
|
||||
) -> dict[str, Any]:
|
||||
async def calculate_duration(video: Optional[Any]) -> float:
|
||||
duration = None
|
||||
|
||||
if video is not None:
|
||||
# Get the frames per second (fps) of the video stream
|
||||
fps = video.get(cv2.CAP_PROP_FPS)
|
||||
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
|
||||
|
||||
if fps and total_frames:
|
||||
duration = total_frames / fps
|
||||
|
||||
# if cv2 failed need to use ffprobe
|
||||
if duration is None:
|
||||
p = await asyncio.create_subprocess_exec(
|
||||
ffmpeg.ffprobe_path,
|
||||
"-v",
|
||||
"error",
|
||||
"-show_entries",
|
||||
"format=duration",
|
||||
"-of",
|
||||
"default=noprint_wrappers=1:nokey=1",
|
||||
f"{url}",
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.PIPE,
|
||||
async def probe_with_ffprobe(
|
||||
url: str,
|
||||
) -> tuple[bool, int, int, Optional[str], float]:
|
||||
"""Fallback using ffprobe: returns (valid, width, height, codec, duration)."""
|
||||
cmd = [
|
||||
ffmpeg.ffprobe_path,
|
||||
"-v",
|
||||
"quiet",
|
||||
"-print_format",
|
||||
"json",
|
||||
"-show_format",
|
||||
"-show_streams",
|
||||
url,
|
||||
]
|
||||
try:
|
||||
proc = await asyncio.create_subprocess_exec(
|
||||
*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
|
||||
)
|
||||
await p.wait()
|
||||
stdout, _ = await proc.communicate()
|
||||
if proc.returncode != 0:
|
||||
return False, 0, 0, None, -1
|
||||
|
||||
if p.returncode == 0:
|
||||
result = (await p.stdout.read()).decode()
|
||||
else:
|
||||
result = None
|
||||
data = json.loads(stdout.decode())
|
||||
video_streams = [
|
||||
s for s in data.get("streams", []) if s.get("codec_type") == "video"
|
||||
]
|
||||
if not video_streams:
|
||||
return False, 0, 0, None, -1
|
||||
|
||||
if result:
|
||||
try:
|
||||
duration = float(result.strip())
|
||||
except ValueError:
|
||||
duration = -1
|
||||
else:
|
||||
duration = -1
|
||||
v = video_streams[0]
|
||||
width = int(v.get("width", 0))
|
||||
height = int(v.get("height", 0))
|
||||
codec = v.get("codec_name")
|
||||
|
||||
return duration
|
||||
duration_str = data.get("format", {}).get("duration")
|
||||
duration = float(duration_str) if duration_str else -1.0
|
||||
|
||||
width = height = 0
|
||||
return True, width, height, codec, duration
|
||||
except (json.JSONDecodeError, ValueError, KeyError, asyncio.SubprocessError):
|
||||
return False, 0, 0, None, -1
|
||||
|
||||
try:
|
||||
# Open the video stream using OpenCV
|
||||
video = cv2.VideoCapture(url)
|
||||
def probe_with_cv2(url: str) -> tuple[bool, int, int, Optional[str], float]:
|
||||
"""Primary attempt using cv2: returns (valid, width, height, fourcc, duration)."""
|
||||
cap = cv2.VideoCapture(url)
|
||||
if not cap.isOpened():
|
||||
cap.release()
|
||||
return False, 0, 0, None, -1
|
||||
|
||||
# Check if the video stream was opened successfully
|
||||
if not video.isOpened():
|
||||
video = None
|
||||
except Exception:
|
||||
video = None
|
||||
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||||
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||||
valid = width > 0 and height > 0
|
||||
fourcc = None
|
||||
duration = -1.0
|
||||
|
||||
result = {}
|
||||
if valid:
|
||||
fourcc_int = int(cap.get(cv2.CAP_PROP_FOURCC))
|
||||
fourcc = fourcc_int.to_bytes(4, "little").decode("latin-1").strip()
|
||||
|
||||
if get_duration:
|
||||
fps = cap.get(cv2.CAP_PROP_FPS)
|
||||
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
||||
if fps > 0 and total_frames > 0:
|
||||
duration = total_frames / fps
|
||||
|
||||
cap.release()
|
||||
return valid, width, height, fourcc, duration
|
||||
|
||||
# try cv2 first
|
||||
has_video, width, height, fourcc, duration = probe_with_cv2(url)
|
||||
|
||||
# fallback to ffprobe if needed
|
||||
if not has_video or (get_duration and duration < 0):
|
||||
has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
|
||||
|
||||
result: dict[str, Any] = {"has_valid_video": has_video}
|
||||
if has_video:
|
||||
result.update({"width": width, "height": height})
|
||||
if fourcc:
|
||||
result["fourcc"] = fourcc
|
||||
if get_duration:
|
||||
result["duration"] = await calculate_duration(video)
|
||||
|
||||
if video is not None:
|
||||
# Get the width of frames in the video stream
|
||||
width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
|
||||
|
||||
# Get the height of frames in the video stream
|
||||
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
|
||||
|
||||
# Get the stream encoding
|
||||
fourcc_int = int(video.get(cv2.CAP_PROP_FOURCC))
|
||||
fourcc = (
|
||||
chr((fourcc_int >> 0) & 255)
|
||||
+ chr((fourcc_int >> 8) & 255)
|
||||
+ chr((fourcc_int >> 16) & 255)
|
||||
+ chr((fourcc_int >> 24) & 255)
|
||||
)
|
||||
|
||||
# Release the video stream
|
||||
video.release()
|
||||
|
||||
result["width"] = round(width)
|
||||
result["height"] = round(height)
|
||||
result["fourcc"] = fourcc
|
||||
result["duration"] = duration
|
||||
|
||||
return result
|
||||
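A small sketch of the FOURCC decoding used in `probe_with_cv2` above (the packed integer is built by hand here for illustration):

```python
# FOURCC round-trip sketch: cv2.CAP_PROP_FOURCC packs four ASCII bytes into an
# int, least-significant byte first, so decoding little-endian recovers "h264".
fourcc_int = ord("h") | (ord("2") << 8) | (ord("6") << 16) | (ord("4") << 24)
fourcc = fourcc_int.to_bytes(4, "little").decode("latin-1").strip()
assert fourcc == "h264"
```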
|
||||
|
141 frigate/video.py
@@ -1,10 +1,9 @@
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import queue
|
||||
import subprocess as sp
|
||||
import threading
|
||||
import time
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from multiprocessing import Queue, Value
|
||||
from multiprocessing.synchronize import Event as MpEvent
|
||||
from typing import Any
|
||||
@@ -13,6 +12,10 @@ import cv2
|
||||
|
||||
from frigate.camera import CameraMetrics, PTZMetrics
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.comms.recordings_updater import (
|
||||
RecordingsDataSubscriber,
|
||||
RecordingsDataTypeEnum,
|
||||
)
|
||||
from frigate.config import CameraConfig, DetectConfig, ModelConfig
|
||||
from frigate.config.camera.camera import CameraTypeEnum
|
||||
from frigate.config.camera.updater import (
|
||||
@@ -20,8 +23,6 @@ from frigate.config.camera.updater import (
|
||||
CameraConfigUpdateSubscriber,
|
||||
)
|
||||
from frigate.const import (
|
||||
CACHE_DIR,
|
||||
CACHE_SEGMENT_FORMAT,
|
||||
PROCESS_PRIORITY_HIGH,
|
||||
REQUEST_REGION_GRID,
|
||||
)
|
||||
@@ -129,7 +130,7 @@ def capture_frames(
|
||||
|
||||
fps.value = frame_rate.eps()
|
||||
skipped_fps.value = skipped_eps.eps()
|
||||
current_frame.value = datetime.datetime.now().timestamp()
|
||||
current_frame.value = datetime.now().timestamp()
|
||||
frame_name = f"{config.name}_frame{frame_index}"
|
||||
frame_buffer = frame_manager.write(frame_name)
|
||||
try:
|
||||
@@ -199,6 +200,11 @@ class CameraWatchdog(threading.Thread):
|
||||
self.requestor = InterProcessRequestor()
|
||||
self.was_enabled = self.config.enabled
|
||||
|
||||
self.segment_subscriber = RecordingsDataSubscriber(RecordingsDataTypeEnum.all)
|
||||
self.latest_valid_segment_time: float = 0
|
||||
self.latest_invalid_segment_time: float = 0
|
||||
self.latest_cache_segment_time: float = 0
|
||||
|
||||
def _update_enabled_state(self) -> bool:
|
||||
"""Fetch the latest config and update enabled state."""
|
||||
self.config_subscriber.check_for_updates()
|
||||
@@ -243,6 +249,11 @@ class CameraWatchdog(threading.Thread):
|
||||
if enabled:
|
||||
self.logger.debug(f"Enabling camera {self.config.name}")
|
||||
self.start_all_ffmpeg()
|
||||
|
||||
# reset all timestamps
|
||||
self.latest_valid_segment_time = 0
|
||||
self.latest_invalid_segment_time = 0
|
||||
self.latest_cache_segment_time = 0
|
||||
else:
|
||||
self.logger.debug(f"Disabling camera {self.config.name}")
|
||||
self.stop_all_ffmpeg()
|
||||
@@ -260,7 +271,37 @@ class CameraWatchdog(threading.Thread):
|
||||
if not enabled:
|
||||
continue
|
||||
|
||||
now = datetime.datetime.now().timestamp()
|
||||
while True:
|
||||
update = self.segment_subscriber.check_for_update(timeout=0)
|
||||
|
||||
if update == (None, None):
|
||||
break
|
||||
|
||||
raw_topic, payload = update
|
||||
if raw_topic and payload:
|
||||
topic = str(raw_topic)
|
||||
camera, segment_time, _ = payload
|
||||
|
||||
if camera != self.config.name:
|
||||
continue
|
||||
|
||||
if topic.endswith(RecordingsDataTypeEnum.valid.value):
|
||||
self.logger.debug(
|
||||
f"Latest valid recording segment time on {camera}: {segment_time}"
|
||||
)
|
||||
self.latest_valid_segment_time = segment_time
|
||||
elif topic.endswith(RecordingsDataTypeEnum.invalid.value):
|
||||
self.logger.warning(
|
||||
f"Invalid recording segment detected for {camera} at {segment_time}"
|
||||
)
|
||||
self.latest_invalid_segment_time = segment_time
|
||||
elif topic.endswith(RecordingsDataTypeEnum.latest.value):
|
||||
if segment_time is not None:
|
||||
self.latest_cache_segment_time = segment_time
|
||||
else:
|
||||
self.latest_cache_segment_time = 0
|
||||
|
||||
now = datetime.now().timestamp()
|
||||
|
||||
if not self.capture_thread.is_alive():
|
||||
self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
|
||||
@@ -298,18 +339,55 @@ class CameraWatchdog(threading.Thread):
|
||||
poll = p["process"].poll()
|
||||
|
||||
if self.config.record.enabled and "record" in p["roles"]:
|
||||
latest_segment_time = self.get_latest_segment_datetime(
|
||||
p.get(
|
||||
"latest_segment_time",
|
||||
datetime.datetime.now().astimezone(datetime.timezone.utc),
|
||||
now_utc = datetime.now().astimezone(timezone.utc)
|
||||
|
||||
latest_cache_dt = (
|
||||
datetime.fromtimestamp(
|
||||
self.latest_cache_segment_time, tz=timezone.utc
|
||||
)
|
||||
if self.latest_cache_segment_time > 0
|
||||
else now_utc - timedelta(seconds=1)
|
||||
)
|
||||
|
||||
if datetime.datetime.now().astimezone(datetime.timezone.utc) > (
|
||||
latest_segment_time + datetime.timedelta(seconds=120)
|
||||
):
|
||||
latest_valid_dt = (
|
||||
datetime.fromtimestamp(
|
||||
self.latest_valid_segment_time, tz=timezone.utc
|
||||
)
|
||||
if self.latest_valid_segment_time > 0
|
||||
else now_utc - timedelta(seconds=1)
|
||||
)
|
||||
|
||||
latest_invalid_dt = (
|
||||
datetime.fromtimestamp(
|
||||
self.latest_invalid_segment_time, tz=timezone.utc
|
||||
)
|
||||
if self.latest_invalid_segment_time > 0
|
||||
else now_utc - timedelta(seconds=1)
|
||||
)
|
||||
|
||||
# ensure segments are still being created and that they have valid video data
|
||||
cache_stale = now_utc > (latest_cache_dt + timedelta(seconds=120))
|
||||
valid_stale = now_utc > (latest_valid_dt + timedelta(seconds=120))
|
||||
invalid_stale_condition = (
|
||||
self.latest_invalid_segment_time > 0
|
||||
and now_utc > (latest_invalid_dt + timedelta(seconds=120))
|
||||
and self.latest_valid_segment_time
|
||||
<= self.latest_invalid_segment_time
|
||||
)
|
||||
invalid_stale = invalid_stale_condition
|
||||
|
||||
if cache_stale or valid_stale or invalid_stale:
|
||||
if cache_stale:
|
||||
reason = "No new recording segments were created"
|
||||
elif valid_stale:
|
||||
reason = "No new valid recording segments were created"
|
||||
else: # invalid_stale
|
||||
reason = (
|
||||
"No valid segments created since last invalid segment"
|
||||
)
|
||||
|
||||
self.logger.error(
|
||||
f"No new recording segments were created for {self.config.name} in the last 120s. restarting the ffmpeg record process..."
|
||||
f"{reason} for {self.config.name} in the last 120s. Restarting the ffmpeg record process..."
|
||||
)
|
||||
p["process"] = start_or_restart_ffmpeg(
|
||||
p["cmd"],
|
||||
@@ -328,7 +406,7 @@ class CameraWatchdog(threading.Thread):
|
||||
self.requestor.send_data(
|
||||
f"{self.config.name}/status/record", "online"
|
||||
)
|
||||
p["latest_segment_time"] = latest_segment_time
|
||||
p["latest_segment_time"] = self.latest_cache_segment_time
|
||||
|
||||
if poll is None:
|
||||
continue
|
||||
@@ -346,6 +424,7 @@ class CameraWatchdog(threading.Thread):
|
||||
self.stop_all_ffmpeg()
|
||||
self.logpipe.close()
|
||||
self.config_subscriber.stop()
|
||||
self.segment_subscriber.stop()
|
||||
|
||||
def start_ffmpeg_detect(self):
|
||||
ffmpeg_cmd = [
|
||||
@@ -405,33 +484,6 @@ class CameraWatchdog(threading.Thread):
|
||||
p["logpipe"].close()
|
||||
self.ffmpeg_other_processes.clear()
|
||||
|
||||
def get_latest_segment_datetime(
|
||||
self, latest_segment: datetime.datetime
|
||||
) -> datetime.datetime:
|
||||
"""Checks if ffmpeg is still writing recording segments to cache."""
|
||||
cache_files = sorted(
|
||||
[
|
||||
d
|
||||
for d in os.listdir(CACHE_DIR)
|
||||
if os.path.isfile(os.path.join(CACHE_DIR, d))
|
||||
and d.endswith(".mp4")
|
||||
and not d.startswith("preview_")
|
||||
]
|
||||
)
|
||||
newest_segment_time = latest_segment
|
||||
|
||||
for file in cache_files:
|
||||
if self.config.name in file:
|
||||
basename = os.path.splitext(file)[0]
|
||||
_, date = basename.rsplit("@", maxsplit=1)
|
||||
segment_time = datetime.datetime.strptime(
|
||||
date, CACHE_SEGMENT_FORMAT
|
||||
).astimezone(datetime.timezone.utc)
|
||||
if segment_time > newest_segment_time:
|
||||
newest_segment_time = segment_time
|
||||
|
||||
return newest_segment_time
|
||||
|
||||
|
||||
class CameraCaptureRunner(threading.Thread):
|
||||
def __init__(
|
||||
@@ -727,10 +779,7 @@ def process_frames(
|
||||
time.sleep(0.1)
|
||||
continue
|
||||
|
||||
if (
|
||||
datetime.datetime.now().astimezone(datetime.timezone.utc)
|
||||
> next_region_update
|
||||
):
|
||||
if datetime.now().astimezone(timezone.utc) > next_region_update:
|
||||
region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name)
|
||||
next_region_update = get_tomorrow_at_time(2)
|
||||
|
||||
|
163 generate_config_translations.py Normal file
@@ -0,0 +1,163 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate English translation JSON files from Pydantic config models.
|
||||
|
||||
This script dynamically extracts all top-level config sections from FrigateConfig
|
||||
and generates JSON translation files with titles and descriptions for the web UI.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, get_args, get_origin
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic.fields import FieldInfo
|
||||
|
||||
from frigate.config.config import FrigateConfig
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_field_translations(field_info: FieldInfo) -> Dict[str, str]:
|
||||
"""Extract title and description from a Pydantic field."""
|
||||
translations = {}
|
||||
|
||||
if field_info.title:
|
||||
translations["label"] = field_info.title
|
||||
|
||||
if field_info.description:
|
||||
translations["description"] = field_info.description
|
||||
|
||||
return translations
|
||||
|
||||
|
||||
def process_model_fields(model: type[BaseModel]) -> Dict[str, Any]:
|
||||
"""
|
||||
Recursively process a Pydantic model to extract translations.
|
||||
|
||||
Returns a nested dictionary structure matching the config schema,
|
||||
with title and description for each field.
|
||||
"""
|
||||
translations = {}
|
||||
|
||||
model_fields = model.model_fields
|
||||
|
||||
for field_name, field_info in model_fields.items():
|
||||
field_translations = get_field_translations(field_info)
|
||||
|
||||
# Get the field's type annotation
|
||||
field_type = field_info.annotation
|
||||
|
||||
# Handle Optional types
|
||||
origin = get_origin(field_type)
|
||||
|
||||
if origin is Optional or (
|
||||
hasattr(origin, "__name__") and origin.__name__ == "UnionType"
|
||||
):
|
||||
args = get_args(field_type)
|
||||
field_type = next(
|
||||
(arg for arg in args if arg is not type(None)), field_type
|
||||
)
|
||||
|
||||
# Handle Dict types (like Dict[str, CameraConfig])
|
||||
if get_origin(field_type) is dict:
|
||||
dict_args = get_args(field_type)
|
||||
|
||||
if len(dict_args) >= 2:
|
||||
value_type = dict_args[1]
|
||||
|
||||
if isinstance(value_type, type) and issubclass(value_type, BaseModel):
|
||||
nested_translations = process_model_fields(value_type)
|
||||
|
||||
if nested_translations:
|
||||
field_translations["properties"] = nested_translations
|
||||
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
|
||||
nested_translations = process_model_fields(field_type)
|
||||
if nested_translations:
|
||||
field_translations["properties"] = nested_translations
|
||||
|
||||
if field_translations:
|
||||
translations[field_name] = field_translations
|
||||
|
||||
return translations
|
||||
|
||||
|
||||
def generate_section_translation(
|
||||
section_name: str, field_info: FieldInfo
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Generate translation structure for a top-level config section.
|
||||
"""
|
||||
section_translations = get_field_translations(field_info)
|
||||
field_type = field_info.annotation
|
||||
origin = get_origin(field_type)
|
||||
|
||||
if origin is Optional or (
|
||||
hasattr(origin, "__name__") and origin.__name__ == "UnionType"
|
||||
):
|
||||
args = get_args(field_type)
|
||||
field_type = next((arg for arg in args if arg is not type(None)), field_type)
|
||||
|
||||
# Handle Dict types (like detectors, cameras, camera_groups)
|
||||
if get_origin(field_type) is dict:
|
||||
dict_args = get_args(field_type)
|
||||
if len(dict_args) >= 2:
|
||||
value_type = dict_args[1]
|
||||
if isinstance(value_type, type) and issubclass(value_type, BaseModel):
|
||||
nested = process_model_fields(value_type)
|
||||
if nested:
|
||||
section_translations["properties"] = nested
|
||||
|
||||
# If the field itself is a BaseModel, process it
|
||||
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
|
||||
nested = process_model_fields(field_type)
|
||||
if nested:
|
||||
section_translations["properties"] = nested
|
||||
|
||||
return section_translations
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function to generate config translations."""
|
||||
|
||||
# Define output directory
|
||||
output_dir = Path(__file__).parent / "web" / "public" / "locales" / "en" / "config"
|
||||
|
||||
logger.info(f"Output directory: {output_dir}")
|
||||
|
||||
# Clean and recreate the output directory
|
||||
if output_dir.exists():
|
||||
logger.info(f"Removing existing directory: {output_dir}")
|
||||
shutil.rmtree(output_dir)
|
||||
|
||||
logger.info(f"Creating directory: {output_dir}")
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
config_fields = FrigateConfig.model_fields
|
||||
logger.info(f"Found {len(config_fields)} top-level config sections")
|
||||
|
||||
for field_name, field_info in config_fields.items():
|
||||
if field_name.startswith("_"):
|
||||
continue
|
||||
|
||||
logger.info(f"Processing section: {field_name}")
|
||||
section_data = generate_section_translation(field_name, field_info)
|
||||
|
||||
if not section_data:
|
||||
logger.warning(f"No translations found for section: {field_name}")
|
||||
continue
|
||||
|
||||
output_file = output_dir / f"{field_name}.json"
|
||||
with open(output_file, "w", encoding="utf-8") as f:
|
||||
json.dump(section_data, f, indent=2, ensure_ascii=False)
|
||||
|
||||
logger.info(f"Generated: {output_file}")
|
||||
|
||||
logger.info("Translation generation complete!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
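A quick sketch of what the extraction above produces for a small model. The import assumes the script is importable from the repository root, where this diff adds it; the example model and its fields are hypothetical.

```python
# Illustrative usage of process_model_fields on a tiny Pydantic model.
from pydantic import BaseModel, Field

from generate_config_translations import process_model_fields


class ExampleSection(BaseModel):  # hypothetical stand-in for a config section
    enabled: bool = Field(default=True, title="Enabled", description="Enable this feature.")
    height: int = Field(default=720, title="Height", description="Output height in pixels.")


print(process_model_fields(ExampleSection))
# {'enabled': {'label': 'Enabled', 'description': 'Enable this feature.'},
#  'height': {'label': 'Height', 'description': 'Output height in pixels.'}}
```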
@@ -4,8 +4,8 @@
|
||||
"rsc": false,
|
||||
"tsx": true,
|
||||
"tailwind": {
|
||||
"config": "tailwind.config.js",
|
||||
"css": "index.css",
|
||||
"config": "tailwind.config.cjs",
|
||||
"css": "src/index.css",
|
||||
"baseColor": "slate",
|
||||
"cssVariables": true
|
||||
},
|
||||
|
764 web/package-lock.json generated
@@ -15,7 +15,7 @@
|
||||
"@radix-ui/react-aspect-ratio": "^1.1.2",
|
||||
"@radix-ui/react-checkbox": "^1.1.4",
|
||||
"@radix-ui/react-context-menu": "^2.2.6",
|
||||
"@radix-ui/react-dialog": "^1.1.6",
|
||||
"@radix-ui/react-dialog": "^1.1.15",
|
||||
"@radix-ui/react-dropdown-menu": "^2.1.6",
|
||||
"@radix-ui/react-hover-card": "^1.1.6",
|
||||
"@radix-ui/react-label": "^2.1.2",
|
||||
@@ -23,14 +23,14 @@
|
||||
"@radix-ui/react-radio-group": "^1.2.3",
|
||||
"@radix-ui/react-scroll-area": "^1.2.3",
|
||||
"@radix-ui/react-select": "^2.1.6",
|
||||
"@radix-ui/react-separator": "^1.1.2",
|
||||
"@radix-ui/react-separator": "^1.1.7",
|
||||
"@radix-ui/react-slider": "^1.2.3",
|
||||
"@radix-ui/react-slot": "^1.2.2",
|
||||
"@radix-ui/react-slot": "^1.2.3",
|
||||
"@radix-ui/react-switch": "^1.1.3",
|
||||
"@radix-ui/react-tabs": "^1.1.3",
|
||||
"@radix-ui/react-toggle": "^1.1.2",
|
||||
"@radix-ui/react-toggle-group": "^1.1.2",
|
||||
"@radix-ui/react-tooltip": "^1.1.8",
|
||||
"@radix-ui/react-tooltip": "^1.2.8",
|
||||
"apexcharts": "^3.52.0",
|
||||
"axios": "^1.7.7",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
@@ -104,7 +104,7 @@
|
||||
"eslint-plugin-jest": "^28.2.0",
|
||||
"eslint-plugin-prettier": "^5.0.1",
|
||||
"eslint-plugin-react-hooks": "^4.6.0",
|
||||
"eslint-plugin-react-refresh": "^0.4.8",
|
||||
"eslint-plugin-react-refresh": "^0.4.23",
|
||||
"eslint-plugin-vitest-globals": "^1.5.0",
|
||||
"fake-indexeddb": "^6.0.0",
|
||||
"jest-websocket-mock": "^2.5.0",
|
||||
@@ -1250,6 +1250,42 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-dialog": {
|
||||
"version": "1.1.6",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.6.tgz",
|
||||
"integrity": "sha512-/IVhJV5AceX620DUJ4uYVMymzsipdKBzo3edo+omeskCKGm9FRHM0ebIdbPnlQVJqyuHbuBltQUOG2mOTq2IYw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/primitive": "1.1.1",
|
||||
"@radix-ui/react-compose-refs": "1.1.1",
|
||||
"@radix-ui/react-context": "1.1.1",
|
||||
"@radix-ui/react-dismissable-layer": "1.1.5",
|
||||
"@radix-ui/react-focus-guards": "1.1.1",
|
||||
"@radix-ui/react-focus-scope": "1.1.2",
|
||||
"@radix-ui/react-id": "1.1.0",
|
||||
"@radix-ui/react-portal": "1.1.4",
|
||||
"@radix-ui/react-presence": "1.1.2",
|
||||
"@radix-ui/react-primitive": "2.0.2",
|
||||
"@radix-ui/react-slot": "1.1.2",
|
||||
"@radix-ui/react-use-controllable-state": "1.1.0",
|
||||
"aria-hidden": "^1.2.4",
|
||||
"react-remove-scroll": "^2.6.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
|
||||
@@ -1447,23 +1483,23 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog": {
|
||||
"version": "1.1.6",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.6.tgz",
|
||||
"integrity": "sha512-/IVhJV5AceX620DUJ4uYVMymzsipdKBzo3edo+omeskCKGm9FRHM0ebIdbPnlQVJqyuHbuBltQUOG2mOTq2IYw==",
|
||||
"version": "1.1.15",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz",
|
||||
"integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/primitive": "1.1.1",
|
||||
"@radix-ui/react-compose-refs": "1.1.1",
|
||||
"@radix-ui/react-context": "1.1.1",
|
||||
"@radix-ui/react-dismissable-layer": "1.1.5",
|
||||
"@radix-ui/react-focus-guards": "1.1.1",
|
||||
"@radix-ui/react-focus-scope": "1.1.2",
|
||||
"@radix-ui/react-id": "1.1.0",
|
||||
"@radix-ui/react-portal": "1.1.4",
|
||||
"@radix-ui/react-presence": "1.1.2",
|
||||
"@radix-ui/react-primitive": "2.0.2",
|
||||
"@radix-ui/react-slot": "1.1.2",
|
||||
"@radix-ui/react-use-controllable-state": "1.1.0",
|
||||
"@radix-ui/primitive": "1.1.3",
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-context": "1.1.2",
|
||||
"@radix-ui/react-dismissable-layer": "1.1.11",
|
||||
"@radix-ui/react-focus-guards": "1.1.3",
|
||||
"@radix-ui/react-focus-scope": "1.1.7",
|
||||
"@radix-ui/react-id": "1.1.1",
|
||||
"@radix-ui/react-portal": "1.1.9",
|
||||
"@radix-ui/react-presence": "1.1.5",
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-slot": "1.2.3",
|
||||
"@radix-ui/react-use-controllable-state": "1.2.2",
|
||||
"aria-hidden": "^1.2.4",
|
||||
"react-remove-scroll": "^2.6.3"
|
||||
},
|
||||
@@ -1482,14 +1518,255 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": {
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/primitive": {
|
||||
"version": "1.1.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
|
||||
"integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-compose-refs": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
|
||||
"integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz",
|
||||
"integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-context": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz",
|
||||
"integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-dismissable-layer": {
|
||||
"version": "1.1.11",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz",
|
||||
"integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-compose-refs": "1.1.1"
|
||||
"@radix-ui/primitive": "1.1.3",
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-use-callback-ref": "1.1.1",
|
||||
"@radix-ui/react-use-escape-keydown": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-guards": {
|
||||
"version": "1.1.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz",
|
||||
"integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-scope": {
|
||||
"version": "1.1.7",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz",
|
||||
"integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-use-callback-ref": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-id": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz",
|
||||
"integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-portal": {
|
||||
"version": "1.1.9",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
|
||||
"integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-presence": {
|
||||
"version": "1.1.5",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz",
|
||||
"integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-primitive": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
|
||||
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-slot": "1.2.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-callback-ref": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz",
|
||||
"integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-controllable-state": {
|
||||
"version": "1.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz",
|
||||
"integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-effect-event": "0.0.2",
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-escape-keydown": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz",
|
||||
"integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-callback-ref": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-layout-effect": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
|
||||
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
@@ -2073,12 +2350,35 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-separator": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.2.tgz",
|
||||
"integrity": "sha512-oZfHcaAp2Y6KFBX6I5P1u7CQoy4lheCGiYj+pGFrHy8E/VNRb5E39TkTr3JrV520csPBTZjkuKFdEsjS5EUNKQ==",
|
||||
"version": "1.1.7",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz",
|
||||
"integrity": "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-primitive": "2.0.2"
|
||||
"@radix-ui/react-primitive": "2.1.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
|
||||
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-slot": "1.2.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
@@ -2129,9 +2429,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-slot": {
|
||||
"version": "1.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.2.tgz",
|
||||
"integrity": "sha512-y7TBO4xN4Y94FvcWIOIh18fM4R1A8S4q1jhoz4PNzOoHsFcN8pogcFmZrTYAm4F9VRUrWP/Mw7xSKybIeRI+CQ==",
|
||||
"version": "1.2.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
|
||||
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-compose-refs": "1.1.2"
|
||||
@@ -2275,23 +2575,23 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip": {
|
||||
"version": "1.1.8",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.8.tgz",
|
||||
"integrity": "sha512-YAA2cu48EkJZdAMHC0dqo9kialOcRStbtiY4nJPaht7Ptrhcvpo+eDChaM6BIs8kL6a8Z5l5poiqLnXcNduOkA==",
|
||||
"version": "1.2.8",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz",
|
||||
"integrity": "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/primitive": "1.1.1",
|
||||
"@radix-ui/react-compose-refs": "1.1.1",
|
||||
"@radix-ui/react-context": "1.1.1",
|
||||
"@radix-ui/react-dismissable-layer": "1.1.5",
|
||||
"@radix-ui/react-id": "1.1.0",
|
||||
"@radix-ui/react-popper": "1.2.2",
|
||||
"@radix-ui/react-portal": "1.1.4",
|
||||
"@radix-ui/react-presence": "1.1.2",
|
||||
"@radix-ui/react-primitive": "2.0.2",
|
||||
"@radix-ui/react-slot": "1.1.2",
|
||||
"@radix-ui/react-use-controllable-state": "1.1.0",
|
||||
"@radix-ui/react-visually-hidden": "1.1.2"
|
||||
"@radix-ui/primitive": "1.1.3",
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-context": "1.1.2",
|
||||
"@radix-ui/react-dismissable-layer": "1.1.11",
|
||||
"@radix-ui/react-id": "1.1.1",
|
||||
"@radix-ui/react-popper": "1.2.8",
|
||||
"@radix-ui/react-portal": "1.1.9",
|
||||
"@radix-ui/react-presence": "1.1.5",
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-slot": "1.2.3",
|
||||
"@radix-ui/react-use-controllable-state": "1.2.2",
|
||||
"@radix-ui/react-visually-hidden": "1.2.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
@@ -2308,13 +2608,99 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
|
||||
"integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==",
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/primitive": {
|
||||
"version": "1.1.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
|
||||
"integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-arrow": {
|
||||
"version": "1.1.7",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz",
|
||||
"integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-compose-refs": "1.1.1"
|
||||
"@radix-ui/react-primitive": "2.1.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-compose-refs": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz",
|
||||
"integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-context": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz",
|
||||
"integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-dismissable-layer": {
|
||||
"version": "1.1.11",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz",
|
||||
"integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/primitive": "1.1.3",
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-use-callback-ref": "1.1.1",
|
||||
"@radix-ui/react-use-escape-keydown": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-id": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz",
|
||||
"integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
@@ -2326,6 +2712,241 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-popper": {
|
||||
"version": "1.2.8",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz",
|
||||
"integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@floating-ui/react-dom": "^2.0.0",
|
||||
"@radix-ui/react-arrow": "1.1.7",
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-context": "1.1.2",
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-use-callback-ref": "1.1.1",
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1",
|
||||
"@radix-ui/react-use-rect": "1.1.1",
|
||||
"@radix-ui/react-use-size": "1.1.1",
|
||||
"@radix-ui/rect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-portal": {
|
||||
"version": "1.1.9",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
|
||||
"integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-primitive": "2.1.3",
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-presence": {
|
||||
"version": "1.1.5",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz",
|
||||
"integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-compose-refs": "1.1.2",
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-primitive": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
|
||||
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-slot": "1.2.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-callback-ref": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz",
|
||||
"integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-controllable-state": {
|
||||
"version": "1.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz",
|
||||
"integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-effect-event": "0.0.2",
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-escape-keydown": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz",
|
||||
"integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-callback-ref": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-layout-effect": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
|
||||
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-rect": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz",
|
||||
"integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/rect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-size": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz",
|
||||
"integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-visually-hidden": {
|
||||
"version": "1.2.3",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz",
|
||||
"integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-primitive": "2.1.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"@types/react-dom": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
|
||||
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/react-dom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/rect": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz",
|
||||
"integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@radix-ui/react-use-callback-ref": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz",
|
||||
@@ -2359,6 +2980,39 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-use-effect-event": {
|
||||
"version": "0.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz",
|
||||
"integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@radix-ui/react-use-layout-effect": "1.1.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-use-effect-event/node_modules/@radix-ui/react-use-layout-effect": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
|
||||
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@radix-ui/react-use-escape-keydown": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz",
|
||||
@@ -5090,13 +5744,13 @@
|
||||
}
|
||||
},
|
||||
"node_modules/eslint-plugin-react-refresh": {
|
||||
"version": "0.4.8",
|
||||
"resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.8.tgz",
|
||||
"integrity": "sha512-MIKAclwaDFIiYtVBLzDdm16E+Ty4GwhB6wZlCAG1R3Ur+F9Qbo6PRxpA5DK7XtDgm+WlCoAY2WxAwqhmIDHg6Q==",
|
||||
"version": "0.4.23",
|
||||
"resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.23.tgz",
|
||||
"integrity": "sha512-G4j+rv0NmbIR45kni5xJOrYvCtyD3/7LjpVH8MPPcudXDcNu8gv+4ATTDXTtbRR8rTCM5HxECvCSsRmxKnWDsA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"eslint": ">=7"
|
||||
"eslint": ">=8.40"
|
||||
}
|
||||
},
|
||||
"node_modules/eslint-plugin-vitest-globals": {
|
||||
|
@@ -21,7 +21,7 @@
|
||||
"@radix-ui/react-aspect-ratio": "^1.1.2",
|
||||
"@radix-ui/react-checkbox": "^1.1.4",
|
||||
"@radix-ui/react-context-menu": "^2.2.6",
|
||||
"@radix-ui/react-dialog": "^1.1.6",
|
||||
"@radix-ui/react-dialog": "^1.1.15",
|
||||
"@radix-ui/react-dropdown-menu": "^2.1.6",
|
||||
"@radix-ui/react-hover-card": "^1.1.6",
|
||||
"@radix-ui/react-label": "^2.1.2",
|
||||
@@ -29,14 +29,14 @@
|
||||
"@radix-ui/react-radio-group": "^1.2.3",
|
||||
"@radix-ui/react-scroll-area": "^1.2.3",
|
||||
"@radix-ui/react-select": "^2.1.6",
|
||||
"@radix-ui/react-separator": "^1.1.2",
|
||||
"@radix-ui/react-separator": "^1.1.7",
|
||||
"@radix-ui/react-slider": "^1.2.3",
|
||||
"@radix-ui/react-slot": "^1.2.2",
|
||||
"@radix-ui/react-slot": "^1.2.3",
|
||||
"@radix-ui/react-switch": "^1.1.3",
|
||||
"@radix-ui/react-tabs": "^1.1.3",
|
||||
"@radix-ui/react-toggle": "^1.1.2",
|
||||
"@radix-ui/react-toggle-group": "^1.1.2",
|
||||
"@radix-ui/react-tooltip": "^1.1.8",
|
||||
"@radix-ui/react-tooltip": "^1.2.8",
|
||||
"apexcharts": "^3.52.0",
|
||||
"axios": "^1.7.7",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
@@ -110,7 +110,7 @@
|
||||
"eslint-plugin-jest": "^28.2.0",
|
||||
"eslint-plugin-prettier": "^5.0.1",
|
||||
"eslint-plugin-react-hooks": "^4.6.0",
|
||||
"eslint-plugin-react-refresh": "^0.4.8",
|
||||
"eslint-plugin-react-refresh": "^0.4.23",
|
||||
"eslint-plugin-vitest-globals": "^1.5.0",
|
||||
"fake-indexeddb": "^6.0.0",
|
||||
"jest-websocket-mock": "^2.5.0",
|
||||
|
@@ -56,7 +56,14 @@
|
||||
"formattedTimestampMonthDayYear": {
|
||||
"12hour": "МММ д, гггг",
|
||||
"24hour": "МММ д, гггг"
|
||||
}
|
||||
},
|
||||
"ago": "Преди {{timeAgo}}",
|
||||
"untilForTime": "До {{time}}",
|
||||
"untilForRestart": "Докато Frigate рестартира.",
|
||||
"untilRestart": "До рестарт",
|
||||
"mo": "{{time}}мес",
|
||||
"m": "{{time}}м",
|
||||
"s": "{{time}}с"
|
||||
},
|
||||
"button": {
|
||||
"apply": "Приложи",
|
||||
|
@@ -423,7 +423,7 @@
|
||||
"paths": {
|
||||
"title": "Cesty",
|
||||
"desc": "Zobrazit významné body trasy sledovaného objektu",
|
||||
"tips": "<p><strong>Cesty</strong></p><br><p>Čáry a kruhy označují významné body, kterými se sledovaný objekt během svého životního cyklu pohyboval."
|
||||
"tips": "<p><strong>Cesty</strong></p><br><p>Čáry a kruhy označují významné body, kterými se sledovaný objekt během svého životního cyklu pohyboval.</p>"
|
||||
}
|
||||
},
|
||||
"camera": {
|
||||
@@ -604,7 +604,8 @@
|
||||
"admin": "Správce",
|
||||
"adminDesc": "Plný přístup ke všem funkcím.",
|
||||
"viewer": "Divák",
|
||||
"viewerDesc": "Omezení pouze na Živé dashboardy, Revize, Průzkumníka a Exporty."
|
||||
"viewerDesc": "Omezení pouze na Živé dashboardy, Revize, Průzkumníka a Exporty.",
|
||||
"customDesc": "Vlastní role s konkrétním přístupem ke kameře."
|
||||
},
|
||||
"title": "Změnit Roli Uživatele",
|
||||
"desc": "Aktualizovat oprávnění pro <strong>{{username}}</strong>",
|
||||
@@ -794,9 +795,99 @@
|
||||
"title": "Obsah",
|
||||
"imagePlaceholder": "Vybrat obrázek",
|
||||
"textPlaceholder": "Zadat textový obsah",
|
||||
"imageDesc": "Vybrat obrázek, který spustí tuto akci, když bude detekován podobný obrázek."
|
||||
"imageDesc": "Vybrat obrázek, který spustí tuto akci, když bude detekován podobný obrázek.",
|
||||
"textDesc": "Zadejte text, který spustí tuto akci, když bude zjištěn podobný popis sledovaného objektu.",
|
||||
"error": {
|
||||
"required": "Obsah je povinný."
|
||||
}
|
||||
},
|
||||
"actions": {
|
||||
"title": "Akce",
|
||||
"desc": "Ve výchozím nastavení Frigate odesílá MQTT zprávu pro všechny spouštěče. Zvolte dodatečnou akci, která se má provést, když se tento spouštěč aktivuje.",
|
||||
"error": {
|
||||
"min": "Musí být vybrána alespoň jedna akce."
|
||||
}
|
||||
},
|
||||
"threshold": {
|
||||
"title": "Práh",
|
||||
"error": {
|
||||
"min": "Práh musí být alespoň 0",
|
||||
"max": "Práh musí být nanejvýš 1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"createTrigger": "Spouštěč {{name}} byl úspěšně vytvořen.",
|
||||
"updateTrigger": "Spouštěč {{name}} byl úspěšně aktualizován.",
|
||||
"deleteTrigger": "Spouštěč {{name}} byl úspěšně smazán."
|
||||
},
|
||||
"error": {
|
||||
"createTriggerFailed": "Nepodařilo se vytvořit spouštěč: {{errorMessage}}",
|
||||
"updateTriggerFailed": "Nepodařilo se aktualizovat spouštěč: {{errorMessage}}",
|
||||
"deleteTriggerFailed": "Nepodařilo se smazat spouštěč: {{errorMessage}}"
|
||||
}
|
||||
}
|
||||
},
|
||||
"roles": {
|
||||
"addRole": "Přidat roli",
|
||||
"table": {
|
||||
"role": "Role",
|
||||
"cameras": "Kamery",
|
||||
"actions": "Akce",
|
||||
"noRoles": "Nebyly nalezeny žádné vlastní role.",
|
||||
"editCameras": "Upravit kamery",
|
||||
"deleteRole": "Smazat roli"
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"createRole": "Role {{role}} byla úspěšně vytvořena",
|
||||
"updateCameras": "Kamery byly aktualizovány pro roli {{role}}",
|
||||
"deleteRole": "Role {{role}} byla úspěšně smazána",
|
||||
"userRolesUpdated": "{{count}} uživatel(ů) přiřazených k této roli bylo aktualizováno na „Divák“, který má přístup ke všem kamerám."
|
||||
},
|
||||
"error": {
|
||||
"createRoleFailed": "Nepodařilo se vytvořit roli: {{errorMessage}}",
|
||||
"updateCamerasFailed": "Nepodařilo se aktualizovat kamery: {{errorMessage}}",
|
||||
"deleteRoleFailed": "Nepodařilo se smazat roli: {{errorMessage}}",
|
||||
"userUpdateFailed": "Nepodařilo se aktualizovat role uživatele: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"dialog": {
|
||||
"createRole": {
|
||||
"title": "Vytvořit novou roli",
|
||||
"desc": "Přidejte novou roli a určete oprávnění k přístupu ke kamerám."
|
||||
},
|
||||
"deleteRole": {
|
||||
"title": "Smazat roli",
|
||||
"warn": "Opravdu chcete smazat roli <strong>{{role}}</strong>?",
|
||||
"deleting": "Mazání...",
|
||||
"desc": "Tuto akci nelze vrátit zpět. Role bude trvale smazána a všichni uživatelé s touto rolí budou přeřazeni do role „Divák“, která poskytne přístup ke všem kamerám."
|
||||
},
|
||||
"form": {
|
||||
"role": {
|
||||
"title": "Název role",
|
||||
"placeholder": "Zadejte název role",
|
||||
"desc": "Povolena jsou pouze písmena, čísla, tečky a podtržítka.",
|
||||
"roleIsRequired": "Název role je povinný",
|
||||
"roleOnlyInclude": "Název role smí obsahovat pouze písmena, čísla, . nebo _",
|
||||
"roleExists": "Role s tímto názvem již existuje."
|
||||
},
|
||||
"cameras": {
|
||||
"title": "Kamery",
|
||||
"desc": "Vyberte kamery, ke kterým má tato role přístup. Je vyžadována alespoň jedna kamera.",
|
||||
"required": "Musí být vybrána alespoň jedna kamera."
|
||||
}
|
||||
},
|
||||
"editCameras": {
|
||||
"desc": "Aktualizujte přístup ke kamerám pro roli <strong>{{role}}</strong>.",
|
||||
"title": "Upravit kamery role"
|
||||
}
|
||||
},
|
||||
"management": {
|
||||
"title": "Správa role diváka",
|
||||
"desc": "Spravujte vlastní role diváků a jejich oprávnění k přístupu ke kamerám pro tuto instanci Frigate."
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -5,5 +5,80 @@
|
||||
"moo": "Bučanie",
|
||||
"cowbell": "Kravský zvonec",
|
||||
"pig": "Prasa",
|
||||
"speech": "Tale"
|
||||
"speech": "Tale",
|
||||
"bicycle": "Cykel",
|
||||
"car": "Bil",
|
||||
"bellow": "Under",
|
||||
"motorcycle": "Motorcykel",
|
||||
"whispering": "Hvisker",
|
||||
"bus": "Bus",
|
||||
"laughter": "Latter",
|
||||
"train": "Tog",
|
||||
"boat": "Båd",
|
||||
"crying": "Græder",
|
||||
"tambourine": "Tambourin",
|
||||
"marimba": "Marimba",
|
||||
"trumpet": "Trumpet",
|
||||
"trombone": "Trombone",
|
||||
"violin": "Violin",
|
||||
"flute": "Fløjte",
|
||||
"saxophone": "Saxofon",
|
||||
"clarinet": "Klarinet",
|
||||
"harp": "Harpe",
|
||||
"bell": "Klokke",
|
||||
"harmonica": "Harmonika",
|
||||
"bagpipes": "Sækkepibe",
|
||||
"didgeridoo": "Didgeridoo",
|
||||
"jazz": "Jazz",
|
||||
"opera": "Opera",
|
||||
"dubstep": "Dubstep",
|
||||
"blues": "Blues",
|
||||
"song": "Sang",
|
||||
"lullaby": "Vuggevise",
|
||||
"wind": "Vind",
|
||||
"thunderstorm": "Tordenvejr",
|
||||
"thunder": "Torden",
|
||||
"water": "Vand",
|
||||
"rain": "Regn",
|
||||
"raindrop": "Regndråbe",
|
||||
"waterfall": "Vandfald",
|
||||
"waves": "Bølger",
|
||||
"fire": "Ild",
|
||||
"vehicle": "Køretøj",
|
||||
"sailboat": "Sejlbåd",
|
||||
"rowboat": "Robåd",
|
||||
"motorboat": "Motorbåd",
|
||||
"ship": "Skib",
|
||||
"ambulance": "Ambulance",
|
||||
"helicopter": "Helikopter",
|
||||
"skateboard": "Skateboard",
|
||||
"chainsaw": "Motorsav",
|
||||
"door": "Dør",
|
||||
"doorbell": "Dørklokke",
|
||||
"slam": "Smæk",
|
||||
"knock": "Bank",
|
||||
"squeak": "Knirke",
|
||||
"dishes": "Tallerkener",
|
||||
"cutlery": "Bestik",
|
||||
"sink": "Håndvask",
|
||||
"bathtub": "Badekar",
|
||||
"toothbrush": "Tandbørste",
|
||||
"zipper": "Lynlås",
|
||||
"coin": "Mønt",
|
||||
"scissors": "Saks",
|
||||
"typewriter": "Skrivemaskine",
|
||||
"alarm": "Alarm",
|
||||
"telephone": "Telefon",
|
||||
"ringtone": "Ringetone",
|
||||
"siren": "Sirene",
|
||||
"foghorn": "Tågehorn",
|
||||
"whistle": "Fløjte",
|
||||
"clock": "Ur",
|
||||
"printer": "Printer",
|
||||
"camera": "Kamera",
|
||||
"tools": "Værktøj",
|
||||
"hammer": "Hammer",
|
||||
"drill": "Bore",
|
||||
"explosion": "Eksplosion",
|
||||
"fireworks": "Nytårskrudt"
|
||||
}
|
||||
|
@@ -5,7 +5,9 @@
|
||||
"login": "Log ind",
|
||||
"errors": {
|
||||
"usernameRequired": "Brugernavn kræves",
|
||||
"passwordRequired": "Kodeord kræves"
|
||||
"passwordRequired": "Kodeord kræves",
|
||||
"loginFailed": "Login fejlede",
|
||||
"unknownError": "Ukendt fejl. Tjek logs."
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,6 +1,17 @@
|
||||
{
|
||||
"group": {
|
||||
"label": "Kamera Grupper",
|
||||
"add": "Tilføj Kameragruppe"
|
||||
"add": "Tilføj Kameragruppe",
|
||||
"edit": "Rediger Kamera Gruppe",
|
||||
"delete": {
|
||||
"label": "Slet kamera gruppe",
|
||||
"confirm": {
|
||||
"title": "Bekræft sletning",
|
||||
"desc": "Er du sikker på at du vil slette kamera gruppen <em>{{name}}</em>?"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"label": "Navn"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1 +1,9 @@
|
||||
{}
|
||||
{
|
||||
"restart": {
|
||||
"title": "Er du sikker på at du vil genstarte Frigate?",
|
||||
"button": "Genstart",
|
||||
"restarting": {
|
||||
"title": "Frigate genstarter"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1 +1,17 @@
|
||||
{}
|
||||
{
|
||||
"filter": "Filter",
|
||||
"classes": {
|
||||
"label": "Klasser",
|
||||
"all": {
|
||||
"title": "Alle klasser"
|
||||
},
|
||||
"count_one": "{{count}} Klasse",
|
||||
"count_other": "{{count}} Klasser"
|
||||
},
|
||||
"labels": {
|
||||
"all": {
|
||||
"short": "Labels"
|
||||
},
|
||||
"count_one": "{{count}} Label"
|
||||
}
|
||||
}
|
||||
|
@@ -1,5 +1,8 @@
|
||||
{
|
||||
"iconPicker": {
|
||||
"selectIcon": "Vælg et ikon"
|
||||
"selectIcon": "Vælg et ikon",
|
||||
"search": {
|
||||
"placeholder": "Søg efter ikoner…"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1 +1,7 @@
|
||||
{}
|
||||
{
|
||||
"button": {
|
||||
"downloadVideo": {
|
||||
"label": "Download Video"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1 +1,5 @@
|
||||
{}
|
||||
{
|
||||
"noRecordingsFoundForThisTime": "Ingen optagelser fundet i det angivet tidsrum",
|
||||
"noPreviewFound": "Ingen forhåndsvisning fundet",
|
||||
"cameraDisabled": "Kamera er deaktiveret"
|
||||
}
|
||||
|
@@ -1,3 +1,18 @@
|
||||
{
|
||||
"person": "Person"
|
||||
"person": "Person",
|
||||
"bicycle": "Cykel",
|
||||
"car": "Bil",
|
||||
"motorcycle": "Motorcykel",
|
||||
"airplane": "Flyvemaskine",
|
||||
"bus": "Bus",
|
||||
"train": "Tog",
|
||||
"boat": "Båd",
|
||||
"traffic_light": "Trafiklys",
|
||||
"vehicle": "Køretøj",
|
||||
"skateboard": "Skateboard",
|
||||
"door": "Dør",
|
||||
"sink": "Håndvask",
|
||||
"toothbrush": "Tandbørste",
|
||||
"scissors": "Saks",
|
||||
"clock": "Ur"
|
||||
}
|
||||
|
@@ -1 +1,6 @@
|
||||
{}
|
||||
{
|
||||
"documentTitle": "Konfigurationsstyring - Frigate",
|
||||
"copyConfig": "Kopiér konfiguration",
|
||||
"saveAndRestart": "Gem & Genstart",
|
||||
"saveOnly": "Kun gem"
|
||||
}
|
||||
|
@@ -1 +1,11 @@
|
||||
{}
|
||||
{
|
||||
"alerts": "Alarmer",
|
||||
"detections": "Detekteringer",
|
||||
"motion": {
|
||||
"label": "Bevægelse",
|
||||
"only": "Kun bevægelse"
|
||||
},
|
||||
"allCameras": "Alle kameraer",
|
||||
"timeline": "Tidslinje",
|
||||
"camera": "Kamera"
|
||||
}
|
||||
|
@@ -9,5 +9,11 @@
|
||||
"lifecycleItemDesc": {
|
||||
"active": "{{label}} blev aktiv"
|
||||
}
|
||||
},
|
||||
"exploreIsUnavailable": {
|
||||
"embeddingsReindexing": {
|
||||
"startingUp": "Starter…",
|
||||
"estimatedTime": "Estimeret tid tilbage:"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,4 +1,9 @@
|
||||
{
|
||||
"documentTitle": "Eksporter - Frigate",
|
||||
"search": "Søg"
|
||||
"search": "Søg",
|
||||
"deleteExport.desc": "Er du sikker på at du vil slette {{exportName}}?",
|
||||
"editExport": {
|
||||
"title": "Omdøb Eksport",
|
||||
"saveExport": "Gem Eksport"
|
||||
}
|
||||
}
|
||||
|
@@ -1,3 +1,10 @@
|
||||
{
|
||||
"selectItem": "Vælg {{item}}"
|
||||
"selectItem": "Vælg {{item}}",
|
||||
"description": {
|
||||
"addFace": "Gennemgang af tilføjelse til ansigts bibliotek",
|
||||
"placeholder": "Angiv et navn for bibliotek"
|
||||
},
|
||||
"details": {
|
||||
"person": "Person"
|
||||
}
|
||||
}
|
||||
|
@@ -1 +1,12 @@
|
||||
{}
|
||||
{
|
||||
"documentTitle": "Live - Frigate",
|
||||
"documentTitle.withCamera": "{{camera}} - Live - Frigate",
|
||||
"twoWayTalk": {
|
||||
"enable": "Aktivér tovejskommunikation",
|
||||
"disable": "Deaktiver tovejskommunikation"
|
||||
},
|
||||
"cameraAudio": {
|
||||
"enable": "Aktivér kameralyd",
|
||||
"disable": "Deaktivér kamera lyd"
|
||||
}
|
||||
}
|
||||
|
@@ -1 +1,11 @@
|
||||
{}
|
||||
{
|
||||
"filter": "Filter",
|
||||
"export": "Eksporter",
|
||||
"calendar": "Kalender",
|
||||
"filters": "Filtere",
|
||||
"toast": {
|
||||
"error": {
|
||||
"endTimeMustAfterStartTime": "Sluttidspunkt skal være efter starttidspunkt"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,3 +1,11 @@
|
||||
{
|
||||
"search": "Søg"
|
||||
"search": "Søg",
|
||||
"savedSearches": "Gemte Søgninger",
|
||||
"searchFor": "Søg efter {{inputValue}}",
|
||||
"button": {
|
||||
"save": "Gem søgning",
|
||||
"delete": "Slet gemt søgning",
|
||||
"filterInformation": "Filter information",
|
||||
"filterActive": "Filtre aktiv"
|
||||
}
|
||||
}
|
||||
|
@@ -1,5 +1,8 @@
|
||||
{
|
||||
"documentTitle": {
|
||||
"default": "Indstillinger - Frigate"
|
||||
"default": "Indstillinger - Frigate",
|
||||
"authentication": "Bruger Indstillinger - Frigate",
|
||||
"camera": "Kamera indstillinger - Frigate",
|
||||
"object": "Debug - Frigate"
|
||||
}
|
||||
}
|
||||
|
@@ -1 +1,12 @@
|
||||
{}
|
||||
{
|
||||
"documentTitle": {
|
||||
"cameras": "Kamera Statistik - Frigate",
|
||||
"storage": "Lagrings Statistik - Frigate",
|
||||
"logs": {
|
||||
"frigate": "Frigate Logs - Frigate",
|
||||
"go2rtc": "Go2RTC Logs - Frigate",
|
||||
"nginx": "Nginx Logs - Frigate"
|
||||
}
|
||||
},
|
||||
"title": "System"
|
||||
}
|
||||
|
26
web/public/locales/en/config/audio.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"label": "Global Audio events configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable audio events."
|
||||
},
|
||||
"max_not_heard": {
|
||||
"label": "Seconds of not hearing the type of audio to end the event."
|
||||
},
|
||||
"min_volume": {
|
||||
"label": "Min volume required to run audio detection."
|
||||
},
|
||||
"listen": {
|
||||
"label": "Audio to listen for."
|
||||
},
|
||||
"filters": {
|
||||
"label": "Audio filters."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of audio detection."
|
||||
},
|
||||
"num_threads": {
|
||||
"label": "Number of detection threads"
|
||||
}
|
||||
}
|
||||
}
|
23
web/public/locales/en/config/audio_transcription.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"label": "Audio transcription config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable audio transcription."
|
||||
},
|
||||
"language": {
|
||||
"label": "Language abbreviation to use for audio event transcription/translation."
|
||||
},
|
||||
"device": {
|
||||
"label": "The device used for license plate recognition."
|
||||
},
|
||||
"model_size": {
|
||||
"label": "The size of the embeddings model used."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of camera."
|
||||
},
|
||||
"live_enabled": {
|
||||
"label": "Enable live transcriptions."
|
||||
}
|
||||
}
|
||||
}
|
35
web/public/locales/en/config/auth.json
Normal file
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"label": "Auth configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable authentication"
|
||||
},
|
||||
"reset_admin_password": {
|
||||
"label": "Reset the admin password on startup"
|
||||
},
|
||||
"cookie_name": {
|
||||
"label": "Name for jwt token cookie"
|
||||
},
|
||||
"cookie_secure": {
|
||||
"label": "Set secure flag on cookie"
|
||||
},
|
||||
"session_length": {
|
||||
"label": "Session length for jwt session tokens"
|
||||
},
|
||||
"refresh_time": {
|
||||
"label": "Refresh the session if it is going to expire in this many seconds"
|
||||
},
|
||||
"failed_login_rate_limit": {
|
||||
"label": "Rate limits for failed login attempts."
|
||||
},
|
||||
"trusted_proxies": {
|
||||
"label": "Trusted proxies for determining IP address to rate limit"
|
||||
},
|
||||
"hash_iterations": {
|
||||
"label": "Password hash iterations"
|
||||
},
|
||||
"roles": {
|
||||
"label": "Role to camera mappings. Empty list grants access to all cameras."
|
||||
}
|
||||
}
|
||||
}
|
37
web/public/locales/en/config/birdseye.json
Normal file
@@ -0,0 +1,37 @@
|
||||
{
|
||||
"label": "Birdseye configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable birdseye view."
|
||||
},
|
||||
"mode": {
|
||||
"label": "Tracking mode."
|
||||
},
|
||||
"restream": {
|
||||
"label": "Restream birdseye via RTSP."
|
||||
},
|
||||
"width": {
|
||||
"label": "Birdseye width."
|
||||
},
|
||||
"height": {
|
||||
"label": "Birdseye height."
|
||||
},
|
||||
"quality": {
|
||||
"label": "Encoding quality."
|
||||
},
|
||||
"inactivity_threshold": {
|
||||
"label": "Birdseye Inactivity Threshold"
|
||||
},
|
||||
"layout": {
|
||||
"label": "Birdseye Layout Config",
|
||||
"properties": {
|
||||
"scaling_factor": {
|
||||
"label": "Birdseye Scaling Factor"
|
||||
},
|
||||
"max_cameras": {
|
||||
"label": "Max cameras"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
14
web/public/locales/en/config/camera_groups.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"label": "Camera group configuration",
|
||||
"properties": {
|
||||
"cameras": {
|
||||
"label": "List of cameras in this group."
|
||||
},
|
||||
"icon": {
|
||||
"label": "Icon that represents camera group."
|
||||
},
|
||||
"order": {
|
||||
"label": "Sort order for group."
|
||||
}
|
||||
}
|
||||
}
|
761
web/public/locales/en/config/cameras.json
Normal file
@@ -0,0 +1,761 @@
|
||||
{
|
||||
"label": "Camera configuration.",
|
||||
"properties": {
|
||||
"name": {
|
||||
"label": "Camera name."
|
||||
},
|
||||
"friendly_name": {
|
||||
"label": "Camera friendly name used in the Frigate UI."
|
||||
},
|
||||
"enabled": {
|
||||
"label": "Enable camera."
|
||||
},
|
||||
"audio": {
|
||||
"label": "Audio events configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable audio events."
|
||||
},
|
||||
"max_not_heard": {
|
||||
"label": "Seconds of not hearing the type of audio to end the event."
|
||||
},
|
||||
"min_volume": {
|
||||
"label": "Min volume required to run audio detection."
|
||||
},
|
||||
"listen": {
|
||||
"label": "Audio to listen for."
|
||||
},
|
||||
"filters": {
|
||||
"label": "Audio filters."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of audio detection."
|
||||
},
|
||||
"num_threads": {
|
||||
"label": "Number of detection threads"
|
||||
}
|
||||
}
|
||||
},
|
||||
"audio_transcription": {
|
||||
"label": "Audio transcription config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable audio transcription."
|
||||
},
|
||||
"language": {
|
||||
"label": "Language abbreviation to use for audio event transcription/translation."
|
||||
},
|
||||
"device": {
|
||||
"label": "The device used for license plate recognition."
|
||||
},
|
||||
"model_size": {
|
||||
"label": "The size of the embeddings model used."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of camera."
|
||||
},
|
||||
"live_enabled": {
|
||||
"label": "Enable live transcriptions."
|
||||
}
|
||||
}
|
||||
},
|
||||
"birdseye": {
|
||||
"label": "Birdseye camera configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable birdseye view for camera."
|
||||
},
|
||||
"mode": {
|
||||
"label": "Tracking mode for camera."
|
||||
},
|
||||
"order": {
|
||||
"label": "Position of the camera in the birdseye view."
|
||||
}
|
||||
}
|
||||
},
|
||||
"detect": {
|
||||
"label": "Object detection configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Detection Enabled."
|
||||
},
|
||||
"height": {
|
||||
"label": "Height of the stream for the detect role."
|
||||
},
|
||||
"width": {
|
||||
"label": "Width of the stream for the detect role."
|
||||
},
|
||||
"fps": {
|
||||
"label": "Number of frames per second to process through detection."
|
||||
},
|
||||
"min_initialized": {
|
||||
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker."
|
||||
},
|
||||
"max_disappeared": {
|
||||
"label": "Maximum number of frames the object can disappear before detection ends."
|
||||
},
|
||||
"stationary": {
|
||||
"label": "Stationary objects config.",
|
||||
"properties": {
|
||||
"interval": {
|
||||
"label": "Frame interval for checking stationary objects."
|
||||
},
|
||||
"threshold": {
|
||||
"label": "Number of frames without a position change for an object to be considered stationary"
|
||||
},
|
||||
"max_frames": {
|
||||
"label": "Max frames for stationary objects.",
|
||||
"properties": {
|
||||
"default": {
|
||||
"label": "Default max frames."
|
||||
},
|
||||
"objects": {
|
||||
"label": "Object specific max frames."
|
||||
}
|
||||
}
|
||||
},
|
||||
"classifier": {
|
||||
"label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary."
|
||||
}
|
||||
}
|
||||
},
|
||||
"annotation_offset": {
|
||||
"label": "Milliseconds to offset detect annotations by."
|
||||
}
|
||||
}
|
||||
},
|
||||
"face_recognition": {
|
||||
"label": "Face recognition config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable face recognition."
|
||||
},
|
||||
"min_area": {
|
||||
"label": "Min area of face box to consider running face recognition."
|
||||
}
|
||||
}
|
||||
},
|
||||
"ffmpeg": {
|
||||
"label": "FFmpeg configuration for the camera.",
|
||||
"properties": {
|
||||
"path": {
|
||||
"label": "FFmpeg path"
|
||||
},
|
||||
"global_args": {
|
||||
"label": "Global FFmpeg arguments."
|
||||
},
|
||||
"hwaccel_args": {
|
||||
"label": "FFmpeg hardware acceleration arguments."
|
||||
},
|
||||
"input_args": {
|
||||
"label": "FFmpeg input arguments."
|
||||
},
|
||||
"output_args": {
|
||||
"label": "FFmpeg output arguments per role.",
|
||||
"properties": {
|
||||
"detect": {
|
||||
"label": "Detect role FFmpeg output arguments."
|
||||
},
|
||||
"record": {
|
||||
"label": "Record role FFmpeg output arguments."
|
||||
}
|
||||
}
|
||||
},
|
||||
"retry_interval": {
|
||||
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera."
|
||||
},
|
||||
"apple_compatibility": {
|
||||
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players."
|
||||
},
|
||||
"inputs": {
|
||||
"label": "Camera inputs."
|
||||
}
|
||||
}
|
||||
},
|
||||
"live": {
|
||||
"label": "Live playback settings.",
|
||||
"properties": {
|
||||
"streams": {
|
||||
"label": "Friendly names and restream names to use for live view."
|
||||
},
|
||||
"height": {
|
||||
"label": "Live camera view height"
|
||||
},
|
||||
"quality": {
|
||||
"label": "Live camera view quality"
|
||||
}
|
||||
}
|
||||
},
|
||||
"lpr": {
|
||||
"label": "LPR config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable license plate recognition."
|
||||
},
|
||||
"expire_time": {
|
||||
"label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)."
|
||||
},
|
||||
"min_area": {
|
||||
"label": "Minimum area of license plate to begin running recognition."
|
||||
},
|
||||
"enhancement": {
|
||||
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition."
|
||||
}
|
||||
}
|
||||
},
|
||||
"motion": {
|
||||
"label": "Motion detection configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable motion on all cameras."
|
||||
},
|
||||
"threshold": {
|
||||
"label": "Motion detection threshold (1-255)."
|
||||
},
|
||||
"lightning_threshold": {
|
||||
"label": "Lightning detection threshold (0.3-1.0)."
|
||||
},
|
||||
"improve_contrast": {
|
||||
"label": "Improve Contrast"
|
||||
},
|
||||
"contour_area": {
|
||||
"label": "Contour Area"
|
||||
},
|
||||
"delta_alpha": {
|
||||
"label": "Delta Alpha"
|
||||
},
|
||||
"frame_alpha": {
|
||||
"label": "Frame Alpha"
|
||||
},
|
||||
"frame_height": {
|
||||
"label": "Frame Height"
|
||||
},
|
||||
"mask": {
|
||||
"label": "Coordinates polygon for the motion mask."
|
||||
},
|
||||
"mqtt_off_delay": {
|
||||
"label": "Delay for updating MQTT with no motion detected."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of motion detection."
|
||||
}
|
||||
}
|
||||
},
|
||||
"objects": {
|
||||
"label": "Object configuration.",
|
||||
"properties": {
|
||||
"track": {
|
||||
"label": "Objects to track."
|
||||
},
|
||||
"filters": {
|
||||
"label": "Object filters.",
|
||||
"properties": {
|
||||
"min_area": {
|
||||
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
|
||||
},
|
||||
"max_area": {
|
||||
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
|
||||
},
|
||||
"min_ratio": {
|
||||
"label": "Minimum ratio of bounding box's width/height for object to be counted."
|
||||
},
|
||||
"max_ratio": {
|
||||
"label": "Maximum ratio of bounding box's width/height for object to be counted."
|
||||
},
|
||||
"threshold": {
|
||||
"label": "Average detection confidence threshold for object to be counted."
|
||||
},
|
||||
"min_score": {
|
||||
"label": "Minimum detection confidence for object to be counted."
|
||||
},
|
||||
"mask": {
|
||||
"label": "Detection area polygon mask for this filter configuration."
|
||||
}
|
||||
}
|
||||
},
|
||||
"mask": {
|
||||
"label": "Object mask."
|
||||
},
|
||||
"genai": {
|
||||
"label": "Config for using genai to analyze objects.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable GenAI for camera."
|
||||
},
|
||||
"use_snapshot": {
|
||||
"label": "Use snapshots for generating descriptions."
|
||||
},
|
||||
"prompt": {
|
||||
"label": "Default caption prompt."
|
||||
},
|
||||
"object_prompts": {
|
||||
"label": "Object specific prompts."
|
||||
},
|
||||
"objects": {
|
||||
"label": "List of objects to run generative AI for."
|
||||
},
|
||||
"required_zones": {
|
||||
"label": "List of required zones to be entered in order to run generative AI."
|
||||
},
|
||||
"debug_save_thumbnails": {
|
||||
"label": "Save thumbnails sent to generative AI for debugging purposes."
|
||||
},
|
||||
"send_triggers": {
|
||||
"label": "What triggers to use to send frames to generative AI for a tracked object.",
|
||||
"properties": {
|
||||
"tracked_object_end": {
|
||||
"label": "Send once the object is no longer tracked."
|
||||
},
|
||||
"after_significant_updates": {
|
||||
"label": "Send an early request to generative AI when X frames accumulated."
|
||||
}
|
||||
}
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of generative AI."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"record": {
|
||||
"label": "Record configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable record on all cameras."
|
||||
},
|
||||
"sync_recordings": {
|
||||
"label": "Sync recordings with disk on startup and once a day."
|
||||
},
|
||||
"expire_interval": {
|
||||
"label": "Number of minutes to wait between cleanup runs."
|
||||
},
|
||||
"continuous": {
|
||||
"label": "Continuous recording retention settings.",
|
||||
"properties": {
|
||||
"days": {
|
||||
"label": "Default retention period."
|
||||
}
|
||||
}
|
||||
},
|
||||
"motion": {
|
||||
"label": "Motion recording retention settings.",
|
||||
"properties": {
|
||||
"days": {
|
||||
"label": "Default retention period."
|
||||
}
|
||||
}
|
||||
},
|
||||
"detections": {
|
||||
"label": "Detection specific retention settings.",
|
||||
"properties": {
|
||||
"pre_capture": {
|
||||
"label": "Seconds to retain before event starts."
|
||||
},
|
||||
"post_capture": {
|
||||
"label": "Seconds to retain after event ends."
|
||||
},
|
||||
"retain": {
|
||||
"label": "Event retention settings.",
|
||||
"properties": {
|
||||
"days": {
|
||||
"label": "Default retention period."
|
||||
},
|
||||
"mode": {
|
||||
"label": "Retain mode."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"alerts": {
|
||||
"label": "Alert specific retention settings.",
|
||||
"properties": {
|
||||
"pre_capture": {
|
||||
"label": "Seconds to retain before event starts."
|
||||
},
|
||||
"post_capture": {
|
||||
"label": "Seconds to retain after event ends."
|
||||
},
|
||||
"retain": {
|
||||
"label": "Event retention settings.",
|
||||
"properties": {
|
||||
"days": {
|
||||
"label": "Default retention period."
|
||||
},
|
||||
"mode": {
|
||||
"label": "Retain mode."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"export": {
|
||||
"label": "Recording Export Config",
|
||||
"properties": {
|
||||
"timelapse_args": {
|
||||
"label": "Timelapse Args"
|
||||
}
|
||||
}
|
||||
},
|
||||
"preview": {
|
||||
"label": "Recording Preview Config",
|
||||
"properties": {
|
||||
"quality": {
|
||||
"label": "Quality of recording preview."
|
||||
}
|
||||
}
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of recording."
|
||||
}
|
||||
}
|
||||
},
|
||||
"review": {
|
||||
"label": "Review configuration.",
|
||||
"properties": {
|
||||
"alerts": {
|
||||
"label": "Review alerts config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable alerts."
|
||||
},
|
||||
"labels": {
|
||||
"label": "Labels to create alerts for."
|
||||
},
|
||||
"required_zones": {
|
||||
"label": "List of required zones to be entered in order to save the event as an alert."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of alerts."
|
||||
},
|
||||
"cutoff_time": {
|
||||
"label": "Time to cutoff alerts after no alert-causing activity has occurred."
|
||||
}
|
||||
}
|
||||
},
|
||||
"detections": {
|
||||
"label": "Review detections config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable detections."
|
||||
},
|
||||
"labels": {
|
||||
"label": "Labels to create detections for."
|
||||
},
|
||||
"required_zones": {
|
||||
"label": "List of required zones to be entered in order to save the event as a detection."
|
||||
},
|
||||
"cutoff_time": {
|
||||
"label": "Time to cutoff detection after no detection-causing activity has occurred."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of detections."
|
||||
}
|
||||
}
|
||||
},
|
||||
"genai": {
|
||||
"label": "Review description genai config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable GenAI descriptions for review items."
|
||||
},
|
||||
"alerts": {
|
||||
"label": "Enable GenAI for alerts."
|
||||
},
|
||||
"detections": {
|
||||
"label": "Enable GenAI for detections."
|
||||
},
|
||||
"additional_concerns": {
|
||||
"label": "Additional concerns that GenAI should make note of on this camera."
|
||||
},
|
||||
"debug_save_thumbnails": {
|
||||
"label": "Save thumbnails sent to generative AI for debugging purposes."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of generative AI."
|
||||
},
|
||||
"preferred_language": {
|
||||
"label": "Preferred language for GenAI Response"
|
||||
},
|
||||
"activity_context_prompt": {
|
||||
"label": "Custom activity context prompt defining normal activity patterns for this property."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"semantic_search": {
|
||||
"label": "Semantic search configuration.",
|
||||
"properties": {
|
||||
"triggers": {
|
||||
"label": "Trigger actions on tracked objects that match existing thumbnails or descriptions",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable this trigger"
|
||||
},
|
||||
"type": {
|
||||
"label": "Type of trigger"
|
||||
},
|
||||
"data": {
|
||||
"label": "Trigger content (text phrase or image ID)"
|
||||
},
|
||||
"threshold": {
|
||||
"label": "Confidence score required to run the trigger"
|
||||
},
|
||||
"actions": {
|
||||
"label": "Actions to perform when trigger is matched"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"snapshots": {
|
||||
"label": "Snapshot configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Snapshots enabled."
|
||||
},
|
||||
"clean_copy": {
|
||||
"label": "Create a clean copy of the snapshot image."
|
||||
},
|
||||
"timestamp": {
|
||||
"label": "Add a timestamp overlay on the snapshot."
|
||||
},
|
||||
"bounding_box": {
|
||||
"label": "Add a bounding box overlay on the snapshot."
|
||||
},
|
||||
"crop": {
|
||||
"label": "Crop the snapshot to the detected object."
|
||||
},
|
||||
"required_zones": {
|
||||
"label": "List of required zones to be entered in order to save a snapshot."
|
||||
},
|
||||
"height": {
|
||||
"label": "Snapshot image height."
|
||||
},
|
||||
"retain": {
|
||||
"label": "Snapshot retention.",
|
||||
"properties": {
|
||||
"default": {
|
||||
"label": "Default retention period."
|
||||
},
|
||||
"mode": {
|
||||
"label": "Retain mode."
|
||||
},
|
||||
"objects": {
|
||||
"label": "Object retention period."
|
||||
}
|
||||
}
|
||||
},
|
||||
"quality": {
|
||||
"label": "Quality of the encoded jpeg (0-100)."
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp_style": {
|
||||
"label": "Timestamp style configuration.",
|
||||
"properties": {
|
||||
"position": {
|
||||
"label": "Timestamp position."
|
||||
},
|
||||
"format": {
|
||||
"label": "Timestamp format."
|
||||
},
|
||||
"color": {
|
||||
"label": "Timestamp color.",
|
||||
"properties": {
|
||||
"red": {
|
||||
"label": "Red"
|
||||
},
|
||||
"green": {
|
||||
"label": "Green"
|
||||
},
|
||||
"blue": {
|
||||
"label": "Blue"
|
||||
}
|
||||
}
|
||||
},
|
||||
"thickness": {
|
||||
"label": "Timestamp thickness."
|
||||
},
|
||||
"effect": {
|
||||
"label": "Timestamp effect."
|
||||
}
|
||||
}
|
||||
},
|
||||
"best_image_timeout": {
|
||||
"label": "How long to wait for the image with the highest confidence score."
|
||||
},
|
||||
"mqtt": {
|
||||
"label": "MQTT configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Send image over MQTT."
|
||||
},
|
||||
"timestamp": {
|
||||
"label": "Add timestamp to MQTT image."
|
||||
},
|
||||
"bounding_box": {
|
||||
"label": "Add bounding box to MQTT image."
|
||||
},
|
||||
"crop": {
|
||||
"label": "Crop MQTT image to detected object."
|
||||
},
|
||||
"height": {
|
||||
"label": "MQTT image height."
|
||||
},
|
||||
"required_zones": {
|
||||
"label": "List of required zones to be entered in order to send the image."
|
||||
},
|
||||
"quality": {
|
||||
"label": "Quality of the encoded jpeg (0-100)."
|
||||
}
|
||||
}
|
||||
},
|
||||
"notifications": {
|
||||
"label": "Notifications configuration.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable notifications"
|
||||
},
|
||||
"email": {
|
||||
"label": "Email required for push."
|
||||
},
|
||||
"cooldown": {
|
||||
"label": "Cooldown period for notifications (time in seconds)."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of notifications."
|
||||
}
|
||||
}
|
||||
},
|
||||
"onvif": {
|
||||
"label": "Camera Onvif Configuration.",
|
||||
"properties": {
|
||||
"host": {
|
||||
"label": "Onvif Host"
|
||||
},
|
||||
"port": {
|
||||
"label": "Onvif Port"
|
||||
},
|
||||
"user": {
|
||||
"label": "Onvif Username"
|
||||
},
|
||||
"password": {
|
||||
"label": "Onvif Password"
|
||||
},
|
||||
"tls_insecure": {
|
||||
"label": "Onvif Disable TLS verification"
|
||||
},
|
||||
"autotracking": {
|
||||
"label": "PTZ auto tracking config.",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable PTZ object autotracking."
|
||||
},
|
||||
"calibrate_on_startup": {
|
||||
"label": "Perform a camera calibration when Frigate starts."
|
||||
},
|
||||
"zooming": {
|
||||
"label": "Autotracker zooming mode."
|
||||
},
|
||||
"zoom_factor": {
|
||||
"label": "Zooming factor (0.1-0.75)."
|
||||
},
|
||||
"track": {
|
||||
"label": "Objects to track."
|
||||
},
|
||||
"required_zones": {
|
||||
"label": "List of required zones to be entered in order to begin autotracking."
|
||||
},
|
||||
"return_preset": {
|
||||
"label": "Name of camera preset to return to when object tracking is over."
|
||||
},
|
||||
"timeout": {
|
||||
"label": "Seconds to delay before returning to preset."
|
||||
},
|
||||
"movement_weights": {
|
||||
"label": "Internal value used for PTZ movements based on the speed of your camera's motor."
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of autotracking."
|
||||
}
|
||||
}
|
||||
},
|
||||
"ignore_time_mismatch": {
|
||||
"label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server"
|
||||
}
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"label": "Camera Type"
|
||||
},
|
||||
"ui": {
|
||||
"label": "Camera UI Modifications.",
|
||||
"properties": {
|
||||
"order": {
|
||||
"label": "Order of camera in UI."
|
||||
},
|
||||
"dashboard": {
|
||||
"label": "Show this camera in Frigate dashboard UI."
|
||||
}
|
||||
}
|
||||
},
|
||||
"webui_url": {
|
||||
"label": "URL to visit the camera directly from system page"
|
||||
},
|
||||
"zones": {
|
||||
"label": "Zone configuration.",
|
||||
"properties": {
|
||||
"filters": {
|
||||
"label": "Zone filters.",
|
||||
"properties": {
|
||||
"min_area": {
|
||||
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
|
||||
},
|
||||
"max_area": {
|
||||
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
|
||||
},
|
||||
"min_ratio": {
|
||||
"label": "Minimum ratio of bounding box's width/height for object to be counted."
|
||||
},
|
||||
"max_ratio": {
|
||||
"label": "Maximum ratio of bounding box's width/height for object to be counted."
|
||||
},
|
||||
"threshold": {
|
||||
"label": "Average detection confidence threshold for object to be counted."
|
||||
},
|
||||
"min_score": {
|
||||
"label": "Minimum detection confidence for object to be counted."
|
||||
},
|
||||
"mask": {
|
||||
"label": "Detection area polygon mask for this filter configuration."
|
||||
}
|
||||
}
|
||||
},
|
||||
"coordinates": {
|
||||
"label": "Coordinates polygon for the defined zone."
|
||||
},
|
||||
"distances": {
|
||||
"label": "Real-world distances for the sides of quadrilateral for the defined zone."
|
||||
},
|
||||
"inertia": {
|
||||
"label": "Number of consecutive frames required for object to be considered present in the zone."
|
||||
},
|
||||
"loitering_time": {
|
||||
"label": "Number of seconds that an object must loiter to be considered in the zone."
|
||||
},
|
||||
"speed_threshold": {
|
||||
"label": "Minimum speed value for an object to be considered in the zone."
|
||||
},
|
||||
"objects": {
|
||||
"label": "List of objects that can trigger the zone."
|
||||
}
|
||||
}
|
||||
},
|
||||
"enabled_in_config": {
|
||||
"label": "Keep track of original state of camera."
|
||||
}
|
||||
}
|
||||
}
|
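The camera schema above nests a `label` string under repeated `properties` maps, mirroring the shape of Frigate's config tree. As a rough illustration only (not Frigate's actual UI code), a label for a dotted config path could be resolved by walking those maps; the `LabelNode` type and `getConfigLabel` helper below are hypothetical names introduced for this sketch.

// Illustrative TypeScript sketch: LabelNode and getConfigLabel are hypothetical,
// they only model the shape of the locale files added in this diff.
interface LabelNode {
  label?: string;
  description?: string;
  properties?: Record<string, LabelNode>;
}

// Walk the nested `properties` maps to find the label for a dotted path,
// e.g. "detect.stationary.threshold" or "onvif.autotracking.zoom_factor".
function getConfigLabel(root: LabelNode, path: string): string | undefined {
  let node: LabelNode | undefined = root;
  for (const key of path.split(".")) {
    node = node?.properties?.[key];
    if (!node) return undefined;
  }
  return node.label;
}

// Example (assuming `cameraSchema` holds the JSON shown above):
// getConfigLabel(cameraSchema, "zones.loitering_time")
//   -> "Number of seconds that an object must loiter to be considered in the zone."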
58  web/public/locales/en/config/classification.json  Normal file
@@ -0,0 +1,58 @@
{
  "label": "Object classification config.",
  "properties": {
    "bird": {
      "label": "Bird classification config.",
      "properties": {
        "enabled": {
          "label": "Enable bird classification."
        },
        "threshold": {
          "label": "Minimum classification score required to be considered a match."
        }
      }
    },
    "custom": {
      "label": "Custom Classification Model Configs.",
      "properties": {
        "enabled": {
          "label": "Enable running the model."
        },
        "name": {
          "label": "Name of classification model."
        },
        "threshold": {
          "label": "Classification score threshold to change the state."
        },
        "object_config": {
          "properties": {
            "objects": {
              "label": "Object types to classify."
            },
            "classification_type": {
              "label": "Type of classification that is applied."
            }
          }
        },
        "state_config": {
          "properties": {
            "cameras": {
              "label": "Cameras to run classification on.",
              "properties": {
                "crop": {
                  "label": "Crop of image frame on this camera to run classification on."
                }
              }
            },
            "motion": {
              "label": "If classification should be run when motion is detected in the crop."
            },
            "interval": {
              "label": "Interval to run classification on in seconds."
            }
          }
        }
      }
    }
  }
}
8  web/public/locales/en/config/database.json  Normal file
@@ -0,0 +1,8 @@
{
  "label": "Database configuration.",
  "properties": {
    "path": {
      "label": "Database path."
    }
  }
}
51  web/public/locales/en/config/detect.json  Normal file
@@ -0,0 +1,51 @@
{
  "label": "Global object tracking configuration.",
  "properties": {
    "enabled": {
      "label": "Detection Enabled."
    },
    "height": {
      "label": "Height of the stream for the detect role."
    },
    "width": {
      "label": "Width of the stream for the detect role."
    },
    "fps": {
      "label": "Number of frames per second to process through detection."
    },
    "min_initialized": {
      "label": "Minimum number of consecutive hits for an object to be initialized by the tracker."
    },
    "max_disappeared": {
      "label": "Maximum number of frames the object can disappear before detection ends."
    },
    "stationary": {
      "label": "Stationary objects config.",
      "properties": {
        "interval": {
          "label": "Frame interval for checking stationary objects."
        },
        "threshold": {
          "label": "Number of frames without a position change for an object to be considered stationary."
        },
        "max_frames": {
          "label": "Max frames for stationary objects.",
          "properties": {
            "default": {
              "label": "Default max frames."
            },
            "objects": {
              "label": "Object specific max frames."
            }
          }
        },
        "classifier": {
          "label": "Enable visual classifier for determining if objects with jittery bounding boxes are stationary."
        }
      }
    },
    "annotation_offset": {
      "label": "Milliseconds to offset detect annotations by."
    }
  }
}
14  web/public/locales/en/config/detectors.json  Normal file
@@ -0,0 +1,14 @@
{
  "label": "Detector hardware configuration.",
  "properties": {
    "type": {
      "label": "Detector Type"
    },
    "model": {
      "label": "Detector specific model configuration."
    },
    "model_path": {
      "label": "Detector specific model path."
    }
  }
}
3  web/public/locales/en/config/environment_vars.json  Normal file
@@ -0,0 +1,3 @@
{
  "label": "Frigate environment variables."
}
36  web/public/locales/en/config/face_recognition.json  Normal file
@@ -0,0 +1,36 @@
{
  "label": "Face recognition config.",
  "properties": {
    "enabled": {
      "label": "Enable face recognition."
    },
    "model_size": {
      "label": "The size of the embeddings model used."
    },
    "unknown_score": {
      "label": "Minimum face distance score required to be marked as a potential match."
    },
    "detection_threshold": {
      "label": "Minimum face detection score required to be considered a face."
    },
    "recognition_threshold": {
      "label": "Minimum face distance score required to be considered a match."
    },
    "min_area": {
      "label": "Min area of face box to consider running face recognition."
    },
    "min_faces": {
      "label": "Min face recognitions for the sub label to be applied to the person object."
    },
    "save_attempts": {
      "label": "Number of face attempts to save in the train tab."
    },
    "blur_confidence_filter": {
      "label": "Apply blur quality filter to face confidence."
    },
    "device": {
      "label": "The device key to use for face recognition.",
      "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
    }
  }
}
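face_recognition.json is the first of these files to pair a `description` with a `label`. A hedged sketch of how such a file could be flattened into dotted-path/label pairs (for example to build a searchable settings index) follows; it reuses the same hypothetical `LabelNode` shape as the earlier sketch, repeated here so the snippet stands alone, and `flattenLabels` is an illustrative name rather than Frigate code.

// Illustrative TypeScript sketch; types and helper names are hypothetical.
interface LabelNode {
  label?: string;
  description?: string;
  properties?: Record<string, LabelNode>;
}

// Flatten a schema file into "dotted.path" -> label entries,
// e.g. "device" -> "The device key to use for face recognition."
function flattenLabels(node: LabelNode, prefix = ""): Array<[string, string]> {
  const out: Array<[string, string]> = [];
  if (node.label) out.push([prefix || "(root)", node.label]);
  for (const [key, child] of Object.entries(node.properties ?? {})) {
    out.push(...flattenLabels(child, prefix ? `${prefix}.${key}` : key));
  }
  return out;
}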
34  web/public/locales/en/config/ffmpeg.json  Normal file
@@ -0,0 +1,34 @@
{
  "label": "Global FFmpeg configuration.",
  "properties": {
    "path": {
      "label": "FFmpeg path"
    },
    "global_args": {
      "label": "Global FFmpeg arguments."
    },
    "hwaccel_args": {
      "label": "FFmpeg hardware acceleration arguments."
    },
    "input_args": {
      "label": "FFmpeg input arguments."
    },
    "output_args": {
      "label": "FFmpeg output arguments per role.",
      "properties": {
        "detect": {
          "label": "Detect role FFmpeg output arguments."
        },
        "record": {
          "label": "Record role FFmpeg output arguments."
        }
      }
    },
    "retry_interval": {
      "label": "Time in seconds to wait before FFmpeg retries connecting to the camera."
    },
    "apple_compatibility": {
      "label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players."
    }
  }
}
20  web/public/locales/en/config/genai.json  Normal file
@@ -0,0 +1,20 @@
{
  "label": "Generative AI configuration.",
  "properties": {
    "api_key": {
      "label": "Provider API key."
    },
    "base_url": {
      "label": "Provider base url."
    },
    "model": {
      "label": "GenAI model."
    },
    "provider": {
      "label": "GenAI provider."
    },
    "provider_options": {
      "label": "GenAI Provider extra options."
    }
  }
}
3  web/public/locales/en/config/go2rtc.json  Normal file
@@ -0,0 +1,3 @@
{
  "label": "Global restream configuration."
}
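The per-section layout under `web/public/locales/en/config/` matches the convention used by i18next-style locale loaders, but this diff does not show how the web UI actually wires the files up, so the following is only a sketch under that assumption; the import path and namespace names are illustrative.

// Sketch only: assumes an i18next-style setup. The wiring is an assumption,
// only the file contents and paths come from this diff.
import i18next from "i18next";
import detect from "../public/locales/en/config/detect.json"; // requires "resolveJsonModule"

await i18next.init({
  lng: "en",
  ns: ["config/detect"],
  defaultNS: "config/detect",
  resources: { en: { "config/detect": detect } },
});

// Nested keys resolve with i18next's default "." key separator:
i18next.t("properties.stationary.properties.threshold.label");
// -> "Number of frames without a position change for an object to be considered stationary."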
Some files were not shown because too many files have changed in this diff.