Compare commits


1 Commit

Author: Josh Hawkins
SHA1: 80d1c16783
Message: Update triggers docs to explain why text-to-image triggers are unsupported
(Many users won't understand why CLIP models can't be magic object detectors or classifiers)
Date: 2025-09-19 20:09:05 -05:00
233 changed files with 1729 additions and 10657 deletions


@@ -173,31 +173,6 @@ jobs:
set: |
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
synaptics_build:
runs-on: ubuntu-22.04-arm
name: Synaptics Build
needs:
- arm64_build
steps:
- name: Check out code
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Synaptics build
uses: docker/bake-action@v6
with:
source: .
push: true
targets: synaptics
files: docker/synaptics/synaptics.hcl
set: |
synaptics.tags=${{ steps.setup.outputs.image-name }}-synaptics
*.cache-from=type=gha
# The majority of users running arm64 are rpi users, so the rpi
# build should be the primary arm64 image
assemble_default_build:


@@ -4,14 +4,38 @@ on:
pull_request:
paths-ignore:
- "docs/**"
- ".github/*.yml"
- ".github/DISCUSSION_TEMPLATE/**"
- ".github/ISSUE_TEMPLATE/**"
- ".github/**"
env:
DEFAULT_PYTHON: 3.11
jobs:
build_devcontainer:
runs-on: ubuntu-latest
name: Build Devcontainer
# The Dockerfile contains features that require buildkit, and since the
# devcontainer cli uses docker-compose to build the image, the only way to
# ensure docker-compose uses buildkit is to explicitly enable it.
env:
DOCKER_BUILDKIT: "1"
steps:
- uses: actions/checkout@v5
with:
persist-credentials: false
- uses: actions/setup-node@master
with:
node-version: 20.x
- name: Install devcontainer cli
run: npm install --global @devcontainers/cli
- name: Build devcontainer
run: devcontainer build --workspace-folder .
# It would be nice to also test the following commands, but for some
# reason they don't work even though the devcontainer works in VS Code.
# - name: Start devcontainer
# run: devcontainer up --workspace-folder .
# - name: Run devcontainer scripts
# run: devcontainer run-user-commands --workspace-folder .
web_lint:
name: Web - Lint
runs-on: ubuntu-latest
@@ -78,18 +102,13 @@ jobs:
uses: actions/checkout@v5
with:
persist-credentials: false
- uses: actions/setup-node@master
with:
node-version: 20.x
- name: Install devcontainer cli
run: npm install --global @devcontainers/cli
- name: Build devcontainer
env:
DOCKER_BUILDKIT: "1"
run: devcontainer build --workspace-folder .
- name: Start devcontainer
run: devcontainer up --workspace-folder .
- name: Run mypy in devcontainer
run: devcontainer exec --workspace-folder . bash -lc "python3 -u -m mypy --config-file frigate/mypy.ini frigate"
- name: Run unit tests in devcontainer
run: devcontainer exec --workspace-folder . bash -lc "python3 -u -m unittest"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build
run: make debug
- name: Run mypy
run: docker run --rm --entrypoint=python3 frigate:latest -u -m mypy --config-file frigate/mypy.ini frigate
- name: Run tests
run: docker run --rm --entrypoint=python3 frigate:latest -u -m unittest


@@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.9/go2rtc_linux_${TARGETARCH}" go2rtc
FROM wget AS tempio
ARG TARGETARCH


@@ -50,38 +50,6 @@ function set_libva_version() {
export LIBAVFORMAT_VERSION_MAJOR
}
function setup_homekit_config() {
local config_path="$1"
if [[ ! -f "${config_path}" ]]; then
echo "[INFO] Creating empty HomeKit config file..."
echo '{}' > "${config_path}"
fi
# Convert YAML to JSON for jq processing
local temp_json="/tmp/cache/homekit_config.json"
yq eval -o=json "${config_path}" > "${temp_json}" 2>/dev/null || {
echo "[WARNING] Failed to convert HomeKit config to JSON, skipping cleanup"
return 0
}
# Use jq to filter and keep only the homekit section
local cleaned_json="/tmp/cache/homekit_cleaned.json"
jq '
# Keep only the homekit section if it exists, otherwise empty object
if has("homekit") then {homekit: .homekit} else {homekit: {}} end
' "${temp_json}" > "${cleaned_json}" 2>/dev/null || echo '{"homekit": {}}' > "${cleaned_json}"
# Convert back to YAML and write to the config file
yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
echo '{"homekit": {}}' > "${config_path}"
}
# Clean up temp files
rm -f "${temp_json}" "${cleaned_json}"
}
set_libva_version
if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
@@ -102,10 +70,6 @@ else
echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi
# HomeKit configuration persistence setup
readonly homekit_config_path="/config/go2rtc_homekit.yml"
setup_homekit_config "${homekit_config_path}"
readonly config_path="/config"
if [[ -x "${config_path}/go2rtc" ]]; then
@@ -118,7 +82,5 @@ fi
echo "[INFO] Starting go2rtc..."
# Replace the bash process with the go2rtc process, redirecting stderr to stdout
# Use HomeKit config as the primary config so writebacks go there
# The main config from Frigate will be loaded as a secondary config
exec 2>&1
exec "${binary_path}" -config="${homekit_config_path}" -config=/dev/shm/go2rtc.yaml
exec "${binary_path}" -config=/dev/shm/go2rtc.yaml


@@ -17,9 +17,7 @@ http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" '
'request_time="$request_time" upstream_response_time="$upstream_response_time"';
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;


@@ -15,14 +15,14 @@ ARG AMDGPU
RUN apt update -qq && \
apt install -y wget gpg && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.0.1/ubuntu/jammy/amdgpu-install_7.0.1.70001-1_all.deb && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/6.4.1/ubuntu/jammy/amdgpu-install_6.4.60401-1_all.deb && \
apt install -y ./rocm.deb && \
apt update && \
apt install -qq -y rocm
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && \
cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* librocroller.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \
cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
@@ -64,10 +64,11 @@ COPY --from=rocm /opt/rocm-dist/ /
#######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0
ENV MIGRAPHX_DISABLE_MIOPEN_FUSION=1
ENV MIGRAPHX_DISABLE_SCHEDULE_PASS=1
ENV MIGRAPHX_DISABLE_REDUCE_FUSION=1
ENV MIGRAPHX_ENABLE_HIPRTC_WORKAROUNDS=1
ENV HSA_ENABLE_SDMA=0
ENV TF_ROCM_USE_IMMEDIATE_MODE=1
# avoid kernel crashes
ENV HIP_FORCE_DEV_KERNARG=1
COPY --from=rocm-dist / /


@@ -1 +1 @@
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.0.1/onnxruntime_migraphx-1.23.0-cp311-cp311-linux_x86_64.whl
onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.4.1/onnxruntime_rocm-1.21.1-cp311-cp311-linux_x86_64.whl


@@ -2,7 +2,7 @@ variable "AMDGPU" {
default = "gfx900"
}
variable "ROCM" {
default = "7.0.1"
default = "6.4.1"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
default = ""


@@ -1,28 +0,0 @@
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
FROM wheels AS synap1680-wheels
ARG TARGETARCH
# Install dependencies
RUN wget -qO- "https://github.com/GaryHuang-ASUS/synaptics_astra_sdk/releases/download/v1.5.0/Synaptics-SL1680-v1.5.0-rt.tar" | tar -C / -xzf -
RUN wget -P /wheels/ "https://github.com/synaptics-synap/synap-python/releases/download/v0.0.4-preview/synap_python-0.0.4-cp311-cp311-manylinux_2_35_aarch64.whl"
FROM deps AS synap1680-deps
ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=synap1680-wheels,source=/wheels,target=/deps/synap-wheels \
pip3 install --no-deps -U /deps/synap-wheels/*.whl
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY --from=synap1680-wheels /rootfs/usr/local/lib/*.so /usr/lib
ADD https://raw.githubusercontent.com/synaptics-astra/synap-release/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80/model.synap /synaptics/mobilenet.synap


@@ -1,27 +0,0 @@
target wheels {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "wheels"
}
target deps {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "deps"
}
target rootfs {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "rootfs"
}
target synaptics {
dockerfile = "docker/synaptics/Dockerfile"
contexts = {
wheels = "target:wheels",
deps = "target:deps",
rootfs = "target:rootfs"
}
platforms = ["linux/arm64"]
}


@@ -1,15 +0,0 @@
BOARDS += synaptics
local-synaptics: version
docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
--set synaptics.tags=frigate:latest-synaptics \
--load
build-synaptics: version
docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics
push-synaptics: build-synaptics
docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics \
--push


@@ -177,11 +177,9 @@ listen [::]:5000 ipv6only=off;
By default, Frigate runs at the root path (`/`). However, some setups require running Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing.
### Set Base Path via HTTP Header
The preferred way to configure the base path is through the `X-Ingress-Path` HTTP header, which needs to be set to the desired base path in an upstream reverse proxy.
For example, in Nginx:
```
location /frigate {
proxy_set_header X-Ingress-Path /frigate;
@@ -190,11 +188,9 @@ location /frigate {
```
### Set Base Path via Environment Variable
When it is not feasible to set the base path via an HTTP header, it can also be set via the `FRIGATE_BASE_PATH` environment variable in the Docker Compose file.
For example:
```
services:
frigate:
@@ -204,7 +200,6 @@ services:
```
This can be used, for example, to access Frigate via a Tailscale agent (https) by simply forwarding all requests to the base path (http):
```
tailscale serve --https=443 --bg --set-path /frigate http://localhost:5000/frigate
```
@@ -223,7 +218,7 @@ To do this:
### Custom go2rtc version
Frigate currently includes go2rtc v1.9.10; there may be certain cases where you want to run a different version of go2rtc.
Frigate currently includes go2rtc v1.9.9; there may be certain cases where you want to run a different version of go2rtc.
To do this:


@@ -147,7 +147,7 @@ WEB Digest Algorithm - MD5
Reolink has many different camera models with inconsistently supported features and behavior. The below table shows a summary of various features and recommendations.
| Camera Resolution | Camera Generation | Recommended Stream Type | Additional Notes |
| ----------------- | ------------------------- | --------------------------------- | ----------------------------------------------------------------------- |
|-------------------|---------------------------|-----------------------------------|-------------------------------------------------------------------------|
| 5MP or lower | All | http-flv | Stream is h264 |
| 6MP or higher | Latest (ex: Duo3, CX-8##) | http-flv with ffmpeg 8.0, or rtsp | This uses the new http-flv-enhanced over H265 which requires ffmpeg 8.0 |
| 6MP or higher | Older (ex: RLC-8##) | rtsp | |
@@ -231,7 +231,7 @@ go2rtc:
- rtspx://192.168.1.1:7441/abcdefghijk
```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-rtsp)
In the Unifi 2.0 update, Unifi Protect cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with Unifi Protect.
@@ -250,7 +250,6 @@ TP-Link VIGI cameras need some adjustments to the main stream settings on the ca
To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:
- Preparation outside of Frigate:
- Get USB camera path. Run `v4l2-ctl --list-devices` to get a listing of locally-connected cameras available. (You may need to install `v4l-utils` in a way appropriate for your Linux distribution). In the sample configuration below, we use `video=0` to correlate with a detected device path of `/dev/video0`
- Get USB camera formats & resolutions. Run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to get an idea of what formats and resolutions the USB Camera supports. In the sample configuration below, we use a width of 1024 and height of 576 in the stream and detection settings based on what was reported back.
- If using Frigate in a container (e.g. Docker on TrueNAS), ensure you have USB Passthrough support enabled, along with a specific Host Device (`/dev/video0`) + Container Device (`/dev/video0`) listed.
@@ -278,3 +277,5 @@ cameras:
width: 1024
height: 576
```
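To tie the bullets above together, here is a minimal sketch of the corresponding go2rtc stream. The stream name is a placeholder and the `ffmpeg:device` options follow go2rtc's FFmpeg Device source syntax; treat the exact parameters as assumptions rather than part of this change:

```yaml
go2rtc:
  streams:
    usb_camera:
      # video=0 maps to /dev/video0; the size matches the 1024x576 format reported by ffmpeg
      - "ffmpeg:device?video=0&video_size=1024x576#video=h264"
```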


@@ -1,73 +0,0 @@
---
id: object_classification
title: Object Classification
---
Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object.
## Minimum System Requirements
Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 13 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
### Sub label vs Attribute
- **Sub label**:
- Applied to the objects `sub_label` field.
- Ideal for a single, more specific identity or type.
- Example: `cat` → `Leo`, `Charlie`, `None`.
- **Attribute**:
- Added as metadata to the object (visible in /events): `<model_name>: <predicted_value>`.
- Ideal when multiple attributes can coexist independently.
- Example: Detecting if a `person` in a construction yard is wearing a helmet or not.
## Example use cases
### Sub label
- **Known pet vs unknown**: For `dog` objects, set sub label to your pet's name (e.g., `buddy`) or `none` for others.
- **Mail truck vs normal car**: For `car`, classify as `mail_truck` vs `car` to filter important arrivals.
- **Delivery vs non-delivery person**: For `person`, classify `delivery` vs `visitor` based on uniform/props.
### Attributes
- **Backpack**: For `person`, add attribute `backpack: yes/no`.
- **Helmet**: For `person` (worksite), add `helmet: yes/no`.
- **Leash**: For `dog`, add `leash: yes/no` (useful for park or yard rules).
- **Ladder rack**: For `truck`, add `ladder_rack: yes/no` to flag service vehicles.
## Configuration
Object classification is configured as a custom classification model. Each model has its own name and settings. You must list which object labels should be classified.
```yaml
classification:
custom:
dog:
threshold: 0.8
object_config:
objects: [dog] # object labels to classify
classification_type: sub_label # or: attribute
```
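For an attribute-style model, the same structure applies with `classification_type: attribute`; the `helmet` model name and `person` object below are illustrative placeholders, taken from the example use cases above:

```yaml
classification:
  custom:
    helmet:
      threshold: 0.8
      object_config:
        objects: [person] # object labels to classify
        classification_type: attribute # attached as metadata instead of a sub label
```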
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page.
### Getting Started
When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.
// TODO add this section once UI is implemented. Explain process of selecting objects and curating training examples.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
- **Data collection**: Use the model's Train tab to gather balanced examples across times of day, weather, and distances.
- **Preprocessing**: Ensure examples reflect object crops similar to Frigate's boxes; keep the subject centered.
- **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
- **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.


@@ -1,52 +0,0 @@
---
id: state_classification
title: State Classification
---
State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region.
## Minimum System Requirements
State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 13 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
## Example use cases
- **Door state**: Detect if a garage or front door is open vs closed.
- **Gate state**: Track if a driveway gate is open or closed.
- **Trash day**: Bins at curb vs no bins present.
- **Pool cover**: Cover on vs off.
## Configuration
State classification is configured as a custom classification model. Each model has its own name and settings. You must provide at least one camera crop under `state_config.cameras`.
```yaml
classification:
custom:
front_door:
threshold: 0.8
state_config:
motion: true # run when motion overlaps the crop
interval: 10 # also run every N seconds (optional)
cameras:
front:
crop: [0, 180, 220, 400]
```
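A variant sketch that runs purely on a schedule, assuming `motion` can be set to `false` in favor of the `interval` option; the model name, camera name, and crop coordinates are placeholders:

```yaml
classification:
  custom:
    garage_door:
      threshold: 0.8
      state_config:
        motion: false # do not trigger on motion in the crop
        interval: 30 # classify the crop every 30 seconds
        cameras:
          garage:
            crop: [100, 50, 400, 300]
```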
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page.
### Getting Started
When choosing a portion of the camera frame for state classification, it is important to make the crop tight around the area of interest to avoid extra signals unrelated to what is being classified.
// TODO add this section once UI is implemented. Explain process of selecting a crop.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
- **Data collection**: Use the model's Train tab to gather balanced examples across times of day and weather.


@@ -27,26 +27,13 @@ Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config; Ollama will try to download the model, but it may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
:::info
Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger sizes are more capable of complex tasks and understanding of situations, but require more memory and computational resources. It is recommended to try multiple models and experiment to see which performs best.
:::
:::tip
If you are trying to use a single model for Frigate and Home Assistant, it will need to support vision and tool calling. https://github.com/skye-harris/ollama-modelfiles contains optimized model configs for this task.
:::
The following models are recommended:
| Model | Notes |
| ----------------- | ----------------------------------------------------------- |
| `Intern3.5VL` | Relatively fast with good vision comprehension |
| `gemma3` | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5vl` | Fast but capable model with good vision comprehension |
| `llava-phi3` | Lightweight and fast model with vision comprehension |
| Model | Size | Notes |
| ----------------- | ------ | ----------------------------------------------------------- |
| `gemma3:4b` | 3.3 GB | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5vl:3b` | 3.2 GB | Fast but capable model with good vision comprehension |
| `llava-phi3:3.8b` | 2.9 GB | Lightweight and fast model with vision comprehension |
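For reference, a minimal sketch of pointing Frigate at an Ollama server with one of the models above; the `base_url` host and port are placeholders for your own Ollama instance, and the keys follow the genai config structure shown elsewhere in this change:

```yaml
genai:
  provider: ollama
  base_url: http://192.168.1.5:11434
  model: qwen2.5vl:3b
```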
:::note
@@ -63,8 +50,6 @@ genai:
model: minicpm-v:8b
provider_options: # other Ollama client options can be defined
keep_alive: -1
options:
num_ctx: 8192 # make sure the context matches other services that are using ollama
```
## Google Gemini
@@ -139,4 +124,4 @@ genai:
provider: azure_openai
base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
api_key: "{FRIGATE_OPENAI_API_KEY}"
```
```


@@ -27,18 +27,6 @@ Threat-level definitions:
This will show in the UI as a list of concerns that each review item has along with the general description.
### Defining Typical Activity
Each installation, and even each camera, can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, so you can define more specifically what should be considered normal activity. It is important that this is not overly specific, as it can sway the output of the response. The default `activity_context_prompt` is below:
```
- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
```
### Additional Concerns
Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:


@@ -427,29 +427,3 @@ cameras:
```
:::
## Synaptics
Hardware accelerated video de-/encoding is supported on Synaptics SL-series SoCs.
### Prerequisites
Make sure to follow the [Synaptics specific installation instructions](/frigate/installation#synaptics).
### Configuration
Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing:
```yaml
ffmpeg:
hwaccel_args: -c:v h264_v4l2m2m
input_args: preset-rtsp-restream
output_args:
record: preset-record-generic-audio-aac
```
:::warning
Make sure that your SoC supports hardware acceleration for your input stream and that your input stream is h264 encoded. For example, if your camera streams with h264 encoding, your SoC must be able to decode and encode it. If you are unsure whether your SoC meets the requirements, take a look at the datasheet.
:::


@@ -176,7 +176,7 @@ For devices that support two way talk, Frigate can be configured to use the feat
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
As a starting point to check compatibility for your camera, view the list of cameras supported for two-way talk on the [go2rtc repository](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#two-way-audio). For cameras in the category `ONVIF Profile T`, you can use the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/)'s FeatureList to check for the presence of `AudioOutput`. A camera that supports `ONVIF Profile T` _usually_ supports this, but due to inconsistent support, a camera that explicitly lists this feature may still not work. If no entry for your camera exists on the database, it is recommended not to buy it or to consult with the manufacturer's support on the feature availability.
As a starting point to check compatibility for your camera, view the list of cameras supported for two-way talk on the [go2rtc repository](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#two-way-audio). For cameras in the category `ONVIF Profile T`, you can use the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/)'s FeatureList to check for the presence of `AudioOutput`. A camera that supports `ONVIF Profile T` *usually* supports this, but due to inconsistent support, a camera that explicitly lists this feature may still not work. If no entry for your camera exists on the database, it is recommended not to buy it or to consult with the manufacturer's support on the feature availability.
### Streaming options on camera group dashboards
@@ -230,27 +230,7 @@ Note that disabling a camera through the config file (`enabled: False`) removes
If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again.
Errors in stream playback (e.g., connection failures, codec issues, or buffering timeouts) that cause the fallback to low bandwidth mode (jsmpeg) are logged to the browser console for easier debugging. These errors may include:
- Network issues (e.g., MSE or WebRTC network connection problems).
- Unsupported codecs or stream formats (e.g., H.265 in WebRTC, which is not supported in some browsers).
- Buffering timeouts or low bandwidth conditions causing fallback to jsmpeg.
- Browser compatibility problems (e.g., iOS Safari limitations with MSE).
To view browser console logs:
1. Open the Frigate Live View in your browser.
2. Open the browser's Developer Tools (F12 or right-click > Inspect > Console tab).
3. Reproduce the error (e.g., load a problematic stream or simulate network issues).
4. Look for messages prefixed with the camera name.
These logs help identify if the issue is player-specific (MSE vs. WebRTC) or related to camera configuration (e.g., go2rtc streams, codecs). If you see frequent errors:
- Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera_settings_recommendations)).
- Check go2rtc configuration for transcoding (e.g., audio to AAC/OPUS).
- Test with a different stream via the UI dropdown (if `live -> streams` is configured).
- For WebRTC-specific issues, ensure port 8555 is forwarded and candidates are set (see [WebRTC Extra Configuration](#webrtc-extra-configuration)).
- If your cameras are streaming at a high resolution, your browser may be struggling to load all of the streams before the buffering timeout occurs. Frigate prioritizes showing a true live view as quickly as possible. If the fallback occurs often, change your live view settings to use a lower bandwidth substream.
If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the [recommendations above](#camera_settings_recommendations).
3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?**
@@ -273,7 +253,3 @@ Note that disabling a camera through the config file (`enabled: False`) removes
6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?**
If your camera is streaming (as indicated by a red dot in the upper right, or if it has been set to continuous streaming mode), your browser may be blocking audio until you interact with the page. This is an intentional browser limitation. See [this article](https://developer.mozilla.org/en-US/docs/Web/Media/Autoplay_guide#autoplay_availability). Many browsers have a whitelist feature to change this behavior.
7. **My camera streams have lots of visual artifacts / distortion.**
Some cameras don't include the hardware to support multiple connections to the high resolution stream, and this can cause unexpected behavior. In this case it is recommended to [restream](./restream.md) the high resolution stream so that it can be used for live view and recordings.


@@ -35,7 +35,6 @@ Frigate supports multiple different detectors that work on different types of ha
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
**Nvidia Jetson**
- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt-jp6` Frigate image when a supported ONNX model is configured.
@@ -43,10 +42,6 @@ Frigate supports multiple different detectors that work on different types of ha
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
**Synaptics**
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. astra machina) with included NPUs.
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
@@ -336,12 +331,6 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
:::
:::warning
If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
:::
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
@@ -453,13 +442,12 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
:::
When Frigate is started with the following config it will connect to the detector client and transfer the model automatically:
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
apple-silicon:
type: zmq
endpoint: tcp://host.docker.internal:5555
onnx:
type: onnx
model:
model_type: yolo-generic
@@ -555,17 +543,6 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/
### ROCm Supported Models
:::tip
The AMD GPU kernel is known to be problematic, especially when converting models to mxr format. The recommended approach is:
1. Disable object detection in the config.
2. Start Frigate with the onnx detector configured; the main object detection model will be converted to mxr format and cached in the config directory.
3. Once this is finished as indicated by the logs, enable object detection in the UI and confirm that it is working correctly.
4. Re-enable object detection in the config.
:::
See [ONNX supported models](#supported-models) for supported models; there are some caveats:
- D-FINE models are not supported
@@ -615,12 +592,6 @@ There is no default model provided, the following formats are supported:
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.
:::warning
If you are using a Frigate+ YOLO-NAS model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
:::
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
@@ -648,12 +619,6 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
:::
:::warning
If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
:::
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
@@ -792,19 +757,19 @@ To verify that the integration is working correctly, start Frigate and observe t
# Community Supported Detectors
## MemryX MX3
## MemryX MX3
This detector is available for use with the MemryX MX3 accelerator M.2 module. Frigate supports the MX3 on compatible hardware platforms, providing efficient and high-performance object detection.
This detector is available for use with the MemryX MX3 accelerator M.2 module. Frigate supports the MX3 on compatible hardware platforms, providing efficient and high-performance object detection.
See the [installation docs](../frigate/installation.md#memryx-mx3) for information on configuring the MemryX hardware.
To configure a MemryX detector, simply set the `type` attribute to `memryx` and follow the configuration guide below.
### Configuration
### Configuration
To configure the MemryX detector, use the following example configuration:
To configure the MemryX detector, use the following example configuration:
#### Single PCIe MemryX MX3
#### Single PCIe MemryX MX3
```yaml
detectors:
@@ -830,7 +795,7 @@ detectors:
device: PCIe:2
```
### Supported Models
### Supported Models
MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the container at `/memryx_models/model_folder/`.
@@ -844,9 +809,9 @@ The input size for **YOLO-NAS** can be set to either **320x320** (default) or **
- The default size of **320x320** is optimized for lower CPU usage and faster inference times.
##### Configuration
##### Configuration
Below is the recommended configuration for using the **YOLO-NAS** (small) model with the MemryX detector:
Below is the recommended configuration for using the **YOLO-NAS** (small) model with the MemryX detector:
```yaml
detectors:
@@ -868,13 +833,13 @@ model:
# └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### YOLOv9
#### YOLOv9
The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
##### Configuration
Below is the recommended configuration for using the **YOLOv9** (small) model with the MemryX detector:
Below is the recommended configuration for using the **YOLOv9** (small) model with the MemryX detector:
```yaml
detectors:
@@ -883,7 +848,7 @@ detectors:
device: PCIe:0
model:
model_type: yolo-generic
model_type: yolo-generic
width: 320 # (Can be set to 640 for higher resolution)
height: 320 # (Can be set to 640 for higher resolution)
input_tensor: nchw
@@ -896,13 +861,13 @@ model:
# └── yolov9_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### YOLOX
#### YOLOX
The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/opencv_zoo) and precompiled to DFP.
##### Configuration
##### Configuration
Below is the recommended configuration for using the **YOLOX** (small) model with the MemryX detector:
Below is the recommended configuration for using the **YOLOX** (small) model with the MemryX detector:
```yaml
detectors:
@@ -923,13 +888,13 @@ model:
# ├── yolox.dfp (a file ending with .dfp)
```
#### SSDLite MobileNet v2
#### SSDLite MobileNet v2
The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmmlab.com/model/mmdet-det/ssdlite-e8679f.onnx) and has been converted to DFP.
##### Configuration
##### Configuration
Below is the recommended configuration for using the **SSDLite MobileNet v2** model with the MemryX detector:
Below is the recommended configuration for using the **SSDLite MobileNet v2** model with the MemryX detector:
```yaml
detectors:
@@ -1064,41 +1029,6 @@ model:
height: 320 # MUST match the chosen model i.e yolov7-320 -> 320 yolov4-416 -> 416
```
## Synaptics
Hardware accelerated object detection is supported on the following SoCs:
- SL1680
This implementation uses the [Synaptics model conversion](https://synaptics-synap.github.io/doc/v/latest/docs/manual/introduction.html#offline-model-conversion), version v3.1.0.
This implementation is based on sdk `v1.5.0`.
See the [installation docs](../frigate/installation.md#synaptics) for information on configuring the SL-series NPU hardware.
### Configuration
When configuring the Synap detector, you must specify the model as a local **path**.
#### SSD Mobilenet
A synap model is provided in the container at `/synaptics/mobilenet.synap` and is used by this detector type by default. The model comes from the [Synap-release GitHub](https://github.com/synaptics-astra/synap-release/tree/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80).
Use the model configuration shown below when using the synaptics detector with the default synap model:
```yaml
detectors: # required
synap_npu: # required
type: synaptics # required
model: # required
path: /synaptics/mobilenet.synap # required
width: 224 # required
height: 224 # required
tensor_format: nhwc # default value (optional. If you change the model, it is required)
labelmap_path: /labelmap/coco-80.txt # required
```
## Rockchip platform
Hardware accelerated object detection is supported on the following SoCs:
@@ -1373,29 +1303,26 @@ Here are some tips for getting different model types
### Downloading D-FINE Model
D-FINE can be exported as ONNX by running the command below. You can copy and paste the whole thing into your terminal and execute it, altering `MODEL_SIZE=s` in the first line to `s`, `m`, or `l`.
To export as ONNX:
1. Clone: https://github.com/Peterande/D-FINE and install all dependencies.
2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE).
3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)`
4. Run the export, making sure you select the right config for your checkpoint.
Example:
```sh
docker build . --build-arg MODEL_SIZE=s --output . -f- <<'EOF'
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /dfine
RUN git clone https://github.com/Peterande/D-FINE.git .
RUN uv pip install --system -r requirements.txt
RUN uv pip install --system onnx onnxruntime onnxsim
# Create output directory and download checkpoint
RUN mkdir -p output
ARG MODEL_SIZE
RUN wget https://github.com/Peterande/storage/releases/download/dfinev1.0/dfine_${MODEL_SIZE}_obj2coco.pth -O output/dfine_${MODEL_SIZE}_obj2coco.pth
# Modify line 58 of export_onnx.py to change batch size to 1
RUN sed -i '58s/data = torch.rand(.*)/data = torch.rand(1, 3, 640, 640)/' tools/deployment/export_onnx.py
RUN python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_${MODEL_SIZE}_obj2coco.yml -r output/dfine_${MODEL_SIZE}_obj2coco.pth
FROM scratch
ARG MODEL_SIZE
COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL_SIZE}.onnx
EOF
```
python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth
```
:::tip
Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually.
Make sure you change the batch size to 1 before exporting.
:::
### Download RF-DETR Model
@@ -1447,25 +1374,23 @@ python3 yolo_to_onnx.py -m yolov7-320
#### YOLOv9
YOLOv9 models can be exported as ONNX using the command below. You can copy and paste the whole thing into your terminal and execute it, altering `MODEL_SIZE=t` and `IMG_SIZE=320` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available model sizes are `t`, `s`, `m`, `c`, and `e`; common image sizes are `320` and `640`).
YOLOv9 models can be exported as ONNX using the command below. You can copy and paste the whole thing into your terminal and execute it, altering `MODEL_SIZE=t` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available sizes are `t`, `s`, `m`, `c`, and `e`).
```sh
docker build . --build-arg MODEL_SIZE=t --build-arg IMG_SIZE=320 --output . -f- <<'EOF'
docker build . --build-arg MODEL_SIZE=t --output . -f- <<'EOF'
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /yolov9
ADD https://github.com/WongKinYiu/yolov9.git .
RUN uv pip install --system -r requirements.txt
RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1
RUN uv pip install --system onnx onnxruntime onnx-simplifier>=0.4.1
ARG MODEL_SIZE
ARG IMG_SIZE
ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt
RUN sed -i "s/ckpt = torch.load(attempt_download(w), map_location='cpu')/ckpt = torch.load(attempt_download(w), map_location='cpu', weights_only=False)/g" models/experimental.py
RUN python3 export.py --weights ./yolov9-${MODEL_SIZE}.pt --imgsz ${IMG_SIZE} --simplify --include onnx
RUN python3 export.py --weights ./yolov9-${MODEL_SIZE}.pt --imgsz 320 --simplify --include onnx
FROM scratch
ARG MODEL_SIZE
ARG IMG_SIZE
COPY --from=build /yolov9/yolov9-${MODEL_SIZE}.onnx /yolov9-${MODEL_SIZE}-${IMG_SIZE}.onnx
COPY --from=build /yolov9/yolov9-${MODEL_SIZE}.onnx /
EOF
```


@@ -287,9 +287,6 @@ detect:
max_disappeared: 25
# Optional: Configuration for stationary object tracking
stationary:
# Optional: Stationary classifier that uses visual characteristics to determine if an object
# is stationary even if the box changes enough to be considered motion (default: shown below).
classifier: True
# Optional: Frequency for confirming stationary objects (default: same as threshold)
# When set to 1, object detection will run to confirm the object still exists on every frame.
# If set to 10, object detection will run to confirm the object still exists on every 10th frame.
@@ -700,7 +697,7 @@ audio_transcription:
language: en
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# Uses https://github.com/AlexxIT/go2rtc (v1.9.9)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:


@@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.9) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration) for more advanced configurations and features.
:::note
@@ -156,7 +156,7 @@ See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-22
## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
NOTE: The output will need to be passed with two curly braces `{{output}}`
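As a hedged illustration (not taken from this change), an exec source might look like the following; the stream name, source URL, and ffmpeg arguments are assumptions:

```yaml
go2rtc:
  streams:
    custom_cam:
      # go2rtc substitutes {{output}} with its own output destination
      - "exec:ffmpeg -hide_banner -i rtsp://192.168.1.10:554/stream -c copy -rtsp_transport tcp -f rtsp {{output}}"
```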


@@ -56,7 +56,6 @@ Frigate supports multiple different detectors that work on different types of ha
- Runs best with tiny or small size models
- [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)
- [MemryX](#memryx-mx3): The MX3 M.2 accelerator module is available in m.2 format allowing for a wide range of compatibility with devices.
@@ -95,21 +94,8 @@ Frigate supports multiple different detectors that work on different types of ha
- Runs best with tiny or small size models
- Runs efficiently on low power hardware
**Synaptics**
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. astra machina) with included NPUs to provide efficient object detection.
:::
### Synaptics
- **Synaptics**: Default model is **mobilenet**
| Name | Synaptics SL1680 Inference Time |
| ---------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms |
| yolov5m | ~ 118 ms |
### Hailo-8
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms—including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided.
@@ -124,7 +110,6 @@ In real-world deployments, even with multiple cameras running concurrently, Frig
| Name | Hailo8 Inference Time | Hailo8L Inference Time |
| ---------------- | ---------------------- | ----------------------- |
| ssd mobilenet v1 | ~ 6 ms | ~ 10 ms |
| yolov9-tiny | | 320: 18ms |
| yolov6n | ~ 7 ms | ~ 11 ms |
### Google Coral TPU
@@ -157,19 +142,17 @@ More information is available [in the detector docs](/configuration/object_detec
Inference speeds vary greatly depending on the CPU or GPU used; some known examples of GPU inference times are below:
| Name | MobileNetV2 Inference Time | YOLOv9 | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
| -------------- | -------------------------- | ------------------------------------------------- | ------------------------- | ---------------------- | ---------------------------------- |
| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance |
| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | |
| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | |
| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel UHD 770 | ~ 15 ms | t-320: ~ 16 ms s-320: ~ 20 ms s-640: ~ 40 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance |
| Intel N150 | ~ 15 ms | t-320: 16 ms s-320: 24 ms | | | |
| Intel Iris XE | ~ 10 ms | s-320: 12 ms s-640: 30 ms | 320: ~ 18 ms 640: ~ 50 ms | | |
| Intel Arc A310 | ~ 5 ms | t-320: 7 ms t-640: 11 ms s-320: 8 ms s-640: 15 ms | 320: ~ 8 ms 640: ~ 14 ms | | |
| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | |
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
| -------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- |
| Intel HD 530 | 15 - 35 ms | | | Can only run one detector instance |
| Intel HD 620 | 15 - 25 ms | 320: ~ 35 ms | | |
| Intel HD 630 | ~ 15 ms | 320: ~ 30 ms | | |
| Intel UHD 730 | ~ 10 ms | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel UHD 770 | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel N100 | ~ 15 ms | 320: ~ 25 ms | | Can only run one detector instance |
| Intel Iris XE | ~ 10 ms | 320: ~ 18 ms 640: ~ 50 ms | | |
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | |
### TensorRT - Nvidia GPU
@@ -177,7 +160,7 @@ Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA
#### Minimum Hardware Support
12.x series of CUDA libraries are used which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
12.x series of CUDA libraries are used which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
Make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
@@ -197,13 +180,12 @@ Inference speeds will vary greatly depending on the GPU and the model used.
✅ - Accelerated with CUDA Graphs
❌ - Not accelerated with CUDA Graphs
| Name | ✅ YOLOv9 Inference Time | ✅ RF-DETR Inference Time | ❌ YOLO-NAS Inference Time |
| --------- | ------------------------------------- | ------------------------- | -------------------------- |
| GTX 1070 | s-320: 16 ms | | 320: 14 ms |
| RTX 3050 | t-320: 8 ms s-320: 10 ms s-640: 28 ms | Nano-320: ~ 12 ms | 320: ~ 10 ms 640: ~ 16 ms |
| RTX 3070 | t-320: 6 ms s-320: 8 ms s-640: 25 ms | Nano-320: ~ 9 ms | 320: ~ 8 ms 640: ~ 14 ms |
| RTX A4000 | | | 320: ~ 15 ms |
| Tesla P40 | | | 320: ~ 105 ms |
| Name | ✅ YOLOv9 Inference Time | ✅ RF-DETR Inference Time | ❌ YOLO-NAS Inference Time
| --------------- | ------------------------ | ------------------------- | -------------------------- |
| RTX 3050 | t-320: 8 ms s-320: 10 ms | Nano-320: ~ 12 ms | 320: ~ 10 ms 640: ~ 16 ms |
| RTX 3070 | t-320: 6 ms s-320: 8 ms | Nano-320: ~ 9 ms | 320: ~ 8 ms 640: ~ 14 ms |
| RTX A4000 | | | 320: ~ 15 ms |
| Tesla P40 | | | 320: ~ 105 ms |
### Apple Silicon
@@ -215,20 +197,18 @@ Apple Silicon can not run within a container, so a ZMQ proxy is utilized to comm
:::
| Name | YOLOv9 Inference Time |
| ------ | ------------------------------------ |
| M4 | s-320: 10 ms |
| M3 Pro | t-320: 6 ms s-320: 8 ms s-640: 20 ms |
| M1 | s-320: 9ms |
| Name | YOLOv9 Inference Time |
| --------- | ---------------------- |
| M3 Pro | t-320: 6 ms s-320: 8ms |
| M1 | s-320: 9ms |
### ROCm - AMD GPU
With the [ROCm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
| --------- | --------------------------- | ------------------------- |
| AMD 780M | t-320: ~ 14 ms s-320: 20 ms | 320: ~ 25 ms 640: ~ 50 ms |
| AMD 8700G | | 320: ~ 20 ms 640: ~ 40 ms |
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
| --------- | ------------------------- | ------------------------- |
| AMD 780M | t-320: 14 ms s-320: 20 ms | 320: ~ 25 ms 640: ~ 50 ms |
## Community Supported Detectors
@@ -247,14 +227,14 @@ Detailed information is available [in the detector docs](/configuration/object_d
The MX3 is a pipelined architecture, where the maximum frames per second supported (and thus supported number of cameras) cannot be calculated as `1/latency` (1/"Inference Time") and is measured separately. When estimating how many camera streams you may support with your configuration, use the **MX3 Total FPS** column as an approximation of the detector's limit, not the Inference Time.
| Model | Input Size | MX3 Inference Time | MX3 Total FPS |
| -------------------- | ---------- | ------------------ | ------------- |
|----------------------|------------|--------------------|---------------|
| YOLO-NAS-Small | 320 | ~ 9 ms | ~ 378 |
| YOLO-NAS-Small | 640 | ~ 21 ms | ~ 138 |
| YOLOv9s | 320 | ~ 16 ms | ~ 382 |
| YOLOv9s | 640 | ~ 41 ms | ~ 110 |
| YOLOX-Small | 640 | ~ 16 ms | ~ 263 |
| SSDlite MobileNet v2 | 320 | ~ 5 ms | ~ 1056 |
Inference speeds may vary depending on the host platform. The above data was measured on an **Intel 13700 CPU**. Platforms like Raspberry Pi, Orange Pi, and other ARM-based SBCs have different levels of processing capability, which may limit total FPS.
### Nvidia Jetson


@@ -256,37 +256,6 @@ or add these options to your `docker run` command:
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform).
### Synaptics
- SL1680
#### Setup
Follow Frigate's default installation instructions, but use a docker image with the `-synaptics` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-synaptics`.
Next, you need to grant docker permissions to access your hardware:
- During the configuration process, you should run docker in privileged mode to avoid any errors due to insufficient permissions. To do so, add `privileged: true` to your `docker-compose.yml` file or the `--privileged` flag to your docker run command.
```yaml
devices:
- /dev/synap
- /dev/video0
- /dev/video1
```
or add these options to your `docker run` command:
```
--device /dev/synap \
--device /dev/video0 \
--device /dev/video1
```
#### Configuration
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
## Docker
Running through Docker with Docker Compose is the recommended install method.


@@ -5,7 +5,7 @@ title: Updating
# Updating Frigate
The current stable version of Frigate is **0.16.1**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.1).
The current stable version of Frigate is **0.16.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.0).
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
@@ -33,21 +33,21 @@ If you're running Frigate via Docker (recommended method), follow these steps:
2. **Update and Pull the Latest Image**:
- If using Docker Compose:
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.1` instead of `0.15.2`). For example:
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.0` instead of `0.15.2`). For example:
```yaml
services:
frigate:
image: ghcr.io/blakeblackshear/frigate:0.16.1
image: ghcr.io/blakeblackshear/frigate:0.16.0
```
- Then pull the image:
```bash
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
docker pull ghcr.io/blakeblackshear/frigate:0.16.0
```
- **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you dont need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
- If using `docker run`:
- Pull the image with the appropriate tag (e.g., `0.16.1`, `0.16.1-tensorrt`, or `stable`):
- Pull the image with the appropriate tag (e.g., `0.16.0`, `0.16.0-tensorrt`, or `stable`):
```bash
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
docker pull ghcr.io/blakeblackshear/frigate:0.16.0
```
3. **Start the Container**:
@@ -3,15 +3,17 @@ id: configuring_go2rtc
title: Configuring go2rtc
---
# Configuring go2rtc
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
- WebRTC or MSE for live viewing with audio and higher resolutions and frame rates than the jsmpeg stream, which is limited to the detect stream and does not support audio
- Live stream support for cameras in Home Assistant Integration
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
## Setup a go2rtc stream
# Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#module-streams), not just rtsp.
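As a starting point, a minimal stream definition might look like the sketch below, where the stream name, camera address, and credentials are all placeholders for your own camera.

```yaml
go2rtc:
  streams:
    back_yard_cam:
      - rtsp://user:password@192.168.1.10:554/stream1   # hypothetical camera URL
```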
:::tip
@@ -47,8 +49,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec:
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
```yaml
go2rtc:
streams:
@@ -109,11 +111,11 @@ section.
:::
### Next steps
## Next steps
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera) and sketched briefly below.
2. You can [set up WebRTC](/configuration/live#webrtc-extra-configuration) if your camera supports two-way talk. Note that WebRTC only supports specific audio formats and may require opening ports on your router.
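A minimal sketch of the reduced-connections pattern referenced in step 1, assuming a hypothetical go2rtc stream named `back_yard_cam` served on go2rtc's default RTSP port:

```yaml
cameras:
  back_yard_cam:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/back_yard_cam   # pull from the local go2rtc restream
          input_args: preset-rtsp-restream
          roles:
            - record
            - detect
```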
## Homekit Configuration
## Important considerations
To add camera streams to Homekit, Frigate must be configured in docker to use `host` networking mode. Once that is done, you can use the go2rtc WebUI (accessed via port 1984, which is disabled by default) to export a camera to Homekit. Any changes made will automatically be saved to `/config/go2rtc_homekit.yml`.
If you are configuring go2rtc to publish HomeKit camera streams, on pairing the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container. These changes must be manually copied across to the `go2rtc` section of your Frigate configuration in order to persist through restarts.
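For reference, host networking in docker compose is a one-line change; the snippet below is only a fragment to merge into your existing service definition.

```yaml
services:
  frigate:
    network_mode: host   # required for go2rtc to expose HomeKit streams
```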
@@ -185,26 +185,6 @@ For clips to be castable to media devices, audio is required and may need to be
<a name="api"></a>
## Camera API
To disable a camera dynamically:
```
action: camera.turn_off
data: {}
target:
entity_id: camera.back_deck_cam # your Frigate camera entity ID
```
To enable a camera that has been disabled dynamically:
```
action: camera.turn_on
data: {}
target:
entity_id: camera.back_deck_cam # your Frigate camera entity ID
```
## Notification API
Many people do not want to expose Frigate to the web, so the integration creates some public API endpoints that can be used for notifications.
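As an illustration of how these endpoints are typically consumed, the automation sketch below attaches an event thumbnail to a mobile notification. The notify service and external address are placeholders, and the exact endpoint paths should be taken from the integration's notification documentation rather than from this example.

```yaml
automation:
  - alias: Notify on Frigate event          # illustrative
    trigger:
      - platform: mqtt
        topic: frigate/events
    action:
      - service: notify.mobile_app_phone    # hypothetical notify target
        data:
          message: 'A {{ trigger.payload_json["after"]["label"] }} was detected.'
          data:
            # illustrative endpoint; see the integration's notification docs for the exact paths
            image: "https://your.public.hass.address/api/frigate/notifications/{{ trigger.payload_json['after']['id'] }}/thumbnail.jpg"
```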
@@ -29,12 +29,12 @@ Message published for each changed tracked object. The first message is publishe
"camera": "front_door",
"frame_time": 1607123961.837752,
"snapshot": {
"frame_time": 1607123965.975463,
"box": [415, 489, 528, 700],
"area": 12728,
"region": [260, 446, 660, 846],
"score": 0.77546,
"attributes": []
"frame_time": 1607123965.975463,
"box": [415, 489, 528, 700],
"area": 12728,
"region": [260, 446, 660, 846],
"score": 0.77546,
"attributes": [],
},
"label": "person",
"sub_label": null,
@@ -61,7 +61,6 @@ Message published for each changed tracked object. The first message is publishe
}, // attributes with top score that have been identified on the object at any point
"current_attributes": [], // detailed data about the current attributes in this frame
"current_estimated_speed": 0.71, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
"average_estimated_speed": 14.3, // average estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
"velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
"recognized_license_plate": "ABC12345", // a recognized license plate for car objects
"recognized_license_plate_score": 0.933451
@@ -71,12 +70,12 @@ Message published for each changed tracked object. The first message is publishe
"camera": "front_door",
"frame_time": 1607123962.082975,
"snapshot": {
"frame_time": 1607123965.975463,
"box": [415, 489, 528, 700],
"area": 12728,
"region": [260, 446, 660, 846],
"score": 0.77546,
"attributes": []
"frame_time": 1607123965.975463,
"box": [415, 489, 528, 700],
"area": 12728,
"region": [260, 446, 660, 846],
"score": 0.77546,
"attributes": [],
},
"label": "person",
"sub_label": ["John Smith", 0.79],
@@ -110,7 +109,6 @@ Message published for each changed tracked object. The first message is publishe
}
],
"current_estimated_speed": 0.77, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
"average_estimated_speed": 14.31, // average estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
"velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
"recognized_license_plate": "ABC12345", // a recognized license plate for car objects
"recognized_license_plate_score": 0.933451
@@ -34,12 +34,6 @@ Model IDs are not secret values and can be shared freely. Access to your model i
:::
:::tip
When setting the plus model id, all other fields should be removed as these are configured automatically with the Frigate+ model config
:::
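When a Frigate+ model id is set as described in the tip above, the model section reduces to just the path; a minimal sketch with a placeholder id:

```yaml
model:
  path: plus://<your_model_id>   # placeholder id; remaining model fields are filled in automatically
```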
## Step 4: Adjust your object filters for higher scores
Frigate+ models generally have much higher scores than the default model provided in Frigate. You will likely need to increase your `threshold` and `min_score` values. Here is an example of how these values can be refined, but you should expect these to evolve as your model improves. For more information about how `threshold` and `min_score` are related, see the docs on [object filters](../configuration/object_filters.md#object-scores).
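For example, a refined filter for `person` might start from values like the sketch below and then be tuned as your model improves; the numbers are illustrative, not recommendations.

```yaml
objects:
  filters:
    person:
      min_score: 0.7    # illustrative starting point
      threshold: 0.85   # illustrative starting point
```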
@@ -11,51 +11,34 @@ Information on how to integrate Frigate+ with Frigate can be found in the [integ
## Available model types
There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yolov9`. All of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.
| Model Type | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, Apple Silicon\*, and Rockchip NPUs. |
_\* Support coming in 0.17_
### YOLOv9 Details
YOLOv9 models are available in `s` and `t` sizes. When requesting a `yolov9` model, you will be prompted to choose a size. If you are unsure what size to choose, you should perform some tests with the base models to find the performance level that suits you. The `s` size is most similar to the current `yolonas` models in terms of inference times and accuracy, and a good place to start is the `320x320` resolution model for `yolov9s`.
:::info
When switching to YOLOv9, you may need to adjust your thresholds for some objects.
:::
#### Hailo Support
If you have a Hailo device, you will need to specify the hardware you have when submitting a model request because they are not cross compatible. Please test using the available base models before submitting your model request.
#### Rockchip (RKNN) Support
For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is coming in 0.17.
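Once converted, the resulting configuration might take roughly the shape sketched below; the detector name, model path, and resolution are placeholders, and the conversion documentation linked above is the authoritative reference.

```yaml
detectors:
  rknn:
    type: rknn
model:
  path: /config/model_cache/yolov9s-320.rknn   # hypothetical path to the converted model
  width: 320
  height: 320
```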
| Model Type | Description |
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
## Supported detector types
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), Hailo (`hailo8l`), and Rockchip\* (`rknn`) detectors.
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), and ONNX (`onnx`) detectors.
:::warning
Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later.
:::
| Hardware | Recommended Detector Type | Recommended Model Type |
| -------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolov9` |
| [NVidia GPU](/configuration/object_detectors#onnx) | `onnx` | `yolov9` |
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector) | `onnx` | `yolov9` |
| [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` |
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` |
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
| [NVidia GPU](/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` |
_\* Requires manual conversion in 0.16. Automatic conversion coming in 0.17._
_\* Requires Frigate 0.15_
## Improving your model
@@ -5,14 +5,14 @@ import frigateHttpApiSidebar from "./docs/integrations/api/sidebar";
const sidebars: SidebarsConfig = {
docs: {
Frigate: [
"frigate/index",
"frigate/hardware",
"frigate/planning_setup",
"frigate/installation",
"frigate/updating",
"frigate/camera_setup",
"frigate/video_pipeline",
"frigate/glossary",
'frigate/index',
'frigate/hardware',
'frigate/planning_setup',
'frigate/installation',
'frigate/updating',
'frigate/camera_setup',
'frigate/video_pipeline',
'frigate/glossary',
],
Guides: [
"guides/getting_started",
@@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
{
type: "link",
label: "Go2RTC Configuration Reference",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration",
} as PropSidebarItemLink,
],
Detectors: [
@@ -40,19 +40,6 @@ const sidebars: SidebarsConfig = {
"configuration/face_recognition",
"configuration/license_plate_recognition",
"configuration/bird_classification",
{
type: "category",
label: "Custom Classification",
link: {
type: "generated-index",
title: "Custom Classification",
description: "Configuration for custom classification models",
},
items: [
"configuration/custom_classification/state_classification",
"configuration/custom_classification/object_classification",
],
},
{
type: "category",
label: "Generative AI",
@@ -119,11 +106,11 @@ const sidebars: SidebarsConfig = {
"configuration/metrics",
"integrations/third_party_extensions",
],
"Frigate+": [
"plus/index",
"plus/annotating",
"plus/first_model",
"plus/faq",
'Frigate+': [
'plus/index',
'plus/annotating',
'plus/first_model',
'plus/faq',
],
Troubleshooting: [
"troubleshooting/faqs",
@@ -822,9 +822,9 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
"""VOD for specific hour. Uses the default timezone (UTC)."""
return await vod_hour(
return vod_hour(
year_month, day, hour, camera_name, get_localzone_name().replace("/", ",")
)
@@ -834,9 +834,7 @@ async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name
dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
async def vod_hour(
year_month: str, day: int, hour: int, camera_name: str, tz_name: str
):
def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: str):
parts = year_month.split("-")
start_date = (
datetime(int(parts[0]), int(parts[1]), day, hour, tzinfo=timezone.utc)
@@ -846,7 +844,7 @@ async def vod_hour(
start_ts = start_date.timestamp()
end_ts = end_date.timestamp()
return await vod_ts(camera_name, start_ts, end_ts)
return vod_ts(camera_name, start_ts, end_ts)
@router.get(
@@ -877,7 +875,7 @@ async def vod_event(
if event.end_time is None
else (event.end_time + padding)
)
vod_response = await vod_ts(event.camera, event.start_time - padding, end_ts)
vod_response = vod_ts(event.camera, event.start_time - padding, end_ts)
# If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false
if (
@@ -1250,7 +1248,7 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
@router.get("/events/{event_id}/clip.mp4")
async def event_clip(
def event_clip(
request: Request,
event_id: str,
padding: int = Query(0, description="Padding to apply to clip."),
@@ -1272,9 +1270,7 @@ async def event_clip(
if event.end_time is None
else event.end_time + padding
)
return await recording_clip(
request, event.camera, event.start_time - padding, end_ts
)
return recording_clip(request, event.camera, event.start_time - padding, end_ts)
@router.get("/events/{event_id}/preview.gif")
@@ -1702,7 +1698,7 @@ def preview_thumbnail(file_name: str):
"/{camera_name}/{label}/thumbnail.jpg",
dependencies=[Depends(require_camera_access)],
)
async def label_thumbnail(request: Request, camera_name: str, label: str):
def label_thumbnail(request: Request, camera_name: str, label: str):
label = unquote(label)
event_query = Event.select(fn.MAX(Event.id)).where(Event.camera == camera_name)
if label != "any":
@@ -1711,7 +1707,7 @@ async def label_thumbnail(request: Request, camera_name: str, label: str):
try:
event_id = event_query.scalar()
return await event_thumbnail(request, event_id, Extension.jpg, 60)
return event_thumbnail(request, event_id, 60)
except DoesNotExist:
frame = np.zeros((175, 175, 3), np.uint8)
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
@@ -1726,7 +1722,7 @@ async def label_thumbnail(request: Request, camera_name: str, label: str):
@router.get(
"/{camera_name}/{label}/clip.mp4", dependencies=[Depends(require_camera_access)]
)
async def label_clip(request: Request, camera_name: str, label: str):
def label_clip(request: Request, camera_name: str, label: str):
label = unquote(label)
event_query = Event.select(fn.MAX(Event.id)).where(
Event.camera == camera_name, Event.has_clip == True
@@ -1737,7 +1733,7 @@ async def label_clip(request: Request, camera_name: str, label: str):
try:
event = event_query.get()
return await event_clip(request, event.id)
return event_clip(request, event.id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Event not found"}, status_code=404
@@ -1747,7 +1743,7 @@ async def label_clip(request: Request, camera_name: str, label: str):
@router.get(
"/{camera_name}/{label}/snapshot.jpg", dependencies=[Depends(require_camera_access)]
)
async def label_snapshot(request: Request, camera_name: str, label: str):
def label_snapshot(request: Request, camera_name: str, label: str):
"""Returns the snapshot image from the latest event for the given camera and label combo"""
label = unquote(label)
if label == "any":
@@ -1768,7 +1764,7 @@ async def label_snapshot(request: Request, camera_name: str, label: str):
try:
event: Event = event_query.get()
return await event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
return event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
except DoesNotExist:
frame = np.zeros((720, 1280, 3), np.uint8)
_, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
@@ -2,7 +2,6 @@
import logging
from enum import Enum
from typing import Any
from .zmq_proxy import Publisher, Subscriber
@@ -11,21 +10,18 @@ logger = logging.getLogger(__name__)
class RecordingsDataTypeEnum(str, Enum):
all = ""
saved = "saved" # segment has been saved to db
latest = "latest" # segment is in cache
valid = "valid" # segment is valid
invalid = "invalid" # segment is invalid
recordings_available_through = "recordings_available_through"
class RecordingsDataPublisher(Publisher[Any]):
class RecordingsDataPublisher(Publisher[tuple[str, float]]):
"""Publishes latest recording data."""
topic_base = "recordings/"
def __init__(self) -> None:
super().__init__()
def __init__(self, topic: RecordingsDataTypeEnum) -> None:
super().__init__(topic.value)
def publish(self, payload: Any, sub_topic: str = "") -> None:
def publish(self, payload: tuple[str, float], sub_topic: str = "") -> None:
super().publish(payload, sub_topic)
@@ -36,11 +32,3 @@ class RecordingsDataSubscriber(Subscriber):
def __init__(self, topic: RecordingsDataTypeEnum) -> None:
super().__init__(topic.value)
def _return_object(
self, topic: str, payload: tuple | None
) -> tuple[str, Any] | tuple[None, None]:
if payload is None:
return (None, None)
return (topic, payload)
@@ -29,10 +29,6 @@ class StationaryConfig(FrigateBaseModel):
default_factory=StationaryMaxFramesConfig,
title="Max frames for stationary objects.",
)
classifier: bool = Field(
default=True,
title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary.",
)
class DetectConfig(FrigateBaseModel):
@@ -92,15 +92,6 @@ class GenAIReviewConfig(FrigateBaseModel):
title="Preferred language for GenAI Response",
default=None,
)
activity_context_prompt: str = Field(
default="""- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
""",
title="Custom activity context prompt defining normal activity patterns for this property.",
)
class ReviewConfig(FrigateBaseModel):
@@ -1,349 +0,0 @@
"""Post processor for object descriptions using GenAI."""
import datetime
import logging
import os
import threading
from pathlib import Path
from typing import TYPE_CHECKING, Any
import cv2
import numpy as np
from peewee import DoesNotExist
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor
from frigate.data_processing.types import PostProcessDataEnum
from frigate.genai import GenAIClient
from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
from frigate.util.image import create_thumbnail, ensure_jpeg_bytes
from frigate.util.path import get_event_thumbnail_bytes
if TYPE_CHECKING:
from frigate.embeddings import Embeddings
from ..post.api import PostProcessorApi
from ..types import DataProcessorMetrics
logger = logging.getLogger(__name__)
MAX_THUMBNAILS = 10
class ObjectDescriptionProcessor(PostProcessorApi):
def __init__(
self,
config: FrigateConfig,
embeddings: "Embeddings",
requestor: InterProcessRequestor,
metrics: DataProcessorMetrics,
client: GenAIClient,
semantic_trigger_processor: SemanticTriggerProcessor | None,
):
super().__init__(config, metrics, None)
self.config = config
self.embeddings = embeddings
self.requestor = requestor
self.metrics = metrics
self.genai_client = client
self.semantic_trigger_processor = semantic_trigger_processor
self.tracked_events: dict[str, list[Any]] = {}
self.early_request_sent: dict[str, bool] = {}
self.object_desc_speed = InferenceSpeed(self.metrics.object_desc_speed)
self.object_desc_dps = EventsPerSecond()
self.object_desc_dps.start()
def __handle_frame_update(
self, camera: str, data: dict, yuv_frame: np.ndarray
) -> None:
"""Handle an update to a frame for an object."""
camera_config = self.config.cameras[camera]
# no need to save our own thumbnails if genai is not enabled
# or if the object has become stationary
if not data["stationary"]:
if data["id"] not in self.tracked_events:
self.tracked_events[data["id"]] = []
data["thumbnail"] = create_thumbnail(yuv_frame, data["box"])
# Limit the number of thumbnails saved
if len(self.tracked_events[data["id"]]) >= MAX_THUMBNAILS:
# Always keep the first thumbnail for the event
self.tracked_events[data["id"]].pop(1)
self.tracked_events[data["id"]].append(data)
# check if we're configured to send an early request after a minimum number of updates received
if camera_config.objects.genai.send_triggers.after_significant_updates:
if (
len(self.tracked_events.get(data["id"], []))
>= camera_config.objects.genai.send_triggers.after_significant_updates
and data["id"] not in self.early_request_sent
):
if data["has_clip"] and data["has_snapshot"]:
event: Event = Event.get(Event.id == data["id"])
if (
not camera_config.objects.genai.objects
or event.label in camera_config.objects.genai.objects
) and (
not camera_config.objects.genai.required_zones
or set(data["entered_zones"])
& set(camera_config.objects.genai.required_zones)
):
logger.debug(f"{camera} sending early request to GenAI")
self.early_request_sent[data["id"]] = True
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
[
data["thumbnail"]
for data in self.tracked_events[data["id"]]
],
),
).start()
def __handle_frame_finalize(
self, camera: str, event: Event, thumbnail: bytes
) -> None:
"""Handle the finalization of a frame."""
camera_config = self.config.cameras[camera]
if (
camera_config.objects.genai.enabled
and camera_config.objects.genai.send_triggers.tracked_object_end
and (
not camera_config.objects.genai.objects
or event.label in camera_config.objects.genai.objects
)
and (
not camera_config.objects.genai.required_zones
or set(event.zones) & set(camera_config.objects.genai.required_zones)
)
):
self._process_genai_description(event, camera_config, thumbnail)
def __regenerate_description(self, event_id: str, source: str, force: bool) -> None:
"""Regenerate the description for an event."""
try:
event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
logger.error(f"Event {event_id} not found for description regeneration")
return
if self.genai_client is None:
logger.error("GenAI not enabled")
return
camera_config = self.config.cameras[event.camera]
if not camera_config.objects.genai.enabled and not force:
logger.error(f"GenAI not enabled for camera {event.camera}")
return
thumbnail = get_event_thumbnail_bytes(event)
# ensure we have a jpeg to pass to the model
thumbnail = ensure_jpeg_bytes(thumbnail)
logger.debug(
f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"
)
if event.has_snapshot and source == "snapshot":
snapshot_image = self._read_and_crop_snapshot(event)
if not snapshot_image:
return
embed_image = (
[snapshot_image]
if event.has_snapshot and source == "snapshot"
else (
[data["thumbnail"] for data in self.tracked_events[event_id]]
if len(self.tracked_events.get(event_id, [])) > 0
else [thumbnail]
)
)
self._genai_embed_description(event, embed_image)
def process_data(self, frame_data: dict, data_type: PostProcessDataEnum) -> None:
"""Process a frame update."""
self.metrics.object_desc_dps.value = self.object_desc_dps.eps()
if data_type != PostProcessDataEnum.tracked_object:
return
state: str | None = frame_data.get("state", None)
if state is not None:
logger.debug(f"Processing {state} for {frame_data['camera']}")
if state == "update":
self.__handle_frame_update(
frame_data["camera"], frame_data["data"], frame_data["yuv_frame"]
)
elif state == "finalize":
self.__handle_frame_finalize(
frame_data["camera"], frame_data["event"], frame_data["thumbnail"]
)
def handle_request(self, topic: str, data: dict[str, Any]) -> str | None:
"""Handle a request."""
if topic == "regenerate_description":
self.__regenerate_description(
data["event_id"], data["source"], data["force"]
)
return None
def _read_and_crop_snapshot(self, event: Event) -> bytes | None:
"""Read, decode, and crop the snapshot image."""
snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")
if not os.path.isfile(snapshot_file):
logger.error(
f"Cannot load snapshot for {event.id}, file not found: {snapshot_file}"
)
return None
try:
with open(snapshot_file, "rb") as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8),
cv2.IMREAD_COLOR,
)
# Crop snapshot based on region
# provide full image if region doesn't exist (manual events)
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = event.data.get(
"region", [0, 0, 1, 1]
)
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height),
x1 : x1 + int(width_rel * width),
]
_, buffer = cv2.imencode(".jpg", cropped_image)
return buffer.tobytes()
except Exception:
return None
def _process_genai_description(
self, event: Event, camera_config: CameraConfig, thumbnail
) -> None:
if event.has_snapshot and camera_config.objects.genai.use_snapshot:
snapshot_image = self._read_and_crop_snapshot(event)
if not snapshot_image:
return
num_thumbnails = len(self.tracked_events.get(event.id, []))
# ensure we have a jpeg to pass to the model
thumbnail = ensure_jpeg_bytes(thumbnail)
embed_image = (
[snapshot_image]
if event.has_snapshot and camera_config.objects.genai.use_snapshot
else (
[data["thumbnail"] for data in self.tracked_events[event.id]]
if num_thumbnails > 0
else [thumbnail]
)
)
if camera_config.objects.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}")
Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir(
parents=True, exist_ok=True
)
for idx, data in enumerate(self.tracked_events[event.id], 1):
jpg_bytes: bytes | None = data["thumbnail"]
if jpg_bytes is None:
logger.warning(f"Unable to save thumbnail {idx} for {event.id}.")
else:
with open(
os.path.join(
CLIPS_DIR,
f"genai-requests/{event.id}/{idx}.jpg",
),
"wb",
) as j:
j.write(jpg_bytes)
# Generate the description. Call happens in a thread since it is network bound.
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
embed_image,
),
).start()
# Delete tracked events based on the event_id
if event.id in self.tracked_events:
del self.tracked_events[event.id]
def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
"""Embed the description for an event."""
start = datetime.datetime.now().timestamp()
camera_config = self.config.cameras[event.camera]
description = self.genai_client.generate_object_description(
camera_config, thumbnails, event
)
if not description:
logger.debug("Failed to generate description for %s", event.id)
return
# fire and forget description update
self.requestor.send_data(
UPDATE_EVENT_DESCRIPTION,
{
"type": TrackedObjectUpdateTypesEnum.description,
"id": event.id,
"description": description,
"camera": event.camera,
},
)
# Embed the description
if self.config.semantic_search.enabled:
self.embeddings.embed_description(event.id, description)
# Check semantic trigger for this description
if self.semantic_trigger_processor is not None:
self.semantic_trigger_processor.process_data(
{"event_id": event.id, "camera": event.camera, "type": "text"},
PostProcessDataEnum.tracked_object,
)
# Update inference timing metrics
self.object_desc_speed.update(datetime.datetime.now().timestamp() - start)
self.object_desc_dps.update()
logger.debug(
"Generated description for %s (%d images): %s",
event.id,
len(thumbnails),
description,
)
@@ -43,21 +43,6 @@ class ReviewDescriptionProcessor(PostProcessorApi):
self.review_descs_dps = EventsPerSecond()
self.review_descs_dps.start()
def calculate_frame_count(self) -> int:
"""Calculate optimal number of frames based on context size."""
# With our preview images (height of 180px) each image should be ~100 tokens per image
# We want to be conservative to not have too long of query times with too many images
context_size = self.genai_client.get_context_size()
if context_size > 10000:
return 20
elif context_size > 6000:
return 16
elif context_size > 4000:
return 12
else:
return 8
def process_data(self, data, data_type):
self.metrics.review_desc_dps.value = self.review_descs_dps.eps()
@@ -108,7 +93,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
if camera_config.review.genai.debug_save_thumbnails:
id = data["after"]["id"]
Path(os.path.join(CLIPS_DIR, "genai-requests", f"{id}")).mkdir(
Path(os.path.join(CLIPS_DIR, f"genai-requests/{id}")).mkdir(
parents=True, exist_ok=True
)
shutil.copy(
@@ -139,9 +124,6 @@ class ReviewDescriptionProcessor(PostProcessorApi):
if topic == EmbeddingsRequestEnum.summarize_review.value:
start_ts = request_data["start_ts"]
end_ts = request_data["end_ts"]
logger.debug(
f"Found GenAI Review Summary request for {start_ts} to {end_ts}"
)
items: list[dict[str, Any]] = [
r["data"]["metadata"]
for r in (
@@ -159,7 +141,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
if len(items) == 0:
logger.debug("No review items with metadata found during time period")
return "No activity was found during this time."
return None
important_items = list(
filter(
@@ -172,16 +154,8 @@ class ReviewDescriptionProcessor(PostProcessorApi):
if not important_items:
return "No concerns were found during this time period."
if self.config.review.genai.debug_save_thumbnails:
Path(
os.path.join(CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}")
).mkdir(parents=True, exist_ok=True)
return self.genai_client.generate_review_summary(
start_ts,
end_ts,
important_items,
self.config.review.genai.debug_save_thumbnails,
start_ts, end_ts, important_items
)
else:
return None
@@ -191,6 +165,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
camera: str,
start_time: float,
end_time: float,
desired_frame_count: int = 12,
) -> list[str]:
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{camera}"
@@ -217,8 +192,6 @@ class ReviewDescriptionProcessor(PostProcessorApi):
all_frames.append(os.path.join(preview_dir, file))
frame_count = len(all_frames)
desired_frame_count = self.calculate_frame_count()
if frame_count <= desired_frame_count:
return all_frames
@@ -251,7 +224,7 @@ def run_analysis(
"start": datetime.datetime.fromtimestamp(final_data["start_time"]).strftime(
"%A, %I:%M %p"
),
"duration": round(final_data["end_time"] - final_data["start_time"]),
"duration": final_data["end_time"] - final_data["start_time"],
}
objects = []
@@ -275,7 +248,6 @@ def run_analysis(
genai_config.additional_concerns,
genai_config.preferred_language,
genai_config.debug_save_thumbnails,
genai_config.activity_context_prompt,
)
review_inference_speed.update(datetime.datetime.now().timestamp() - start)
@@ -19,4 +19,3 @@ class ReviewMetadata(BaseModel):
default=None,
description="Other concerns highlighted by the user that are observed.",
)
time: str | None = Field(default=None, description="Time of activity.")
@@ -42,13 +42,10 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
self.detected_birds: dict[str, float] = {}
self.labelmap: dict[int, str] = {}
GITHUB_RAW_ENDPOINT = os.environ.get(
"GITHUB_RAW_ENDPOINT", "https://raw.githubusercontent.com"
)
download_path = os.path.join(MODEL_CACHE_DIR, "bird")
self.model_files = {
"bird.tflite": f"{GITHUB_RAW_ENDPOINT}/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite",
"birdmap.txt": f"{GITHUB_RAW_ENDPOINT}/google-coral/test_data/master/inat_bird_labels.txt",
"bird.tflite": "https://raw.githubusercontent.com/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite",
"birdmap.txt": "https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt",
}
if not all(
@@ -48,9 +48,9 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
self.requestor = requestor
self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
self.interpreter: Interpreter | None = None
self.tensor_input_details: dict[str, Any] | None = None
self.tensor_output_details: dict[str, Any] | None = None
self.interpreter: Interpreter = None
self.tensor_input_details: dict[str, Any] = None
self.tensor_output_details: dict[str, Any] = None
self.labelmap: dict[int, str] = {}
self.classifications_per_second = EventsPerSecond()
self.inference_speed = InferenceSpeed(
@@ -61,24 +61,17 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
@redirect_output_to_logger(logger, logging.DEBUG)
def __build_detector(self) -> None:
model_path = os.path.join(self.model_dir, "model.tflite")
labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
if not os.path.exists(model_path) or not os.path.exists(labelmap_path):
self.interpreter = None
self.tensor_input_details = None
self.tensor_output_details = None
self.labelmap = {}
return
self.interpreter = Interpreter(
model_path=model_path,
model_path=os.path.join(self.model_dir, "model.tflite"),
num_threads=2,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
self.labelmap = load_labels(labelmap_path, prefill=0)
self.labelmap = load_labels(
os.path.join(self.model_dir, "labelmap.txt"),
prefill=0,
)
self.classifications_per_second.start()
def __update_metrics(self, duration: float) -> None:
@@ -147,16 +140,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
logger.warning("Failed to resize image for state classification")
return
if self.interpreter is None:
write_classification_attempt(
self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
now,
"unknown",
0.0,
)
return
input = np.expand_dims(frame, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke()
@@ -214,10 +197,10 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
self.model_config = model_config
self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
self.interpreter: Interpreter | None = None
self.interpreter: Interpreter = None
self.sub_label_publisher = sub_label_publisher
self.tensor_input_details: dict[str, Any] | None = None
self.tensor_output_details: dict[str, Any] | None = None
self.tensor_input_details: dict[str, Any] = None
self.tensor_output_details: dict[str, Any] = None
self.detected_objects: dict[str, float] = {}
self.labelmap: dict[int, str] = {}
self.classifications_per_second = EventsPerSecond()
@@ -228,24 +211,17 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
@redirect_output_to_logger(logger, logging.DEBUG)
def __build_detector(self) -> None:
model_path = os.path.join(self.model_dir, "model.tflite")
labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
if not os.path.exists(model_path) or not os.path.exists(labelmap_path):
self.interpreter = None
self.tensor_input_details = None
self.tensor_output_details = None
self.labelmap = {}
return
self.interpreter = Interpreter(
model_path=model_path,
model_path=os.path.join(self.model_dir, "model.tflite"),
num_threads=2,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
self.labelmap = load_labels(labelmap_path, prefill=0)
self.labelmap = load_labels(
os.path.join(self.model_dir, "labelmap.txt"),
prefill=0,
)
def __update_metrics(self, duration: float) -> None:
self.classifications_per_second.update()
@@ -289,16 +265,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
logger.warning("Failed to resize image for state classification")
return
if self.interpreter is None:
write_classification_attempt(
self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
now,
"unknown",
0.0,
)
return
input = np.expand_dims(crop, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke()
@@ -60,12 +60,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.faces_per_second = EventsPerSecond()
self.inference_speed = InferenceSpeed(self.metrics.face_rec_speed)
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
self.model_files = {
"facedet.onnx": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
"landmarkdet.yaml": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
"facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
"landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
}
if not all(
@@ -22,8 +22,6 @@ class DataProcessorMetrics:
yolov9_lpr_pps: Synchronized
review_desc_speed: Synchronized
review_desc_dps: Synchronized
object_desc_speed: Synchronized
object_desc_dps: Synchronized
classification_speeds: dict[str, Synchronized]
classification_cps: dict[str, Synchronized]
@@ -40,8 +38,6 @@ class DataProcessorMetrics:
self.yolov9_lpr_pps = manager.Value("d", 0.0)
self.review_desc_speed = manager.Value("d", 0.0)
self.review_desc_dps = manager.Value("d", 0.0)
self.object_desc_speed = manager.Value("d", 0.0)
self.object_desc_dps = manager.Value("d", 0.0)
self.classification_speeds = manager.dict()
self.classification_cps = manager.dict()
@@ -78,21 +78,6 @@ class BaseModelRunner(ABC):
class ONNXModelRunner(BaseModelRunner):
"""Run ONNX models using ONNX Runtime."""
@staticmethod
def is_migraphx_complex_model(model_type: str) -> bool:
# Import here to avoid circular imports
from frigate.detectors.detector_config import ModelTypeEnum
from frigate.embeddings.types import EnrichmentModelTypeEnum
return model_type in [
EnrichmentModelTypeEnum.paddleocr.value,
EnrichmentModelTypeEnum.jina_v1.value,
EnrichmentModelTypeEnum.jina_v2.value,
EnrichmentModelTypeEnum.facenet.value,
ModelTypeEnum.rfdetr.value,
ModelTypeEnum.dfine.value,
]
def __init__(self, ort: ort.InferenceSession):
self.ort = ort
@@ -127,7 +112,6 @@ class CudaGraphRunner(BaseModelRunner):
EnrichmentModelTypeEnum.paddleocr.value,
EnrichmentModelTypeEnum.jina_v1.value,
EnrichmentModelTypeEnum.jina_v2.value,
EnrichmentModelTypeEnum.yolov9_license_plate.value,
]
def __init__(self, session: ort.InferenceSession, cuda_device_id: int):
@@ -210,9 +194,6 @@ class OpenVINOModelRunner(BaseModelRunner):
# Apply performance optimization
self.ov_core.set_property(device, {"PERF_COUNT": "NO"})
if device in ["GPU", "AUTO"]:
self.ov_core.set_property(device, {"PERFORMANCE_HINT": "LATENCY"})
# Compile model
self.compiled_model = self.ov_core.compile_model(
model=model_path, device_name=device
@@ -424,8 +405,7 @@ def get_optimized_runner(
) -> BaseModelRunner:
"""Get an optimized runner for the hardware."""
device = device or "AUTO"
if device != "CPU" and is_rknn_compatible(model_path):
if is_rknn_compatible(model_path):
rknn_path = auto_convert_model(model_path)
if rknn_path:
@@ -457,15 +437,6 @@ def get_optimized_runner(
options[0]["device_id"],
)
if (
providers
and providers[0] == "MIGraphXExecutionProvider"
and ONNXModelRunner.is_migraphx_complex_model(model_type)
):
# Don't use MIGraphX for models that are not supported
providers.pop(0)
options.pop(0)
return ONNXModelRunner(
ort.InferenceSession(
model_path,
@@ -161,10 +161,6 @@ class ModelConfig(BaseModel):
if model_info.get("inputDataType"):
self.input_dtype = InputDTypeEnum(model_info["inputDataType"])
# RKNN always uses NHWC
if detector == "rknn":
self.input_tensor = InputTensorEnum.nhwc
# generate list of attribute labels
self.attributes_map = {
**model_info.get("attributes", DEFAULT_ATTRIBUTE_LABEL_MAP),
@@ -33,6 +33,10 @@ def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarr
image = image[0]
h, w = image.shape[:2]
if (w, h) == (320, 320) and (model_w, model_h) == (640, 640):
return cv2.resize(image, (model_w, model_h), interpolation=cv2.INTER_LINEAR)
scale = min(model_w / w, model_h / h)
new_w, new_h = int(w * scale), int(h * scale)
resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
@@ -165,9 +165,8 @@ class Rknn(DetectionApi):
if not os.path.isdir(model_cache_dir):
os.mkdir(model_cache_dir)
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
urllib.request.urlretrieve(
f"{GITHUB_ENDPOINT}/MarcA711/rknn-models/releases/download/v2.3.2-2/{filename}",
f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.2-2/{filename}",
model_cache_dir + filename,
)
@@ -1,103 +0,0 @@
import logging
import os
import numpy as np
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
InputTensorEnum,
ModelTypeEnum,
)
try:
from synap import Network
from synap.postprocessor import Detector
from synap.preprocessor import Preprocessor
from synap.types import Layout, Shape
SYNAP_SUPPORT = True
except ImportError:
SYNAP_SUPPORT = False
logger = logging.getLogger(__name__)
DETECTOR_KEY = "synaptics"
class SynapDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
class SynapDetector(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, detector_config: SynapDetectorConfig):
if not SYNAP_SUPPORT:
logger.error(
"Error importing Synaptics SDK modules. You must use the -synaptics Docker image variant for Synaptics detector support."
)
return
try:
_, ext = os.path.splitext(detector_config.model.path)
if ext and ext != ".synap":
raise ValueError("Model path config for Synap1680 is incorrect.")
synap_network = Network(detector_config.model.path)
logger.info(f"Synap NPU loaded model: {detector_config.model.path}")
except ValueError as ve:
logger.error(f"Synap1680 setup has failed: {ve}")
raise
except Exception as e:
logger.error(f"Failed to init Synap NPU: {e}")
raise
self.width = detector_config.model.width
self.height = detector_config.model.height
self.model_type = detector_config.model.model_type
self.network = synap_network
self.network_input_details = self.network.inputs[0]
self.input_tensor_layout = detector_config.model.input_tensor
# Create Inference Engine
self.preprocessor = Preprocessor()
self.detector = Detector(score_threshold=0.4, iou_threshold=0.4)
def detect_raw(self, tensor_input: np.ndarray):
# It has only been tested with a pre-converted mobilenet80 .tflite -> .synap model currently
layout = Layout.nhwc # default layout
detections = np.zeros((20, 6), np.float32)
if self.input_tensor_layout == InputTensorEnum.nhwc:
layout = Layout.nhwc
postprocess_data = self.preprocessor.assign(
self.network.inputs, tensor_input, Shape(tensor_input.shape), layout
)
output_tensor_obj = self.network.predict()
output = self.detector.process(output_tensor_obj, postprocess_data)
if self.model_type == ModelTypeEnum.ssd:
for i, item in enumerate(output.items):
if i == 20:
break
bb = item.bounding_box
# Convert corner coordinates to normalized [0,1] range
x1 = bb.origin.x / self.width # Top-left X
y1 = bb.origin.y / self.height # Top-left Y
x2 = (bb.origin.x + bb.size.x) / self.width # Bottom-right X
y2 = (bb.origin.y + bb.size.y) / self.height # Bottom-right Y
detections[i] = [
item.class_index,
float(item.confidence),
y1,
x1,
y2,
x2,
]
else:
logger.error(f"Unsupported model type: {self.model_type}")
return detections
@@ -1,6 +1,5 @@
import json
import logging
import os
from typing import Any, List
import numpy as np
@@ -47,11 +46,6 @@ class ZmqIpcDetector(DetectionApi):
b) Single frame tensor_bytes of length 20*6*4 bytes (float32).
On any error or timeout, this detector returns a zero array of shape (20, 6).
Model Management:
- On initialization, sends model request to check if model is available
- If model not available, sends model data via ZMQ
- Only starts inference after model is ready
"""
type_key = DETECTOR_KEY
@@ -66,13 +60,6 @@ class ZmqIpcDetector(DetectionApi):
self._socket = None
self._create_socket()
# Model management
self._model_ready = False
self._model_name = self._get_model_name()
# Initialize model if needed
self._initialize_model()
# Preallocate zero result for error paths
self._zero_result = np.zeros((20, 6), np.float32)
@@ -91,167 +78,6 @@ class ZmqIpcDetector(DetectionApi):
logger.debug(f"ZMQ detector connecting to {self._endpoint}")
self._socket.connect(self._endpoint)
def _get_model_name(self) -> str:
"""Get the model filename from the detector config."""
model_path = self.detector_config.model.path
return os.path.basename(model_path)
def _initialize_model(self) -> None:
"""Initialize the model by checking availability and transferring if needed."""
try:
logger.info(f"Initializing model: {self._model_name}")
# Check if model is available and transfer if needed
if self._check_and_transfer_model():
logger.info(f"Model {self._model_name} is ready")
self._model_ready = True
else:
logger.error(f"Failed to initialize model {self._model_name}")
except Exception as e:
logger.error(f"Failed to initialize model: {e}")
def _check_and_transfer_model(self) -> bool:
"""Check if model is available and transfer if needed in one atomic operation."""
try:
# Send model availability request
header = {"model_request": True, "model_name": self._model_name}
header_bytes = json.dumps(header).encode("utf-8")
self._socket.send_multipart([header_bytes])
# Temporarily increase timeout for model operations
original_timeout = self._socket.getsockopt(zmq.RCVTIMEO)
self._socket.setsockopt(zmq.RCVTIMEO, 30000)
try:
response_frames = self._socket.recv_multipart()
finally:
self._socket.setsockopt(zmq.RCVTIMEO, original_timeout)
if len(response_frames) == 1:
try:
response = json.loads(response_frames[0].decode("utf-8"))
model_available = response.get("model_available", False)
model_loaded = response.get("model_loaded", False)
if model_available and model_loaded:
return True
elif model_available and not model_loaded:
logger.error("Model exists but failed to load")
return False
else:
return self._send_model_data()
except json.JSONDecodeError:
logger.warning(
"Received non-JSON response for model availability check"
)
return False
else:
logger.warning(
"Received unexpected response format for model availability check"
)
return False
except Exception as e:
logger.error(f"Failed to check and transfer model: {e}")
return False
def _check_model_availability(self) -> bool:
"""Check if the model is available on the detector."""
try:
# Send model availability request
header = {"model_request": True, "model_name": self._model_name}
header_bytes = json.dumps(header).encode("utf-8")
self._socket.send_multipart([header_bytes])
# Receive response
response_frames = self._socket.recv_multipart()
# Check if this is a JSON response (model management)
if len(response_frames) == 1:
try:
response = json.loads(response_frames[0].decode("utf-8"))
model_available = response.get("model_available", False)
model_loaded = response.get("model_loaded", False)
logger.debug(
f"Model availability check: available={model_available}, loaded={model_loaded}"
)
return model_available and model_loaded
except json.JSONDecodeError:
logger.warning(
"Received non-JSON response for model availability check"
)
return False
else:
logger.warning(
"Received unexpected response format for model availability check"
)
return False
except Exception as e:
logger.error(f"Failed to check model availability: {e}")
return False
def _send_model_data(self) -> bool:
"""Send model data to the detector."""
try:
model_path = self.detector_config.model.path
if not os.path.exists(model_path):
logger.error(f"Model file not found: {model_path}")
return False
logger.info(f"Transferring model to detector: {self._model_name}")
with open(model_path, "rb") as f:
model_data = f.read()
header = {"model_data": True, "model_name": self._model_name}
header_bytes = json.dumps(header).encode("utf-8")
self._socket.send_multipart([header_bytes, model_data])
# Temporarily increase timeout for model loading (can take several seconds)
original_timeout = self._socket.getsockopt(zmq.RCVTIMEO)
self._socket.setsockopt(zmq.RCVTIMEO, 30000)
try:
# Receive response
response_frames = self._socket.recv_multipart()
finally:
# Restore original timeout
self._socket.setsockopt(zmq.RCVTIMEO, original_timeout)
# Check if this is a JSON response (model management)
if len(response_frames) == 1:
try:
response = json.loads(response_frames[0].decode("utf-8"))
model_saved = response.get("model_saved", False)
model_loaded = response.get("model_loaded", False)
if model_saved and model_loaded:
logger.info(
f"Model {self._model_name} transferred and loaded successfully"
)
else:
logger.error(
f"Model transfer failed: saved={model_saved}, loaded={model_loaded}"
)
return model_saved and model_loaded
except json.JSONDecodeError:
logger.warning("Received non-JSON response for model data transfer")
return False
else:
logger.warning(
"Received unexpected response format for model data transfer"
)
return False
except Exception as e:
logger.error(f"Failed to send model data: {e}")
return False
def _build_header(self, tensor_input: np.ndarray) -> bytes:
header: dict[str, Any] = {
"shape": list(tensor_input.shape),
@@ -285,10 +111,6 @@ class ZmqIpcDetector(DetectionApi):
return self._zero_result
def detect_raw(self, tensor_input: np.ndarray) -> np.ndarray:
if not self._model_ready:
logger.warning("Model not ready, returning zero detections")
return self._zero_result
try:
header_bytes = self._build_header(tensor_input)
payload_bytes = memoryview(tensor_input.tobytes(order="C"))
@@ -301,13 +123,13 @@ class ZmqIpcDetector(DetectionApi):
detections = self._decode_response(reply_frames)
# Ensure output shape and dtype are exactly as expected
return detections
except zmq.Again:
# Timeout
logger.debug("ZMQ detector request timed out; resetting socket")
try:
self._create_socket()
self._initialize_model()
except Exception:
pass
return self._zero_result
@@ -315,7 +137,6 @@ class ZmqIpcDetector(DetectionApi):
logger.error(f"ZMQ detector ZMQError: {exc}; resetting socket")
try:
self._create_socket()
self._initialize_model()
except Exception:
pass
return self._zero_result
@@ -3,10 +3,14 @@
import base64
import datetime
import logging
import os
import threading
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
from pathlib import Path
from typing import Any, Optional
import cv2
import numpy as np
from peewee import DoesNotExist
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
@@ -26,12 +30,16 @@ from frigate.comms.recordings_updater import (
RecordingsDataTypeEnum,
)
from frigate.comms.review_updater import ReviewDataSubscriber
from frigate.config import FrigateConfig
from frigate.config import CameraConfig, FrigateConfig
from frigate.config.camera.camera import CameraTypeEnum
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber,
)
from frigate.const import (
CLIPS_DIR,
UPDATE_EVENT_DESCRIPTION,
)
from frigate.data_processing.common.license_plate.model import (
LicensePlateModelRunner,
)
@@ -42,7 +50,6 @@ from frigate.data_processing.post.audio_transcription import (
from frigate.data_processing.post.license_plate import (
LicensePlatePostProcessor,
)
from frigate.data_processing.post.object_descriptions import ObjectDescriptionProcessor
from frigate.data_processing.post.review_descriptions import ReviewDescriptionProcessor
from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor
from frigate.data_processing.real_time.api import RealTimeProcessorApi
@@ -60,8 +67,13 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import get_genai_client
from frigate.models import Event, Recordings, ReviewSegment, Trigger
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.image import (
SharedMemoryFrameManager,
calculate_region,
ensure_jpeg_bytes,
)
from frigate.util.path import get_event_thumbnail_bytes
from .embeddings import Embeddings
@@ -132,7 +144,7 @@ class EmbeddingMaintainer(threading.Thread):
EventMetadataTypeEnum.regenerate_description
)
self.recordings_subscriber = RecordingsDataSubscriber(
RecordingsDataTypeEnum.saved
RecordingsDataTypeEnum.recordings_available_through
)
self.review_subscriber = ReviewDataSubscriber("")
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video.value)
@@ -223,30 +235,20 @@ class EmbeddingMaintainer(threading.Thread):
AudioTranscriptionPostProcessor(self.config, self.requestor, metrics)
)
semantic_trigger_processor: SemanticTriggerProcessor | None = None
if self.config.semantic_search.enabled:
semantic_trigger_processor = SemanticTriggerProcessor(
db,
self.config,
self.requestor,
metrics,
self.embeddings,
)
self.post_processors.append(semantic_trigger_processor)
if any(c.objects.genai.enabled_in_config for c in self.config.cameras.values()):
self.post_processors.append(
ObjectDescriptionProcessor(
SemanticTriggerProcessor(
db,
self.config,
self.embeddings,
self.requestor,
self.metrics,
self.genai_client,
semantic_trigger_processor,
metrics,
self.embeddings,
)
)
self.stop_event = stop_event
self.tracked_events: dict[str, list[Any]] = {}
self.early_request_sent: dict[str, bool] = {}
# recordings data
self.recordings_available_through: dict[str, float] = {}
@@ -311,7 +313,6 @@ class EmbeddingMaintainer(threading.Thread):
if resp is not None:
return resp
logger.error(f"No processor handled the topic {topic}")
return None
except Exception as e:
logger.error(f"Unable to handle embeddings request {e}", exc_info=True)
@@ -335,8 +336,11 @@ class EmbeddingMaintainer(threading.Thread):
camera_config = self.config.cameras[camera]
# no need to process updated objects if no processors are active
if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
# no need to process updated objects if face recognition, LPR, and genai are disabled
if (
not camera_config.objects.genai.enabled
and len(self.realtime_processors) == 0
):
return
# Create our own thumbnail based on the bounding box and the frame time
@@ -356,17 +360,57 @@ class EmbeddingMaintainer(threading.Thread):
for processor in self.realtime_processors:
processor.process_frame(data, yuv_frame)
for processor in self.post_processors:
if isinstance(processor, ObjectDescriptionProcessor):
processor.process_data(
{
"camera": camera,
"data": data,
"state": "update",
"yuv_frame": yuv_frame,
},
PostProcessDataEnum.tracked_object,
)
# no need to save our own thumbnails if genai is not enabled
# or if the object has become stationary
if self.genai_client is not None and not data["stationary"]:
if data["id"] not in self.tracked_events:
self.tracked_events[data["id"]] = []
data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
# Limit the number of thumbnails saved
if len(self.tracked_events[data["id"]]) >= MAX_THUMBNAILS:
# Always keep the first thumbnail for the event
self.tracked_events[data["id"]].pop(1)
self.tracked_events[data["id"]].append(data)
# check if we're configured to send an early request after a minimum number of updates received
if (
self.genai_client is not None
and camera_config.objects.genai.send_triggers.after_significant_updates
):
if (
len(self.tracked_events.get(data["id"], []))
>= camera_config.objects.genai.send_triggers.after_significant_updates
and data["id"] not in self.early_request_sent
):
if data["has_clip"] and data["has_snapshot"]:
event: Event = Event.get(Event.id == data["id"])
if (
not camera_config.objects.genai.objects
or event.label in camera_config.objects.genai.objects
) and (
not camera_config.objects.genai.required_zones
or set(data["entered_zones"])
& set(camera_config.objects.genai.required_zones)
):
logger.debug(f"{camera} sending early request to GenAI")
self.early_request_sent[data["id"]] = True
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
[
data["thumbnail"]
for data in self.tracked_events[data["id"]]
],
),
).start()
self.frame_manager.close(frame_name)
@@ -379,13 +423,12 @@ class EmbeddingMaintainer(threading.Thread):
break
event_id, camera, updated_db = ended
camera_config = self.config.cameras[camera]
# expire in realtime processors
for processor in self.realtime_processors:
processor.expire_object(event_id, camera)
thumbnail: bytes | None = None
if updated_db:
try:
event: Event = Event.get(Event.id == event_id)
@@ -402,6 +445,23 @@ class EmbeddingMaintainer(threading.Thread):
# Embed the thumbnail
self._embed_thumbnail(event_id, thumbnail)
# Run GenAI
if (
camera_config.objects.genai.enabled
and camera_config.objects.genai.send_triggers.tracked_object_end
and self.genai_client is not None
and (
not camera_config.objects.genai.objects
or event.label in camera_config.objects.genai.objects
)
and (
not camera_config.objects.genai.required_zones
or set(event.zones)
& set(camera_config.objects.genai.required_zones)
)
):
self._process_genai_description(event, camera_config, thumbnail)
# call any defined post processors
for processor in self.post_processors:
if isinstance(processor, LicensePlatePostProcessor):
@@ -431,25 +491,16 @@ class EmbeddingMaintainer(threading.Thread):
{"event_id": event_id, "camera": camera, "type": "image"},
PostProcessDataEnum.tracked_object,
)
elif isinstance(processor, ObjectDescriptionProcessor):
if not updated_db:
continue
processor.process_data(
{
"event": event,
"camera": camera,
"state": "finalize",
"thumbnail": thumbnail,
},
PostProcessDataEnum.tracked_object,
)
else:
processor.process_data(
{"event_id": event_id, "camera": camera},
PostProcessDataEnum.tracked_object,
)
# Delete tracked events based on the event_id
if event_id in self.tracked_events:
del self.tracked_events[event_id]
def _expire_dedicated_lpr(self) -> None:
"""Remove plates not seen for longer than expiration timeout for dedicated lpr cameras."""
now = datetime.datetime.now().timestamp()
@@ -473,28 +524,20 @@ class EmbeddingMaintainer(threading.Thread):
def _process_recordings_updates(self) -> None:
"""Process recordings updates."""
while True:
update = self.recordings_subscriber.check_for_update()
recordings_data = self.recordings_subscriber.check_for_update()
if not update:
if recordings_data is None:
break
(raw_topic, payload) = update
camera, recordings_available_through_timestamp = recordings_data
if not raw_topic or not payload:
break
self.recordings_available_through[camera] = (
recordings_available_through_timestamp
)
topic = str(raw_topic)
if topic.endswith(RecordingsDataTypeEnum.saved.value):
camera, recordings_available_through_timestamp, _ = payload
self.recordings_available_through[camera] = (
recordings_available_through_timestamp
)
logger.debug(
f"{camera} now has recordings available through {recordings_available_through_timestamp}"
)
logger.debug(
f"{camera} now has recordings available through {recordings_available_through_timestamp}"
)
def _process_review_updates(self) -> None:
"""Process review updates."""
@@ -518,16 +561,9 @@ class EmbeddingMaintainer(threading.Thread):
event_id, source, force = payload
if event_id:
for processor in self.post_processors:
if isinstance(processor, ObjectDescriptionProcessor):
processor.handle_request(
"regenerate_description",
{
"event_id": event_id,
"source": RegenerateDescriptionEnum(source),
"force": force,
},
)
self.handle_regenerate_description(
event_id, RegenerateDescriptionEnum(source), force
)
def _process_frame_updates(self) -> None:
"""Process event updates"""
@@ -577,9 +613,208 @@ class EmbeddingMaintainer(threading.Thread):
self.frame_manager.close(frame_name)
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame."""
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
region = calculate_region(
frame.shape, box[0], box[1], box[2], box[3], height, multiplier=1.4
)
frame = frame[region[1] : region[3], region[0] : region[2]]
width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if ret:
return jpg.tobytes()
return None
def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
"""Embed the thumbnail for an event."""
if not self.config.semantic_search.enabled:
return
self.embeddings.embed_thumbnail(event_id, thumbnail)
def _process_genai_description(
self, event: Event, camera_config: CameraConfig, thumbnail
) -> None:
if event.has_snapshot and camera_config.objects.genai.use_snapshot:
snapshot_image = self._read_and_crop_snapshot(event, camera_config)
if not snapshot_image:
return
num_thumbnails = len(self.tracked_events.get(event.id, []))
# ensure we have a jpeg to pass to the model
thumbnail = ensure_jpeg_bytes(thumbnail)
embed_image = (
[snapshot_image]
if event.has_snapshot and camera_config.objects.genai.use_snapshot
else (
[data["thumbnail"] for data in self.tracked_events[event.id]]
if num_thumbnails > 0
else [thumbnail]
)
)
if camera_config.objects.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}")
Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir(
parents=True, exist_ok=True
)
for idx, data in enumerate(self.tracked_events[event.id], 1):
jpg_bytes: bytes = data["thumbnail"]
if jpg_bytes is None:
logger.warning(f"Unable to save thumbnail {idx} for {event.id}.")
else:
with open(
os.path.join(
CLIPS_DIR,
f"genai-requests/{event.id}/{idx}.jpg",
),
"wb",
) as j:
j.write(jpg_bytes)
# Generate the description. Call happens in a thread since it is network bound.
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
embed_image,
),
).start()
def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
"""Embed the description for an event."""
camera_config = self.config.cameras[event.camera]
description = self.genai_client.generate_object_description(
camera_config, thumbnails, event
)
if not description:
logger.debug("Failed to generate description for %s", event.id)
return
# fire and forget description update
self.requestor.send_data(
UPDATE_EVENT_DESCRIPTION,
{
"type": TrackedObjectUpdateTypesEnum.description,
"id": event.id,
"description": description,
"camera": event.camera,
},
)
# Embed the description
if self.config.semantic_search.enabled:
self.embeddings.embed_description(event.id, description)
# Check semantic trigger for this description
for processor in self.post_processors:
if isinstance(processor, SemanticTriggerProcessor):
processor.process_data(
{"event_id": event.id, "camera": event.camera, "type": "text"},
PostProcessDataEnum.tracked_object,
)
else:
continue
logger.debug(
"Generated description for %s (%d images): %s",
event.id,
len(thumbnails),
description,
)
def _read_and_crop_snapshot(self, event: Event, camera_config) -> bytes | None:
"""Read, decode, and crop the snapshot image."""
snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")
if not os.path.isfile(snapshot_file):
logger.error(
f"Cannot load snapshot for {event.id}, file not found: {snapshot_file}"
)
return None
try:
with open(snapshot_file, "rb") as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8),
cv2.IMREAD_COLOR,
)
# Crop snapshot based on region
# provide full image if region doesn't exist (manual events)
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = event.data.get(
"region", [0, 0, 1, 1]
)
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height),
x1 : x1 + int(width_rel * width),
]
_, buffer = cv2.imencode(".jpg", cropped_image)
return buffer.tobytes()
except Exception:
return None
def handle_regenerate_description(
self, event_id: str, source: str, force: bool
) -> None:
try:
event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
logger.error(f"Event {event_id} not found for description regeneration")
return
if self.genai_client is None:
logger.error("GenAI not enabled")
return
camera_config = self.config.cameras[event.camera]
if not camera_config.objects.genai.enabled and not force:
logger.error(f"GenAI not enabled for camera {event.camera}")
return
thumbnail = get_event_thumbnail_bytes(event)
# ensure we have a jpeg to pass to the model
thumbnail = ensure_jpeg_bytes(thumbnail)
logger.debug(
f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"
)
if event.has_snapshot and source == "snapshot":
snapshot_image = self._read_and_crop_snapshot(event, camera_config)
if not snapshot_image:
return
embed_image = (
[snapshot_image]
if event.has_snapshot and source == "snapshot"
else (
[data["thumbnail"] for data in self.tracked_events[event_id]]
if len(self.tracked_events.get(event_id, [])) > 0
else [thumbnail]
)
)
self._genai_embed_description(event, embed_image)
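
The snapshot crop in _read_and_crop_snapshot above relies on the region being stored as relative fractions of the frame. A standalone sketch of that arithmetic, with invented values, assuming nothing beyond what the method itself does:

# Illustrative only: crop an image using a relative region, as in _read_and_crop_snapshot.
import numpy as np

img = np.zeros((1080, 1920, 3), dtype=np.uint8)  # pretend decoded snapshot
height, width = img.shape[:2]

# region stored as relative [x1, y1, width, height] fractions of the frame
x1_rel, y1_rel, width_rel, height_rel = [0.25, 0.10, 0.50, 0.40]

x1, y1 = int(x1_rel * width), int(y1_rel * height)  # (480, 108)
cropped = img[y1 : y1 + int(height_rel * height), x1 : x1 + int(width_rel * width)]
print(cropped.shape)  # (432, 960, 3)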

View File

@@ -27,12 +27,11 @@ FACENET_INPUT_SIZE = 160
class FaceNetEmbedding(BaseEmbedding):
def __init__(self):
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
super().__init__(
model_name="facedet",
model_file="facenet.tflite",
download_urls={
"facenet.tflite": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite",
"facenet.tflite": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite",
},
)
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
@@ -115,12 +114,11 @@ class FaceNetEmbedding(BaseEmbedding):
class ArcfaceEmbedding(BaseEmbedding):
def __init__(self, config: FaceRecognitionConfig):
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
super().__init__(
model_name="facedet",
model_file="arcface.onnx",
download_urls={
"arcface.onnx": f"{GITHUB_ENDPOINT}/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
"arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
},
)
self.config = config
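
The GITHUB_ENDPOINT lookup that appears in these download_urls hunks lets the download host be swapped at runtime. A minimal illustration follows; the mirror URL is purely hypothetical.

# Hypothetical: point model downloads at a GitHub mirror before the embedding
# classes are constructed. The mirror URL is invented for illustration.
import os

os.environ["GITHUB_ENDPOINT"] = "https://github-mirror.example.com"
# FaceNetEmbedding() / ArcfaceEmbedding() would then build their download_urls
# against the mirror instead of https://github.com.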

View File

@@ -37,12 +37,11 @@ class PaddleOCRDetection(BaseEmbedding):
if model_size == "large"
else "detection_v5-small.onnx"
)
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
super().__init__(
model_name="paddleocr-onnx",
model_file=model_file,
download_urls={
model_file: f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{'v3' if model_size == 'large' else 'v5'}/{model_file}"
model_file: f"https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/{'v3' if model_size == 'large' else 'v5'}/{model_file}"
},
)
self.requestor = requestor
@@ -98,12 +97,11 @@ class PaddleOCRClassification(BaseEmbedding):
requestor: InterProcessRequestor,
device: str = "AUTO",
):
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
super().__init__(
model_name="paddleocr-onnx",
model_file="classification.onnx",
download_urls={
"classification.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
"classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
},
)
self.requestor = requestor
@@ -159,13 +157,12 @@ class PaddleOCRRecognition(BaseEmbedding):
requestor: InterProcessRequestor,
device: str = "AUTO",
):
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
super().__init__(
model_name="paddleocr-onnx",
model_file="recognition_v4.onnx",
download_urls={
"recognition_v4.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/recognition_v4.onnx",
"ppocr_keys_v1.txt": f"{GITHUB_ENDPOINT}/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/ppocr_keys_v1.txt",
"recognition_v4.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/recognition_v4.onnx",
"ppocr_keys_v1.txt": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/v4/ppocr_keys_v1.txt",
},
)
self.requestor = requestor
@@ -221,12 +218,11 @@ class LicensePlateDetector(BaseEmbedding):
requestor: InterProcessRequestor,
device: str = "AUTO",
):
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
super().__init__(
model_name="yolov9_license_plate",
model_file="yolov9-256-license-plates.onnx",
download_urls={
"yolov9-256-license-plates.onnx": f"{GITHUB_ENDPOINT}/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
"yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
},
)
@@ -266,7 +262,7 @@ class LicensePlateDetector(BaseEmbedding):
self.runner = get_optimized_runner(
os.path.join(self.download_path, self.model_file),
self.device,
model_type=EnrichmentModelTypeEnum.yolov9_license_plate.value,
model_type="yolov9",
)
def _preprocess_inputs(self, raw_inputs):

View File

@@ -12,4 +12,3 @@ class EnrichmentModelTypeEnum(str, Enum):
jina_v1 = "jina_v1"
jina_v2 = "jina_v2"
paddleocr = "paddleocr"
yolov9_license_plate = "yolov9_license_plate"

View File

@@ -32,7 +32,7 @@ def register_genai_provider(key: GenAIProviderEnum):
class GenAIClient:
"""Generative AI client for Frigate."""
def __init__(self, genai_config: GenAIConfig, timeout: int = 120) -> None:
def __init__(self, genai_config: GenAIConfig, timeout: int = 60) -> None:
self.genai_config: GenAIConfig = genai_config
self.timeout = timeout
self.provider = self._init_provider()
@@ -44,7 +44,6 @@ class GenAIClient:
concerns: list[str],
preferred_language: str | None,
debug_save: bool,
activity_context_prompt: str,
) -> ReviewMetadata | None:
"""Generate a description for the review item activity."""
@@ -66,36 +65,29 @@ class GenAIClient:
context_prompt = f"""
Please analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
**Normal activity patterns for this property:**
{activity_context_prompt}
Your task is to provide a clear, accurate description of the scene that:
Your task is to provide a clear, security-focused description of the scene that:
1. States exactly what is happening based on observable actions and movements.
2. Evaluates whether the observable evidence suggests normal activity for this property or genuine security concerns.
2. Identifies and emphasizes behaviors that match patterns of suspicious activity.
3. Assigns a potential_threat_level based on the definitions below, applying them consistently.
**IMPORTANT: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider higher threat levels if the activity clearly deviates from normal patterns or shows genuine security concerns.**
Facts come first, but identifying security risks is the primary goal.
When forming your description:
- **CRITICAL: Only describe objects explicitly listed in "Detected objects" below.** Do not infer or mention additional people, vehicles, or objects not present in the detected objects list, even if visual patterns suggest them. If only a car is detected, do not describe a person interacting with it unless "person" is also in the detected objects list.
- **Only describe actions actually visible in the frames.** Do not assume or infer actions that you don't observe happening. If someone walks toward furniture but you never see them sit, do not say they sat. Stick to what you can see across the sequence.
- Describe what you observe: actions, movements, interactions with objects and the environment. Include any observable environmental changes (e.g., lighting changes triggered by activity).
- Note visible details such as clothing, items being carried or placed, tools or equipment present, and how they interact with the property or objects.
- Consider the full sequence chronologically: what happens from start to finish, how duration and actions relate to the location and objects involved.
- **Use the actual timestamp provided in "Activity started at"** below for time of day context—do not infer time from image brightness or darkness. Unusual hours (late night/early morning) should increase suspicion when the observable behavior itself appears questionable. However, recognize that some legitimate activities can occur at any hour.
- Identify patterns that suggest genuine security concerns: testing doors/windows on vehicles or buildings, accessing unauthorized areas, attempting to conceal actions, extended loitering without apparent purpose, taking items, behavior that clearly doesn't align with the zone context and detected objects.
- **Weigh all evidence holistically**: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider Level 1 if the activity clearly deviates from normal patterns or shows genuine security concerns that warrant attention.
- Describe the time, people, and objects exactly as seen. Include any observable environmental changes (e.g., lighting changes triggered by activity).
- Time of day should **increase suspicion only when paired with unusual or security-relevant behaviors**. Do not raise the threat level for common residential activities (e.g., residents walking pets, retrieving mail, gardening, playing with pets, supervising children) even at unusual hours, unless other suspicious indicators are present.
- Focus on behaviors that are uncharacteristic of innocent activity: loitering without clear purpose, avoiding cameras, inspecting vehicles/doors, changing behavior when lights activate, scanning surroundings without an apparent benign reason.
- **Benign context override**: If scanning or looking around is clearly part of an innocent activity (such as playing with a dog, gardening, supervising children, or watching for a pet), do not treat it as suspicious.
Your response MUST be a flat JSON object with:
- `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
- `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
- `scene` (string): A full description including setting, entities, actions, and any plausible supported inferences.
- `confidence` (float): 0-1 confidence in the analysis.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below.
{get_concern_prompt()}
Threat-level definitions:
- 0 — **Normal activity (DEFAULT)**: What you observe matches the normal activity patterns above or is consistent with expected activity for this property type. The observable evidence—considering zone context, detected objects, and timing together—supports a benign explanation. **Use this level for routine activities even if minor ambiguous elements exist.**
- 1 — **Potentially suspicious**: Observable behavior raises genuine security concerns that warrant human review. The evidence doesn't support a routine explanation and clearly deviates from the normal patterns above. Examples: testing doors/windows on vehicles or structures, accessing areas that don't align with the activity, taking items that likely don't belong to them, behavior clearly inconsistent with the zone and context, or activity that lacks any visible legitimate indicators. **Only use this level when the activity clearly doesn't match normal patterns.**
- 2 — **Immediate threat**: Clear evidence of forced entry, break-in, vandalism, aggression, weapons, theft in progress, or active property damage.
- 0 — Typical or expected activity for this location/time (includes residents, guests, or known animals engaged in normal activities, even if they glance around or scan surroundings).
- 1 — Unusual or suspicious activity: At least one security-relevant behavior is present **and not explainable by a normal residential activity**.
- 2 — Active or immediate threat: Breaking in, vandalism, aggression, weapon display.
Sequence details:
- Frame 1 = earliest, Frame {len(thumbnails)} = latest
@@ -106,9 +98,8 @@ Sequence details:
**IMPORTANT:**
- Values must be plain strings, floats, or integers — no nested objects, no extra commentary.
- Only describe objects from the "Detected objects" list above. Do not hallucinate additional objects.
{get_language_prompt()}
"""
"""
logger.debug(
f"Sending {len(thumbnails)} images to create review description on {review_data['camera']}"
)
@@ -144,7 +135,6 @@ Sequence details:
if review_data["recognized_objects"]:
metadata.potential_threat_level = 0
metadata.time = review_data["start"]
return metadata
except Exception as e:
# rarely LLMs can fail to follow directions on output format
@@ -156,75 +146,34 @@ Sequence details:
return None
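
For reference, a hedged example of the flat JSON object the review prompt above asks the model to return, using only the fields named in the prompt; the values are invented.

# Example (invented values) of a well-formed model reply for the review prompt above.
example_reply = {
    "scene": "A person walks up the driveway, places a package by the front door, and leaves.",
    "confidence": 0.82,
    "potential_threat_level": 0,
}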
def generate_review_summary(
self,
start_ts: float,
end_ts: float,
segments: list[dict[str, Any]],
debug_save: bool,
self, start_ts: float, end_ts: float, segments: list[dict[str, Any]]
) -> str | None:
"""Generate a summary of review item descriptions over a period of time."""
time_range = f"{datetime.datetime.fromtimestamp(start_ts).strftime('%B %d, %Y at %I:%M %p')} to {datetime.datetime.fromtimestamp(end_ts).strftime('%B %d, %Y at %I:%M %p')}"
time_range = f"{datetime.datetime.fromtimestamp(start_ts).strftime('%I:%M %p')} to {datetime.datetime.fromtimestamp(end_ts).strftime('%I:%M %p')}"
timeline_summary_prompt = f"""
You are a security officer.
Time range: {time_range}.
You are a security officer. Time range: {time_range}.
Input: JSON list with "scene", "confidence", "potential_threat_level" (1-2), "other_concerns".
Write a report:
Task: Write a concise, human-presentable security report in markdown format.
Security Summary - {time_range}
[One-sentence overview of activity]
[Chronological bullet list of events with timestamps if in scene]
[Final threat assessment]
Rules for the report:
- Title & overview
- Start with:
# Security Summary - {time_range}
- Write a 1-2 sentence situational overview capturing the general pattern of the period.
- Event details
- Present events in chronological order as a bullet list.
- **If multiple events occur within the same minute or overlapping time range, COMBINE them into a single bullet.**
- Summarize the distinct activities as sub-points under the shared timestamp.
- If no timestamp is given, preserve order but label as “Time not specified.”
- Use bold timestamps for clarity.
- Group bullets under subheadings when multiple events fall into the same category (e.g., Vehicle Activity, Porch Activity, Unusual Behavior).
- Threat levels
- Always show (threat level: X) for each event.
- If multiple events at the same time share the same threat level, only state it once.
- Final assessment
- End with a Final Assessment section.
- If all events are threat level 1 with no escalation:
Final assessment: Only normal residential activity observed during this period.
- If threat level 2+ events are present, clearly summarize them as Potential concerns requiring review.
- Conciseness
- Do not repeat benign clothing/appearance details unless they distinguish individuals.
- Summarize similar routine events instead of restating full scene descriptions.
"""
Rules:
- List events in order.
- Highlight potential_threat_level ≥ 1 with exact times.
- Note any of the additional concerns which are present.
- Note unusual activity even if not threats.
- If no threats: "Final assessment: Only normal activity observed during this period."
- No commentary, questions, or recommendations.
- Output only the report.
"""
for item in segments:
timeline_summary_prompt += f"\n{item}"
if debug_save:
with open(
os.path.join(
CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}", "prompt.txt"
),
"w",
) as f:
f.write(timeline_summary_prompt)
response = self._send(timeline_summary_prompt, [])
if debug_save and response:
with open(
os.path.join(
CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}", "response.txt"
),
"w",
) as f:
f.write(response)
return response
return self._send(timeline_summary_prompt, [])
def generate_object_description(
self,
@@ -234,9 +183,9 @@ Rules for the report:
) -> Optional[str]:
"""Generate a description for the frame."""
try:
prompt = camera_config.objects.genai.object_prompts.get(
prompt = camera_config.genai.object_prompts.get(
event.label,
camera_config.objects.genai.prompt,
camera_config.genai.prompt,
).format(**model_to_dict(event))
except KeyError as e:
logger.error(f"Invalid key in GenAI prompt: {e}")
@@ -253,10 +202,6 @@ Rules for the report:
"""Submit a request to the provider."""
return None
def get_context_size(self) -> int:
"""Get the context window size for this provider in tokens."""
return 4096
def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
"""Get the GenAI client."""

View File

@@ -71,7 +71,3 @@ class OpenAIClient(GenAIClient):
if len(result.choices) > 0:
return result.choices[0].message.content.strip()
return None
def get_context_size(self) -> int:
"""Get the context window size for Azure OpenAI."""
return 128000

View File

@@ -53,8 +53,3 @@ class GeminiClient(GenAIClient):
# No description was generated
return None
return description
def get_context_size(self) -> int:
"""Get the context window size for Gemini."""
# Gemini Pro Vision has a 1M token context window
return 1000000

View File

@@ -54,9 +54,3 @@ class OllamaClient(GenAIClient):
except (TimeoutException, ResponseError) as e:
logger.warning("Ollama returned an error: %s", str(e))
return None
def get_context_size(self) -> int:
"""Get the context window size for Ollama."""
return self.genai_config.provider_options.get("options", {}).get(
"num_ctx", 4096
)
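
The Ollama get_context_size above reads num_ctx out of provider_options. A hypothetical mapping that the lookup would pick up, shown only to make the fallback behavior concrete:

# Hypothetical provider_options shape read by get_context_size above.
provider_options = {"options": {"num_ctx": 8192}}
num_ctx = provider_options.get("options", {}).get("num_ctx", 4096)  # -> 8192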

View File

@@ -66,8 +66,3 @@ class OpenAIClient(GenAIClient):
except (TimeoutException, Exception) as e:
logger.warning("OpenAI returned an error: %s", str(e))
return None
def get_context_size(self) -> int:
"""Get the context window size for OpenAI."""
# OpenAI GPT-4 Vision models have 128K token context window
return 128000

View File

@@ -80,7 +80,9 @@ class RecordingMaintainer(threading.Thread):
[CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.record],
)
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all.value)
self.recordings_publisher = RecordingsDataPublisher()
self.recordings_publisher = RecordingsDataPublisher(
RecordingsDataTypeEnum.recordings_available_through
)
self.stop_event = stop_event
self.object_recordings_info: dict[str, list] = defaultdict(list)
@@ -96,41 +98,6 @@ class RecordingMaintainer(threading.Thread):
and not d.startswith("preview_")
]
# publish newest cached segment per camera (including in use files)
newest_cache_segments: dict[str, dict[str, Any]] = {}
for cache in cache_files:
cache_path = os.path.join(CACHE_DIR, cache)
basename = os.path.splitext(cache)[0]
camera, date = basename.rsplit("@", maxsplit=1)
start_time = datetime.datetime.strptime(
date, CACHE_SEGMENT_FORMAT
).astimezone(datetime.timezone.utc)
if (
camera not in newest_cache_segments
or start_time > newest_cache_segments[camera]["start_time"]
):
newest_cache_segments[camera] = {
"start_time": start_time,
"cache_path": cache_path,
}
for camera, newest in newest_cache_segments.items():
self.recordings_publisher.publish(
(
camera,
newest["start_time"].timestamp(),
newest["cache_path"],
),
RecordingsDataTypeEnum.latest.value,
)
# publish None for cameras with no cache files (but only if we know the camera exists)
for camera_name in self.config.cameras:
if camera_name not in newest_cache_segments:
self.recordings_publisher.publish(
(camera_name, None, None),
RecordingsDataTypeEnum.latest.value,
)
files_in_use = []
for process in psutil.process_iter():
try:
@@ -144,7 +111,7 @@ class RecordingMaintainer(threading.Thread):
except psutil.Error:
continue
# group recordings by camera (skip in-use for validation/moving)
# group recordings by camera
grouped_recordings: defaultdict[str, list[dict[str, Any]]] = defaultdict(list)
for cache in cache_files:
# Skip files currently in use
@@ -266,9 +233,7 @@ class RecordingMaintainer(threading.Thread):
recordings[0]["start_time"].timestamp()
if self.config.cameras[camera].record.enabled
else None,
None,
),
RecordingsDataTypeEnum.saved.value,
)
)
recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)
@@ -285,7 +250,7 @@ class RecordingMaintainer(threading.Thread):
async def validate_and_move_segment(
self, camera: str, reviews: list[ReviewSegment], recording: dict[str, Any]
) -> Optional[Recordings]:
) -> None:
cache_path: str = recording["cache_path"]
start_time: datetime.datetime = recording["start_time"]
record_config = self.config.cameras[camera].record
@@ -296,7 +261,7 @@ class RecordingMaintainer(threading.Thread):
or not self.config.cameras[camera].record.enabled
):
self.drop_segment(cache_path)
return None
return
if cache_path in self.end_time_cache:
end_time, duration = self.end_time_cache[cache_path]
@@ -305,18 +270,10 @@ class RecordingMaintainer(threading.Thread):
self.config.ffmpeg, cache_path, get_duration=True
)
if not segment_info.get("has_valid_video", False):
logger.warning(
f"Invalid or missing video stream in segment {cache_path}. Discarding."
)
self.recordings_publisher.publish(
(camera, start_time.timestamp(), cache_path),
RecordingsDataTypeEnum.invalid.value,
)
self.drop_segment(cache_path)
return None
duration = float(segment_info.get("duration", -1))
if segment_info["duration"]:
duration = float(segment_info["duration"])
else:
duration = -1
# ensure duration is within expected length
if 0 < duration < MAX_SEGMENT_DURATION:
@@ -327,18 +284,8 @@ class RecordingMaintainer(threading.Thread):
logger.warning(f"Failed to probe corrupt segment {cache_path}")
logger.warning(f"Discarding a corrupt recording segment: {cache_path}")
self.recordings_publisher.publish(
(camera, start_time.timestamp(), cache_path),
RecordingsDataTypeEnum.invalid.value,
)
self.drop_segment(cache_path)
return None
# this segment has a valid duration and has video data, so publish an update
self.recordings_publisher.publish(
(camera, start_time.timestamp(), cache_path),
RecordingsDataTypeEnum.valid.value,
)
Path(cache_path).unlink(missing_ok=True)
return
record_config = self.config.cameras[camera].record
highest = None

View File

@@ -361,14 +361,6 @@ def stats_snapshot(
embeddings_metrics.review_desc_dps.value, 2
)
if embeddings_metrics.object_desc_speed.value > 0.0:
stats["embeddings"]["object_description_speed"] = round(
embeddings_metrics.object_desc_speed.value * 1000, 2
)
stats["embeddings"]["object_descriptions"] = round(
embeddings_metrics.object_desc_dps.value, 2
)
for key in embeddings_metrics.classification_speeds.keys():
stats["embeddings"][f"{key}_classification_speed"] = round(
embeddings_metrics.classification_speeds[key].value * 1000, 2

View File

@@ -1,7 +1,7 @@
import logging
import random
import string
from typing import Any, Sequence, cast
from typing import Any, Sequence
import cv2
import numpy as np
@@ -17,11 +17,6 @@ from frigate.camera import PTZMetrics
from frigate.config import CameraConfig
from frigate.ptz.autotrack import PtzMotionEstimator
from frigate.track import ObjectTracker
from frigate.track.stationary_classifier import (
StationaryMotionClassifier,
StationaryThresholds,
get_stationary_threshold,
)
from frigate.util.image import (
SharedMemoryFrameManager,
get_histogram,
@@ -32,6 +27,12 @@ from frigate.util.object import average_boxes, median_of_boxes
logger = logging.getLogger(__name__)
THRESHOLD_KNOWN_ACTIVE_IOU = 0.2
THRESHOLD_STATIONARY_CHECK_IOU = 0.6
THRESHOLD_ACTIVE_CHECK_IOU = 0.9
MAX_STATIONARY_HISTORY = 10
# Normalizes distance from estimate relative to object size
# Other ideas:
# - if estimates are inaccurate for first N detections, compare with last_detection (may be fine)
@@ -118,7 +119,6 @@ class NorfairTracker(ObjectTracker):
self.ptz_motion_estimator: PtzMotionEstimator | None = None
self.camera_name = config.name
self.track_id_map: dict[str, str] = {}
self.stationary_classifier = StationaryMotionClassifier()
# Define tracker configurations for static camera
self.object_type_configs = {
@@ -321,15 +321,23 @@ class NorfairTracker(ObjectTracker):
# tracks the current position of the object based on the last N bounding boxes
# returns False if the object has moved outside its previous position
def update_position(
self,
id: str,
box: list[int],
stationary: bool,
thresholds: StationaryThresholds,
yuv_frame: np.ndarray | None,
) -> bool:
def reset_position(xmin: int, ymin: int, xmax: int, ymax: int) -> None:
def update_position(self, id: str, box: list[int], stationary: bool) -> bool:
xmin, ymin, xmax, ymax = box
position = self.positions[id]
self.stationary_box_history[id].append(box)
if len(self.stationary_box_history[id]) > MAX_STATIONARY_HISTORY:
self.stationary_box_history[id] = self.stationary_box_history[id][
-MAX_STATIONARY_HISTORY:
]
avg_iou = intersection_over_union(
box, average_boxes(self.stationary_box_history[id])
)
# object has minimal or zero iou
# assume object is active
if avg_iou < THRESHOLD_KNOWN_ACTIVE_IOU:
self.positions[id] = {
"xmins": [xmin],
"ymins": [ymin],
@@ -340,50 +348,13 @@ class NorfairTracker(ObjectTracker):
"xmax": xmax,
"ymax": ymax,
}
xmin, ymin, xmax, ymax = box
position = self.positions[id]
self.stationary_box_history[id].append(box)
if len(self.stationary_box_history[id]) > thresholds.max_stationary_history:
self.stationary_box_history[id] = self.stationary_box_history[id][
-thresholds.max_stationary_history :
]
avg_box = average_boxes(self.stationary_box_history[id])
avg_iou = intersection_over_union(box, avg_box)
median_box = median_of_boxes(self.stationary_box_history[id])
# Establish anchor early when stationary and stable
if stationary and yuv_frame is not None:
history = self.stationary_box_history[id]
if id not in self.stationary_classifier.anchor_crops and len(history) >= 5:
stability_iou = intersection_over_union(avg_box, median_box)
if stability_iou >= 0.7:
self.stationary_classifier.ensure_anchor(
id, yuv_frame, cast(tuple[int, int, int, int], median_box)
)
# object has minimal or zero iou
# assume object is active
if avg_iou < thresholds.known_active_iou:
if stationary and yuv_frame is not None:
if not self.stationary_classifier.evaluate(
id, yuv_frame, cast(tuple[int, int, int, int], tuple(box))
):
reset_position(xmin, ymin, xmax, ymax)
return False
else:
reset_position(xmin, ymin, xmax, ymax)
return False
return False
threshold = (
thresholds.stationary_check_iou
if stationary
else thresholds.active_check_iou
THRESHOLD_STATIONARY_CHECK_IOU if stationary else THRESHOLD_ACTIVE_CHECK_IOU
)
# object has iou below threshold, check median and optionally crop similarity
# object has iou below threshold, check median to reduce outliers
if avg_iou < threshold:
median_iou = intersection_over_union(
(
@@ -392,26 +363,27 @@ class NorfairTracker(ObjectTracker):
position["xmax"],
position["ymax"],
),
median_box,
median_of_boxes(self.stationary_box_history[id]),
)
# if the median iou drops below the threshold
# assume object is no longer stationary
if median_iou < threshold:
# Before flipping to active, run the classifier when a YUV frame is available
if stationary and yuv_frame is not None:
if not self.stationary_classifier.evaluate(
id, yuv_frame, cast(tuple[int, int, int, int], tuple(box))
):
reset_position(xmin, ymin, xmax, ymax)
return False
else:
reset_position(xmin, ymin, xmax, ymax)
return False
self.positions[id] = {
"xmins": [xmin],
"ymins": [ymin],
"xmaxs": [xmax],
"ymaxs": [ymax],
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return False
# if there are more than 5 and less than 10 entries for the position, add the bounding box
# and recompute the position box
if len(position["xmins"]) < 10:
if 5 <= len(position["xmins"]) < 10:
position["xmins"].append(xmin)
position["ymins"].append(ymin)
position["xmaxs"].append(xmax)
@@ -444,13 +416,7 @@ class NorfairTracker(ObjectTracker):
return False
def update(
self,
track_id: str,
obj: dict[str, Any],
thresholds: StationaryThresholds,
yuv_frame: np.ndarray | None,
) -> None:
def update(self, track_id: str, obj: dict[str, Any]) -> None:
id = self.track_id_map[track_id]
self.disappeared[id] = 0
stationary = (
@@ -458,7 +424,7 @@ class NorfairTracker(ObjectTracker):
>= self.detect_config.stationary.threshold
)
# update the motionless count if the object has not moved to a new position
if self.update_position(id, obj["box"], stationary, thresholds, yuv_frame):
if self.update_position(id, obj["box"], stationary):
self.tracked_objects[id]["motionless_count"] += 1
if self.is_expired(id):
self.deregister(id, track_id)
@@ -474,7 +440,6 @@ class NorfairTracker(ObjectTracker):
self.tracked_objects[id]["position_changes"] += 1
self.tracked_objects[id]["motionless_count"] = 0
self.stationary_box_history[id] = []
self.stationary_classifier.on_active(id)
self.tracked_objects[id].update(obj)
@@ -502,15 +467,6 @@ class NorfairTracker(ObjectTracker):
) -> None:
# Group detections by object type
detections_by_type: dict[str, list[Detection]] = {}
yuv_frame: np.ndarray | None = None
if (
self.ptz_metrics.autotracker_enabled.value
or self.detect_config.stationary.classifier
):
yuv_frame = self.frame_manager.get(
frame_name, self.camera_config.frame_shape_yuv
)
for obj in detections:
label = obj[0]
if label not in detections_by_type:
@@ -525,6 +481,9 @@ class NorfairTracker(ObjectTracker):
embedding = None
if self.ptz_metrics.autotracker_enabled.value:
yuv_frame = self.frame_manager.get(
frame_name, self.camera_config.frame_shape_yuv
)
embedding = get_histogram(
yuv_frame, obj[2][0], obj[2][1], obj[2][2], obj[2][3]
)
@@ -616,13 +575,7 @@ class NorfairTracker(ObjectTracker):
self.tracked_objects[id]["estimate"] = new_obj["estimate"]
# else update it
else:
thresholds = get_stationary_threshold(new_obj["label"])
self.update(
str(t.global_id),
new_obj,
thresholds,
yuv_frame if thresholds.motion_classifier_enabled else None,
)
self.update(str(t.global_id), new_obj)
# clear expired tracks
expired_ids = [k for k in self.track_id_map.keys() if k not in active_ids]
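
The stationary checks in update_position above hinge on IoU against thresholds such as 0.2 (known active) and 0.6 (stationary check). A self-contained worked example, with invented boxes and the IoU written inline rather than imported from frigate.util.object:

# Illustrative IoU computation matching how update_position compares a new box
# against the averaged history (boxes are [xmin, ymin, xmax, ymax]; values invented).
def iou(a, b):
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / float(area_a + area_b - inter)

history_avg = [100, 100, 200, 200]
new_box = [110, 110, 210, 210]
print(iou(new_box, history_avg))  # ~0.68: above 0.6, so a stationary object stays stationary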

View File

@@ -1,254 +0,0 @@
"""Tools for determining if an object is stationary."""
import logging
from dataclasses import dataclass, field
from typing import Any, cast
import cv2
import numpy as np
from scipy.ndimage import gaussian_filter
logger = logging.getLogger(__name__)
@dataclass
class StationaryThresholds:
"""IOU thresholds and history parameters for stationary object classification.
This allows different sensitivity settings for different object types.
"""
# Objects to apply these thresholds to
# If empty, apply to all objects
objects: list[str] = field(default_factory=list)
# Threshold of IoU that causes the object to immediately be considered active
# Below this threshold, assume object is active
known_active_iou: float = 0.2
# IOU threshold for checking if stationary object has moved
# If mean and median IOU drops below this, assume object is no longer stationary
stationary_check_iou: float = 0.6
# IOU threshold for checking if active object has changed position
# Higher threshold makes it more difficult for the object to be considered stationary
active_check_iou: float = 0.9
# Maximum number of bounding boxes to keep in stationary history
max_stationary_history: int = 10
# Whether to use the motion classifier
motion_classifier_enabled: bool = False
# Thresholds for objects that are expected to be stationary
STATIONARY_OBJECT_THRESHOLDS = StationaryThresholds(
objects=["bbq_grill", "package", "waste_bin"],
known_active_iou=0.0,
motion_classifier_enabled=True,
)
# Thresholds for objects that are active but can be stationary for longer periods of time
DYNAMIC_OBJECT_THRESHOLDS = StationaryThresholds(
objects=["bicycle", "boat", "car", "motorcycle", "tractor", "truck"],
active_check_iou=0.75,
motion_classifier_enabled=True,
)
def get_stationary_threshold(label: str) -> StationaryThresholds:
"""Get the stationary thresholds for a given object label."""
if label in STATIONARY_OBJECT_THRESHOLDS.objects:
return STATIONARY_OBJECT_THRESHOLDS
if label in DYNAMIC_OBJECT_THRESHOLDS.objects:
return DYNAMIC_OBJECT_THRESHOLDS
return StationaryThresholds()
class StationaryMotionClassifier:
"""Fallback classifier to prevent false flips from stationary to active.
Uses appearance consistency on a fixed spatial region (historical median box)
to detect actual movement, ignoring bounding box detection variations.
"""
CROP_SIZE = 96
NCC_KEEP_THRESHOLD = 0.90 # High correlation = keep stationary
NCC_ACTIVE_THRESHOLD = 0.85 # Low correlation = consider active
SHIFT_KEEP_THRESHOLD = 0.02 # Small shift = keep stationary
SHIFT_ACTIVE_THRESHOLD = 0.04 # Large shift = consider active
DRIFT_ACTIVE_THRESHOLD = 0.12 # Cumulative drift over 5 frames
CHANGED_FRAMES_TO_FLIP = 2
def __init__(self) -> None:
self.anchor_crops: dict[str, np.ndarray] = {}
self.anchor_boxes: dict[str, tuple[int, int, int, int]] = {}
self.changed_counts: dict[str, int] = {}
self.shift_histories: dict[str, list[float]] = {}
# Pre-compute Hanning window for phase correlation
hann = np.hanning(self.CROP_SIZE).astype(np.float64)
self._hann2d = np.outer(hann, hann)
def reset(self, id: str) -> None:
logger.debug("StationaryMotionClassifier.reset: id=%s", id)
if id in self.anchor_crops:
del self.anchor_crops[id]
if id in self.anchor_boxes:
del self.anchor_boxes[id]
self.changed_counts[id] = 0
self.shift_histories[id] = []
def _extract_y_crop(
self, yuv_frame: np.ndarray, box: tuple[int, int, int, int]
) -> np.ndarray:
"""Extract and normalize Y-plane crop from bounding box."""
y_height = yuv_frame.shape[0] // 3 * 2
width = yuv_frame.shape[1]
x1 = max(0, min(width - 1, box[0]))
y1 = max(0, min(y_height - 1, box[1]))
x2 = max(0, min(width - 1, box[2]))
y2 = max(0, min(y_height - 1, box[3]))
if x2 <= x1:
x2 = min(width - 1, x1 + 1)
if y2 <= y1:
y2 = min(y_height - 1, y1 + 1)
# Extract Y-plane crop, resize, and blur
y_plane = yuv_frame[0:y_height, 0:width]
crop = y_plane[y1:y2, x1:x2]
crop_resized = cv2.resize(
crop, (self.CROP_SIZE, self.CROP_SIZE), interpolation=cv2.INTER_AREA
)
result = cast(np.ndarray[Any, Any], gaussian_filter(crop_resized, sigma=0.5))
logger.debug(
"_extract_y_crop: box=%s clamped=(%d,%d,%d,%d) crop_shape=%s",
box,
x1,
y1,
x2,
y2,
crop.shape if "crop" in locals() else None,
)
return result
def ensure_anchor(
self, id: str, yuv_frame: np.ndarray, median_box: tuple[int, int, int, int]
) -> None:
"""Initialize anchor crop from stable median box when object becomes stationary."""
if id not in self.anchor_crops:
self.anchor_boxes[id] = median_box
self.anchor_crops[id] = self._extract_y_crop(yuv_frame, median_box)
self.changed_counts[id] = 0
self.shift_histories[id] = []
logger.debug(
"ensure_anchor: initialized id=%s median_box=%s crop_shape=%s",
id,
median_box,
self.anchor_crops[id].shape,
)
def on_active(self, id: str) -> None:
"""Reset state when object becomes active to allow re-anchoring."""
logger.debug("on_active: id=%s became active; resetting state", id)
self.reset(id)
def evaluate(
self, id: str, yuv_frame: np.ndarray, current_box: tuple[int, int, int, int]
) -> bool:
"""Return True to keep stationary, False to flip to active.
Compares the same spatial region (historical median box) across frames
to detect actual movement, ignoring bounding box variations.
"""
if id not in self.anchor_crops or id not in self.anchor_boxes:
logger.debug("evaluate: id=%s has no anchor; default keep stationary", id)
return True
# Compare same spatial region across frames
anchor_box = self.anchor_boxes[id]
anchor_crop = self.anchor_crops[id]
curr_crop = self._extract_y_crop(yuv_frame, anchor_box)
# Compute appearance and motion metrics
ncc = cv2.matchTemplate(curr_crop, anchor_crop, cv2.TM_CCOEFF_NORMED)[0, 0]
a64 = anchor_crop.astype(np.float64) * self._hann2d
c64 = curr_crop.astype(np.float64) * self._hann2d
(shift_x, shift_y), _ = cv2.phaseCorrelate(a64, c64)
shift_norm = float(np.hypot(shift_x, shift_y)) / float(self.CROP_SIZE)
logger.debug(
"evaluate: id=%s metrics ncc=%.4f shift_norm=%.4f (shift_x=%.3f, shift_y=%.3f)",
id,
float(ncc),
shift_norm,
float(shift_x),
float(shift_y),
)
# Update rolling shift history
history = self.shift_histories.get(id, [])
history.append(shift_norm)
if len(history) > 5:
history = history[-5:]
self.shift_histories[id] = history
drift_sum = float(sum(history))
logger.debug(
"evaluate: id=%s history_len=%d last_shift=%.4f drift_sum=%.4f",
id,
len(history),
history[-1] if history else -1.0,
drift_sum,
)
# Early exit for clear stationary case
if ncc >= self.NCC_KEEP_THRESHOLD and shift_norm < self.SHIFT_KEEP_THRESHOLD:
self.changed_counts[id] = 0
logger.debug(
"evaluate: id=%s early-stationary keep=True (ncc>=%.2f and shift<%.2f)",
id,
self.NCC_KEEP_THRESHOLD,
self.SHIFT_KEEP_THRESHOLD,
)
return True
# Check for movement indicators
movement_detected = (
ncc < self.NCC_ACTIVE_THRESHOLD
or shift_norm >= self.SHIFT_ACTIVE_THRESHOLD
or drift_sum >= self.DRIFT_ACTIVE_THRESHOLD
)
if movement_detected:
cnt = self.changed_counts.get(id, 0) + 1
self.changed_counts[id] = cnt
if (
cnt >= self.CHANGED_FRAMES_TO_FLIP
or drift_sum >= self.DRIFT_ACTIVE_THRESHOLD
):
logger.debug(
"evaluate: id=%s flip_to_active=True cnt=%d drift_sum=%.4f thresholds(changed>=%d drift>=%.2f)",
id,
cnt,
drift_sum,
self.CHANGED_FRAMES_TO_FLIP,
self.DRIFT_ACTIVE_THRESHOLD,
)
return False
logger.debug(
"evaluate: id=%s movement_detected cnt=%d keep_until_cnt>=%d",
id,
cnt,
self.CHANGED_FRAMES_TO_FLIP,
)
else:
self.changed_counts[id] = 0
logger.debug("evaluate: id=%s no_movement keep=True", id)
return True
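
The classifier file above keys its movement signal off a normalized phase-correlation shift. A self-contained sketch of that measurement on synthetic crops, using the constants defined in the file; the synthetic 3-pixel drift is invented for illustration:

# Standalone illustration of the shift metric used by StationaryMotionClassifier.evaluate.
import cv2
import numpy as np

CROP_SIZE = 96
rng = np.random.default_rng(0)
anchor = rng.integers(0, 255, (CROP_SIZE, CROP_SIZE), dtype=np.uint8)
current = np.roll(anchor, shift=3, axis=1)  # simulate a 3px horizontal drift

hann = np.hanning(CROP_SIZE)
window = np.outer(hann, hann)
(shift_x, shift_y), _ = cv2.phaseCorrelate(
    anchor.astype(np.float64) * window, current.astype(np.float64) * window
)
shift_norm = float(np.hypot(shift_x, shift_y)) / CROP_SIZE
print(shift_norm)  # ~0.03: between the keep (0.02) and active (0.04) thresholds,
                   # so NCC and accumulated drift decide the outcome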

View File

@@ -995,26 +995,7 @@ def get_histogram(image, x_min, y_min, x_max, y_max):
return cv2.normalize(hist, hist).flatten()
def create_thumbnail(
yuv_frame: np.ndarray, box: tuple[int, int, int, int], height=500
) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame."""
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
region = calculate_region(
frame.shape, box[0], box[1], box[2], box[3], height, multiplier=1.4
)
frame = frame[region[1] : region[3], region[0] : region[2]]
width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if ret:
return jpg.tobytes()
return None
def ensure_jpeg_bytes(image_data: bytes) -> bytes:
def ensure_jpeg_bytes(image_data):
"""Ensure image data is jpeg bytes for genai"""
try:
img_array = np.frombuffer(image_data, dtype=np.uint8)

View File

@@ -284,9 +284,7 @@ def post_process_yolox(
def get_ort_providers(
force_cpu: bool = False,
device: str | None = "AUTO",
requires_fp16: bool = False,
force_cpu: bool = False, device: str | None = "AUTO", requires_fp16: bool = False
) -> tuple[list[str], list[dict[str, Any]]]:
if force_cpu:
return (
@@ -353,15 +351,12 @@ def get_ort_providers(
}
)
elif provider == "MIGraphXExecutionProvider":
migraphx_cache_dir = os.path.join(MODEL_CACHE_DIR, "migraphx")
os.makedirs(migraphx_cache_dir, exist_ok=True)
providers.append(provider)
options.append(
{
"migraphx_model_cache_dir": migraphx_cache_dir,
}
)
# MIGraphX uses more CPU than ROCm while delivering the same speed
if device == "MIGraphX":
providers.append(provider)
options.append({})
else:
continue
elif provider == "CPUExecutionProvider":
providers.append(provider)
options.append(

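The hunk above cuts off mid-statement; for orientation, here is a hedged sketch of how the (providers, options) pair returned by get_ort_providers is typically handed to onnxruntime. The model path and the import location are assumptions.

# Illustrative: wire get_ort_providers output into an onnxruntime session.
import onnxruntime as ort

from frigate.util.model import get_ort_providers  # assumed import location

providers, options = get_ort_providers(device="MIGraphX")
session = ort.InferenceSession(
    "/config/model_cache/yolov9.onnx",  # hypothetical model path
    providers=providers,
    provider_options=options,
)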
View File

@@ -269,20 +269,7 @@ def is_object_filtered(obj, objects_to_track, object_filters):
def get_min_region_size(model_config: ModelConfig) -> int:
"""Get the min region size."""
largest_dimension = max(model_config.height, model_config.width)
if largest_dimension > 320:
# We originally tested allowing any model to have a region down to half of the model size
# but this led to many false positives. In this case we specifically target larger models
# which can benefit from a smaller region in some cases to detect smaller objects.
half = int(largest_dimension / 2)
if half % 4 == 0:
return half
return int((half + 3) / 4) * 4
return largest_dimension
return max(model_config.height, model_config.width)
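
A quick worked example of the half-size rounding shown in the longer branch above; the input dimensions are invented.

# Worked example of the half-size rounding in get_min_region_size (inputs invented).
def min_region(largest_dimension: int) -> int:
    if largest_dimension > 320:
        half = largest_dimension // 2
        if half % 4 == 0:
            return half
        return ((half + 3) // 4) * 4  # round up to the next multiple of 4
    return largest_dimension

print(min_region(320))  # 320 (small models keep the full size)
print(min_region(640))  # 320
print(min_region(518))  # 260 (259 rounded up to a multiple of 4)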
def create_tensor_input(frame, model_config: ModelConfig, region):

View File

@@ -303,7 +303,7 @@ def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, s
"-o",
"-",
"-s",
"1000", # Intel changed this from seconds to milliseconds in 2024+ versions
"1",
]
if intel_gpu_device:
@@ -603,87 +603,87 @@ def auto_detect_hwaccel() -> str:
async def get_video_properties(
ffmpeg, url: str, get_duration: bool = False
) -> dict[str, Any]:
async def probe_with_ffprobe(
url: str,
) -> tuple[bool, int, int, Optional[str], float]:
"""Fallback using ffprobe: returns (valid, width, height, codec, duration)."""
cmd = [
ffmpeg.ffprobe_path,
"-v",
"quiet",
"-print_format",
"json",
"-show_format",
"-show_streams",
url,
]
try:
proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
async def calculate_duration(video: Optional[Any]) -> float:
duration = None
if video is not None:
# Get the frames per second (fps) of the video stream
fps = video.get(cv2.CAP_PROP_FPS)
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
if fps and total_frames:
duration = total_frames / fps
# if cv2 failed need to use ffprobe
if duration is None:
p = await asyncio.create_subprocess_exec(
ffmpeg.ffprobe_path,
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{url}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, _ = await proc.communicate()
if proc.returncode != 0:
return False, 0, 0, None, -1
await p.wait()
data = json.loads(stdout.decode())
video_streams = [
s for s in data.get("streams", []) if s.get("codec_type") == "video"
]
if not video_streams:
return False, 0, 0, None, -1
if p.returncode == 0:
result = (await p.stdout.read()).decode()
else:
result = None
v = video_streams[0]
width = int(v.get("width", 0))
height = int(v.get("height", 0))
codec = v.get("codec_name")
if result:
try:
duration = float(result.strip())
except ValueError:
duration = -1
else:
duration = -1
duration_str = data.get("format", {}).get("duration")
duration = float(duration_str) if duration_str else -1.0
return duration
return True, width, height, codec, duration
except (json.JSONDecodeError, ValueError, KeyError, asyncio.SubprocessError):
return False, 0, 0, None, -1
width = height = 0
def probe_with_cv2(url: str) -> tuple[bool, int, int, Optional[str], float]:
"""Primary attempt using cv2: returns (valid, width, height, fourcc, duration)."""
cap = cv2.VideoCapture(url)
if not cap.isOpened():
cap.release()
return False, 0, 0, None, -1
try:
# Open the video stream using OpenCV
video = cv2.VideoCapture(url)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
valid = width > 0 and height > 0
fourcc = None
duration = -1.0
# Check if the video stream was opened successfully
if not video.isOpened():
video = None
except Exception:
video = None
if valid:
fourcc_int = int(cap.get(cv2.CAP_PROP_FOURCC))
fourcc = fourcc_int.to_bytes(4, "little").decode("latin-1").strip()
result = {}
if get_duration:
fps = cap.get(cv2.CAP_PROP_FPS)
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if fps > 0 and total_frames > 0:
duration = total_frames / fps
cap.release()
return valid, width, height, fourcc, duration
# try cv2 first
has_video, width, height, fourcc, duration = probe_with_cv2(url)
# fallback to ffprobe if needed
if not has_video or (get_duration and duration < 0):
has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
result: dict[str, Any] = {"has_valid_video": has_video}
if has_video:
result.update({"width": width, "height": height})
if fourcc:
result["fourcc"] = fourcc
if get_duration:
result["duration"] = duration
result["duration"] = await calculate_duration(video)
if video is not None:
# Get the width of frames in the video stream
width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
# Get the height of frames in the video stream
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Get the stream encoding
fourcc_int = int(video.get(cv2.CAP_PROP_FOURCC))
fourcc = (
chr((fourcc_int >> 0) & 255)
+ chr((fourcc_int >> 8) & 255)
+ chr((fourcc_int >> 16) & 255)
+ chr((fourcc_int >> 24) & 255)
)
# Release the video stream
video.release()
result["width"] = round(width)
result["height"] = round(height)
result["fourcc"] = fourcc
return result

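The two fourcc decodings that appear in this hunk (int.to_bytes(..., "little") in the newer code, chr-shifting in the older code) produce the same string; a quick self-contained check with a sample value:

# Sample value only: "h264" packed little-endian ('h'=0x68, '2'=0x32, '6'=0x36, '4'=0x34).
fourcc_int = 0x34363268
via_bytes = fourcc_int.to_bytes(4, "little").decode("latin-1")
via_chars = "".join(chr((fourcc_int >> (8 * i)) & 255) for i in range(4))
assert via_bytes == via_chars == "h264"

The newer version additionally calls .strip(), which only changes the result for space-padded codes.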
View File

@@ -1,9 +1,10 @@
import datetime
import logging
import os
import queue
import subprocess as sp
import threading
import time
from datetime import datetime, timedelta, timezone
from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
@@ -12,10 +13,6 @@ import cv2
from frigate.camera import CameraMetrics, PTZMetrics
from frigate.comms.inter_process import InterProcessRequestor
from frigate.comms.recordings_updater import (
RecordingsDataSubscriber,
RecordingsDataTypeEnum,
)
from frigate.config import CameraConfig, DetectConfig, ModelConfig
from frigate.config.camera.camera import CameraTypeEnum
from frigate.config.camera.updater import (
@@ -23,6 +20,8 @@ from frigate.config.camera.updater import (
CameraConfigUpdateSubscriber,
)
from frigate.const import (
CACHE_DIR,
CACHE_SEGMENT_FORMAT,
PROCESS_PRIORITY_HIGH,
REQUEST_REGION_GRID,
)
@@ -130,7 +129,7 @@ def capture_frames(
fps.value = frame_rate.eps()
skipped_fps.value = skipped_eps.eps()
current_frame.value = datetime.now().timestamp()
current_frame.value = datetime.datetime.now().timestamp()
frame_name = f"{config.name}_frame{frame_index}"
frame_buffer = frame_manager.write(frame_name)
try:
@@ -200,11 +199,6 @@ class CameraWatchdog(threading.Thread):
self.requestor = InterProcessRequestor()
self.was_enabled = self.config.enabled
self.segment_subscriber = RecordingsDataSubscriber(RecordingsDataTypeEnum.all)
self.latest_valid_segment_time: float = 0
self.latest_invalid_segment_time: float = 0
self.latest_cache_segment_time: float = 0
def _update_enabled_state(self) -> bool:
"""Fetch the latest config and update enabled state."""
self.config_subscriber.check_for_updates()
@@ -249,11 +243,6 @@ class CameraWatchdog(threading.Thread):
if enabled:
self.logger.debug(f"Enabling camera {self.config.name}")
self.start_all_ffmpeg()
# reset all timestamps
self.latest_valid_segment_time = 0
self.latest_invalid_segment_time = 0
self.latest_cache_segment_time = 0
else:
self.logger.debug(f"Disabling camera {self.config.name}")
self.stop_all_ffmpeg()
@@ -271,37 +260,7 @@ class CameraWatchdog(threading.Thread):
if not enabled:
continue
while True:
update = self.segment_subscriber.check_for_update(timeout=0)
if update == (None, None):
break
raw_topic, payload = update
if raw_topic and payload:
topic = str(raw_topic)
camera, segment_time, _ = payload
if camera != self.config.name:
continue
if topic.endswith(RecordingsDataTypeEnum.valid.value):
self.logger.debug(
f"Latest valid recording segment time on {camera}: {segment_time}"
)
self.latest_valid_segment_time = segment_time
elif topic.endswith(RecordingsDataTypeEnum.invalid.value):
self.logger.warning(
f"Invalid recording segment detected for {camera} at {segment_time}"
)
self.latest_invalid_segment_time = segment_time
elif topic.endswith(RecordingsDataTypeEnum.latest.value):
if segment_time is not None:
self.latest_cache_segment_time = segment_time
else:
self.latest_cache_segment_time = 0
now = datetime.now().timestamp()
now = datetime.datetime.now().timestamp()
if not self.capture_thread.is_alive():
self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
@@ -339,55 +298,18 @@ class CameraWatchdog(threading.Thread):
poll = p["process"].poll()
if self.config.record.enabled and "record" in p["roles"]:
now_utc = datetime.now().astimezone(timezone.utc)
latest_cache_dt = (
datetime.fromtimestamp(
self.latest_cache_segment_time, tz=timezone.utc
latest_segment_time = self.get_latest_segment_datetime(
p.get(
"latest_segment_time",
datetime.datetime.now().astimezone(datetime.timezone.utc),
)
if self.latest_cache_segment_time > 0
else now_utc - timedelta(seconds=1)
)
latest_valid_dt = (
datetime.fromtimestamp(
self.latest_valid_segment_time, tz=timezone.utc
)
if self.latest_valid_segment_time > 0
else now_utc - timedelta(seconds=1)
)
latest_invalid_dt = (
datetime.fromtimestamp(
self.latest_invalid_segment_time, tz=timezone.utc
)
if self.latest_invalid_segment_time > 0
else now_utc - timedelta(seconds=1)
)
# ensure segments are still being created and that they have valid video data
cache_stale = now_utc > (latest_cache_dt + timedelta(seconds=120))
valid_stale = now_utc > (latest_valid_dt + timedelta(seconds=120))
invalid_stale_condition = (
self.latest_invalid_segment_time > 0
and now_utc > (latest_invalid_dt + timedelta(seconds=120))
and self.latest_valid_segment_time
<= self.latest_invalid_segment_time
)
invalid_stale = invalid_stale_condition
if cache_stale or valid_stale or invalid_stale:
if cache_stale:
reason = "No new recording segments were created"
elif valid_stale:
reason = "No new valid recording segments were created"
else: # invalid_stale
reason = (
"No valid segments created since last invalid segment"
)
if datetime.datetime.now().astimezone(datetime.timezone.utc) > (
latest_segment_time + datetime.timedelta(seconds=120)
):
self.logger.error(
f"{reason} for {self.config.name} in the last 120s. Restarting the ffmpeg record process..."
f"No new recording segments were created for {self.config.name} in the last 120s. restarting the ffmpeg record process..."
)
p["process"] = start_or_restart_ffmpeg(
p["cmd"],
@@ -406,7 +328,7 @@ class CameraWatchdog(threading.Thread):
self.requestor.send_data(
f"{self.config.name}/status/record", "online"
)
p["latest_segment_time"] = self.latest_cache_segment_time
p["latest_segment_time"] = latest_segment_time
if poll is None:
continue
@@ -424,7 +346,6 @@ class CameraWatchdog(threading.Thread):
self.stop_all_ffmpeg()
self.logpipe.close()
self.config_subscriber.stop()
self.segment_subscriber.stop()
def start_ffmpeg_detect(self):
ffmpeg_cmd = [
@@ -484,6 +405,33 @@ class CameraWatchdog(threading.Thread):
p["logpipe"].close()
self.ffmpeg_other_processes.clear()
def get_latest_segment_datetime(
self, latest_segment: datetime.datetime
) -> datetime.datetime:
"""Checks if ffmpeg is still writing recording segments to cache."""
cache_files = sorted(
[
d
for d in os.listdir(CACHE_DIR)
if os.path.isfile(os.path.join(CACHE_DIR, d))
and d.endswith(".mp4")
and not d.startswith("preview_")
]
)
newest_segment_time = latest_segment
for file in cache_files:
if self.config.name in file:
basename = os.path.splitext(file)[0]
_, date = basename.rsplit("@", maxsplit=1)
segment_time = datetime.datetime.strptime(
date, CACHE_SEGMENT_FORMAT
).astimezone(datetime.timezone.utc)
if segment_time > newest_segment_time:
newest_segment_time = segment_time
return newest_segment_time
class CameraCaptureRunner(threading.Thread):
def __init__(
@@ -779,7 +727,10 @@ def process_frames(
time.sleep(0.1)
continue
if datetime.now().astimezone(timezone.utc) > next_region_update:
if (
datetime.datetime.now().astimezone(datetime.timezone.utc)
> next_region_update
):
region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name)
next_region_update = get_tomorrow_at_time(2)

View File

@@ -1,163 +0,0 @@
#!/usr/bin/env python3
"""
Generate English translation JSON files from Pydantic config models.
This script dynamically extracts all top-level config sections from FrigateConfig
and generates JSON translation files with titles and descriptions for the web UI.
"""
import json
import logging
import shutil
from pathlib import Path
from typing import Any, Dict, Optional, get_args, get_origin
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from frigate.config.config import FrigateConfig
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def get_field_translations(field_info: FieldInfo) -> Dict[str, str]:
"""Extract title and description from a Pydantic field."""
translations = {}
if field_info.title:
translations["label"] = field_info.title
if field_info.description:
translations["description"] = field_info.description
return translations
def process_model_fields(model: type[BaseModel]) -> Dict[str, Any]:
"""
Recursively process a Pydantic model to extract translations.
Returns a nested dictionary structure matching the config schema,
with title and description for each field.
"""
translations = {}
model_fields = model.model_fields
for field_name, field_info in model_fields.items():
field_translations = get_field_translations(field_info)
# Get the field's type annotation
field_type = field_info.annotation
# Handle Optional types
origin = get_origin(field_type)
if origin is Optional or (
hasattr(origin, "__name__") and origin.__name__ == "UnionType"
):
args = get_args(field_type)
field_type = next(
(arg for arg in args if arg is not type(None)), field_type
)
# Handle Dict types (like Dict[str, CameraConfig])
if get_origin(field_type) is dict:
dict_args = get_args(field_type)
if len(dict_args) >= 2:
value_type = dict_args[1]
if isinstance(value_type, type) and issubclass(value_type, BaseModel):
nested_translations = process_model_fields(value_type)
if nested_translations:
field_translations["properties"] = nested_translations
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
nested_translations = process_model_fields(field_type)
if nested_translations:
field_translations["properties"] = nested_translations
if field_translations:
translations[field_name] = field_translations
return translations
def generate_section_translation(
section_name: str, field_info: FieldInfo
) -> Dict[str, Any]:
"""
Generate translation structure for a top-level config section.
"""
section_translations = get_field_translations(field_info)
field_type = field_info.annotation
origin = get_origin(field_type)
if origin is Optional or (
hasattr(origin, "__name__") and origin.__name__ == "UnionType"
):
args = get_args(field_type)
field_type = next((arg for arg in args if arg is not type(None)), field_type)
# Handle Dict types (like detectors, cameras, camera_groups)
if get_origin(field_type) is dict:
dict_args = get_args(field_type)
if len(dict_args) >= 2:
value_type = dict_args[1]
if isinstance(value_type, type) and issubclass(value_type, BaseModel):
nested = process_model_fields(value_type)
if nested:
section_translations["properties"] = nested
# If the field itself is a BaseModel, process it
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
nested = process_model_fields(field_type)
if nested:
section_translations["properties"] = nested
return section_translations
def main():
"""Main function to generate config translations."""
# Define output directory
output_dir = Path(__file__).parent / "web" / "public" / "locales" / "en" / "config"
logger.info(f"Output directory: {output_dir}")
# Clean and recreate the output directory
if output_dir.exists():
logger.info(f"Removing existing directory: {output_dir}")
shutil.rmtree(output_dir)
logger.info(f"Creating directory: {output_dir}")
output_dir.mkdir(parents=True, exist_ok=True)
config_fields = FrigateConfig.model_fields
logger.info(f"Found {len(config_fields)} top-level config sections")
for field_name, field_info in config_fields.items():
if field_name.startswith("_"):
continue
logger.info(f"Processing section: {field_name}")
section_data = generate_section_translation(field_name, field_info)
if not section_data:
logger.warning(f"No translations found for section: {field_name}")
continue
output_file = output_dir / f"{field_name}.json"
with open(output_file, "w", encoding="utf-8") as f:
json.dump(section_data, f, indent=2, ensure_ascii=False)
logger.info(f"Generated: {output_file}")
logger.info("Translation generation complete!")
if __name__ == "__main__":
main()

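For reference, a minimal sketch of what the deleted script's process_model_fields() extracts from a field's title/description metadata (the model below is hypothetical, not a real Frigate config section):

from pydantic import BaseModel, Field

class ExampleSection(BaseModel):
    enabled: bool = Field(default=False, title="Enable feature", description="Turn the feature on.")
    port: int = Field(default=1883, title="Port")

# process_model_fields(ExampleSection) would return:
# {
#     "enabled": {"label": "Enable feature", "description": "Turn the feature on."},
#     "port": {"label": "Port"},
# }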
View File

@@ -4,8 +4,8 @@
"rsc": false,
"tsx": true,
"tailwind": {
"config": "tailwind.config.cjs",
"css": "src/index.css",
"config": "tailwind.config.js",
"css": "index.css",
"baseColor": "slate",
"cssVariables": true
},

web/package-lock.json (generated)
View File

@@ -15,7 +15,7 @@
"@radix-ui/react-aspect-ratio": "^1.1.2",
"@radix-ui/react-checkbox": "^1.1.4",
"@radix-ui/react-context-menu": "^2.2.6",
"@radix-ui/react-dialog": "^1.1.15",
"@radix-ui/react-dialog": "^1.1.6",
"@radix-ui/react-dropdown-menu": "^2.1.6",
"@radix-ui/react-hover-card": "^1.1.6",
"@radix-ui/react-label": "^2.1.2",
@@ -23,14 +23,14 @@
"@radix-ui/react-radio-group": "^1.2.3",
"@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-select": "^2.1.6",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-separator": "^1.1.2",
"@radix-ui/react-slider": "^1.2.3",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-slot": "^1.2.2",
"@radix-ui/react-switch": "^1.1.3",
"@radix-ui/react-tabs": "^1.1.3",
"@radix-ui/react-toggle": "^1.1.2",
"@radix-ui/react-toggle-group": "^1.1.2",
"@radix-ui/react-tooltip": "^1.2.8",
"@radix-ui/react-tooltip": "^1.1.8",
"apexcharts": "^3.52.0",
"axios": "^1.7.7",
"class-variance-authority": "^0.7.1",
@@ -1250,42 +1250,6 @@
}
}
},
"node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-dialog": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.6.tgz",
"integrity": "sha512-/IVhJV5AceX620DUJ4uYVMymzsipdKBzo3edo+omeskCKGm9FRHM0ebIdbPnlQVJqyuHbuBltQUOG2mOTq2IYw==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.1",
"@radix-ui/react-compose-refs": "1.1.1",
"@radix-ui/react-context": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.5",
"@radix-ui/react-focus-guards": "1.1.1",
"@radix-ui/react-focus-scope": "1.1.2",
"@radix-ui/react-id": "1.1.0",
"@radix-ui/react-portal": "1.1.4",
"@radix-ui/react-presence": "1.1.2",
"@radix-ui/react-primitive": "2.0.2",
"@radix-ui/react-slot": "1.1.2",
"@radix-ui/react-use-controllable-state": "1.1.0",
"aria-hidden": "^1.2.4",
"react-remove-scroll": "^2.6.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
@@ -1483,23 +1447,23 @@
}
},
"node_modules/@radix-ui/react-dialog": {
"version": "1.1.15",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz",
"integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==",
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.6.tgz",
"integrity": "sha512-/IVhJV5AceX620DUJ4uYVMymzsipdKBzo3edo+omeskCKGm9FRHM0ebIdbPnlQVJqyuHbuBltQUOG2mOTq2IYw==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-dismissable-layer": "1.1.11",
"@radix-ui/react-focus-guards": "1.1.3",
"@radix-ui/react-focus-scope": "1.1.7",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-portal": "1.1.9",
"@radix-ui/react-presence": "1.1.5",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-use-controllable-state": "1.2.2",
"@radix-ui/primitive": "1.1.1",
"@radix-ui/react-compose-refs": "1.1.1",
"@radix-ui/react-context": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.5",
"@radix-ui/react-focus-guards": "1.1.1",
"@radix-ui/react-focus-scope": "1.1.2",
"@radix-ui/react-id": "1.1.0",
"@radix-ui/react-portal": "1.1.4",
"@radix-ui/react-presence": "1.1.2",
"@radix-ui/react-primitive": "2.0.2",
"@radix-ui/react-slot": "1.1.2",
"@radix-ui/react-use-controllable-state": "1.1.0",
"aria-hidden": "^1.2.4",
"react-remove-scroll": "^2.6.3"
},
@@ -1518,255 +1482,14 @@
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/primitive": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
"integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==",
"license": "MIT"
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-compose-refs": {
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz",
"integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-context": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz",
"integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-dismissable-layer": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz",
"integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
"integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-escape-keydown": "1.1.1"
"@radix-ui/react-compose-refs": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-guards": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz",
"integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-scope": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz",
"integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-id": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz",
"integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-portal": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
"integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-presence": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz",
"integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz",
"integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-controllable-state": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz",
"integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-effect-event": "0.0.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-escape-keydown": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz",
"integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-layout-effect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
@@ -2350,35 +2073,12 @@
}
},
"node_modules/@radix-ui/react-separator": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz",
"integrity": "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==",
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.2.tgz",
"integrity": "sha512-oZfHcaAp2Y6KFBX6I5P1u7CQoy4lheCGiYj+pGFrHy8E/VNRb5E39TkTr3JrV520csPBTZjkuKFdEsjS5EUNKQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
"@radix-ui/react-primitive": "2.0.2"
},
"peerDependencies": {
"@types/react": "*",
@@ -2429,9 +2129,9 @@
}
},
"node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.2.tgz",
"integrity": "sha512-y7TBO4xN4Y94FvcWIOIh18fM4R1A8S4q1jhoz4PNzOoHsFcN8pogcFmZrTYAm4F9VRUrWP/Mw7xSKybIeRI+CQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
@@ -2575,23 +2275,23 @@
}
},
"node_modules/@radix-ui/react-tooltip": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz",
"integrity": "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==",
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.8.tgz",
"integrity": "sha512-YAA2cu48EkJZdAMHC0dqo9kialOcRStbtiY4nJPaht7Ptrhcvpo+eDChaM6BIs8kL6a8Z5l5poiqLnXcNduOkA==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-dismissable-layer": "1.1.11",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-popper": "1.2.8",
"@radix-ui/react-portal": "1.1.9",
"@radix-ui/react-presence": "1.1.5",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-use-controllable-state": "1.2.2",
"@radix-ui/react-visually-hidden": "1.2.3"
"@radix-ui/primitive": "1.1.1",
"@radix-ui/react-compose-refs": "1.1.1",
"@radix-ui/react-context": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.5",
"@radix-ui/react-id": "1.1.0",
"@radix-ui/react-popper": "1.2.2",
"@radix-ui/react-portal": "1.1.4",
"@radix-ui/react-presence": "1.1.2",
"@radix-ui/react-primitive": "2.0.2",
"@radix-ui/react-slot": "1.1.2",
"@radix-ui/react-use-controllable-state": "1.1.0",
"@radix-ui/react-visually-hidden": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
@@ -2608,99 +2308,13 @@
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/primitive": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
"integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==",
"license": "MIT"
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-arrow": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz",
"integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-compose-refs": {
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz",
"integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-context": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz",
"integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-dismissable-layer": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz",
"integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
"integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-escape-keydown": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-id": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz",
"integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
"@radix-ui/react-compose-refs": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
@@ -2712,241 +2326,6 @@
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-popper": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz",
"integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==",
"license": "MIT",
"dependencies": {
"@floating-ui/react-dom": "^2.0.0",
"@radix-ui/react-arrow": "1.1.7",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-layout-effect": "1.1.1",
"@radix-ui/react-use-rect": "1.1.1",
"@radix-ui/react-use-size": "1.1.1",
"@radix-ui/rect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-portal": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
"integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-presence": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz",
"integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz",
"integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-controllable-state": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz",
"integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-effect-event": "0.0.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-escape-keydown": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz",
"integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-layout-effect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-rect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz",
"integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==",
"license": "MIT",
"dependencies": {
"@radix-ui/rect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-size": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz",
"integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-visually-hidden": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz",
"integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/rect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz",
"integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==",
"license": "MIT"
},
"node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz",
@@ -2980,39 +2359,6 @@
}
}
},
"node_modules/@radix-ui/react-use-effect-event": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz",
"integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-use-effect-event/node_modules/@radix-ui/react-use-layout-effect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-use-escape-keydown": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz",

View File

@@ -21,7 +21,7 @@
"@radix-ui/react-aspect-ratio": "^1.1.2",
"@radix-ui/react-checkbox": "^1.1.4",
"@radix-ui/react-context-menu": "^2.2.6",
"@radix-ui/react-dialog": "^1.1.15",
"@radix-ui/react-dialog": "^1.1.6",
"@radix-ui/react-dropdown-menu": "^2.1.6",
"@radix-ui/react-hover-card": "^1.1.6",
"@radix-ui/react-label": "^2.1.2",
@@ -29,14 +29,14 @@
"@radix-ui/react-radio-group": "^1.2.3",
"@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-select": "^2.1.6",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-separator": "^1.1.2",
"@radix-ui/react-slider": "^1.2.3",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-slot": "^1.2.2",
"@radix-ui/react-switch": "^1.1.3",
"@radix-ui/react-tabs": "^1.1.3",
"@radix-ui/react-toggle": "^1.1.2",
"@radix-ui/react-toggle-group": "^1.1.2",
"@radix-ui/react-tooltip": "^1.2.8",
"@radix-ui/react-tooltip": "^1.1.8",
"apexcharts": "^3.52.0",
"axios": "^1.7.7",
"class-variance-authority": "^0.7.1",

View File

@@ -56,14 +56,7 @@
"formattedTimestampMonthDayYear": {
"12hour": "МММ д, гггг",
"24hour": "МММ д, гггг"
},
"ago": "Преди {{timeAgo}}",
"untilForTime": "До {{time}}",
"untilForRestart": "Докато Frigate рестартира.",
"untilRestart": "До рестарт",
"mo": "{{time}}мес",
"m": "{{time}}м",
"s": "{{time}}с"
}
},
"button": {
"apply": "Приложи",

View File

@@ -423,7 +423,7 @@
"paths": {
"title": "Cesty",
"desc": "Zobrazit významné body trasy sledovaného objektu",
"tips": "<p><strong>Cesty</strong></p><br><p>Čáry a kruhy označují významné body, kterými se sledovaný objekt během svého životního cyklu pohyboval.</p>"
"tips": "<p><strong>Cesty</strong></p><br><p>Čáry a kruhy označují významné body, kterými se sledovaný objekt během svého životního cyklu pohyboval."
}
},
"camera": {
@@ -604,8 +604,7 @@
"admin": "Správce",
"adminDesc": "Plný přístup ke všem funkcím.",
"viewer": "Divák",
"viewerDesc": "Omezení pouze na Živé dashboardy, Revize, Průzkumníka a Exporty.",
"customDesc": "Vlastní role s konkrétním přístupem ke kameře."
"viewerDesc": "Omezení pouze na Živé dashboardy, Revize, Průzkumníka a Exporty."
},
"title": "Změnit Roli Uživatele",
"desc": "Aktualizovat oprávnění pro <strong>{{username}}</strong>",
@@ -795,99 +794,9 @@
"title": "Obsah",
"imagePlaceholder": "Vybrat obrázek",
"textPlaceholder": "Zadat textový obsah",
"imageDesc": "Vybrat obrázek, který spustí tuto akci, když bude detekován podobný obrázek.",
"textDesc": "Zadejte text, který spustí tuto akci, když bude zjištěn podobný popis sledovaného objektu.",
"error": {
"required": "Obsah je povinný."
}
},
"actions": {
"title": "Akce",
"desc": "Ve výchozím nastavení Frigate odesílá MQTT zprávu pro všechny spouštěče. Zvolte dodatečnou akci, která se má provést, když se tento spouštěč aktivuje.",
"error": {
"min": "Musí být vybrána alespoň jedna akce."
}
},
"threshold": {
"title": "Práh",
"error": {
"min": "Práh musí být alespoň 0",
"max": "Práh musí být nanejvýš 1"
}
"imageDesc": "Vybrat obrázek, který spustí tuto akci, když bude detekován podobný obrázek."
}
}
},
"toast": {
"success": {
"createTrigger": "Spouštěč {{name}} byl úspěšně vytvořen.",
"updateTrigger": "Spouštěč {{name}} byl úspěšně aktualizován.",
"deleteTrigger": "Spouštěč {{name}} byl úspěšně smazán."
},
"error": {
"createTriggerFailed": "Nepodařilo se vytvořit spouštěč: {{errorMessage}}",
"updateTriggerFailed": "Nepodařilo se aktualizovat spouštěč: {{errorMessage}}",
"deleteTriggerFailed": "Nepodařilo se smazat spouštěč: {{errorMessage}}"
}
}
},
"roles": {
"addRole": "Přidat roli",
"table": {
"role": "Role",
"cameras": "Kamery",
"actions": "Akce",
"noRoles": "Nebyly nalezeny žádné vlastní role.",
"editCameras": "Upravit kamery",
"deleteRole": "Smazat roli"
},
"toast": {
"success": {
"createRole": "Role {{role}} byla úspěšně vytvořena",
"updateCameras": "Kamery byly aktualizovány pro roli {{role}}",
"deleteRole": "Role {{role}} byla úspěšně smazána",
"userRolesUpdated": "{{count}} uživatel(ů) přiřazených k této roli bylo aktualizováno na „Divák“, který má přístup ke všem kamerám."
},
"error": {
"createRoleFailed": "Nepodařilo se vytvořit roli: {{errorMessage}}",
"updateCamerasFailed": "Nepodařilo se aktualizovat kamery: {{errorMessage}}",
"deleteRoleFailed": "Nepodařilo se smazat roli: {{errorMessage}}",
"userUpdateFailed": "Nepodařilo se aktualizovat role uživatele: {{errorMessage}}"
}
},
"dialog": {
"createRole": {
"title": "Vytvořit novou roli",
"desc": "Přidejte novou roli a určete oprávnění k přístupu ke kamerám."
},
"deleteRole": {
"title": "Smazat roli",
"warn": "Opravdu chcete smazat roli <strong>{{role}}</strong>?",
"deleting": "Mazání...",
"desc": "Tuto akci nelze vrátit zpět. Role bude trvale smazána a všichni uživatelé s touto rolí budou přeřazeni do role „Divák“, která poskytne přístup ke všem kamerám."
},
"form": {
"role": {
"title": "Název role",
"placeholder": "Zadejte název role",
"desc": "Povolena jsou pouze písmena, čísla, tečky a podtržítka.",
"roleIsRequired": "Název role je povinný",
"roleOnlyInclude": "Název role smí obsahovat pouze písmena, čísla, . nebo _",
"roleExists": "Role s tímto názvem již existuje."
},
"cameras": {
"title": "Kamery",
"desc": "Vyberte kamery, ke kterým má tato role přístup. Je vyžadována alespoň jedna kamera.",
"required": "Musí být vybrána alespoň jedna kamera."
}
},
"editCameras": {
"desc": "Aktualizujte přístup ke kamerám pro roli <strong>{{role}}</strong>.",
"title": "Upravit kamery role"
}
},
"management": {
"title": "Správa role diváka",
"desc": "Spravujte vlastní role diváků a jejich oprávnění k přístupu ke kamerám pro tuto instanci Frigate."
}
}
}

View File

@@ -5,80 +5,5 @@
"moo": "Bučanie",
"cowbell": "Kravský zvonec",
"pig": "Prasa",
"speech": "Tale",
"bicycle": "Cykel",
"car": "Bil",
"bellow": "Under",
"motorcycle": "Motorcykel",
"whispering": "Hvisker",
"bus": "Bus",
"laughter": "Latter",
"train": "Tog",
"boat": "Båd",
"crying": "Græder",
"tambourine": "Tambourin",
"marimba": "Marimba",
"trumpet": "Trumpet",
"trombone": "Trombone",
"violin": "Violin",
"flute": "Fløjte",
"saxophone": "Saxofon",
"clarinet": "Klarinet",
"harp": "Harpe",
"bell": "Klokke",
"harmonica": "Harmonika",
"bagpipes": "Sækkepibe",
"didgeridoo": "Didgeridoo",
"jazz": "Jazz",
"opera": "Opera",
"dubstep": "Dubstep",
"blues": "Blues",
"song": "Sang",
"lullaby": "Vuggevise",
"wind": "Vind",
"thunderstorm": "Tordenvejr",
"thunder": "Torden",
"water": "Vand",
"rain": "Regn",
"raindrop": "Regndråbe",
"waterfall": "Vandfald",
"waves": "Bølger",
"fire": "Ild",
"vehicle": "Køretøj",
"sailboat": "Sejlbåd",
"rowboat": "Robåd",
"motorboat": "Motorbåd",
"ship": "Skib",
"ambulance": "Ambulance",
"helicopter": "Helikopter",
"skateboard": "Skateboard",
"chainsaw": "Motorsav",
"door": "Dør",
"doorbell": "Dørklokke",
"slam": "Smæk",
"knock": "Bank",
"squeak": "Knirke",
"dishes": "Tallerkener",
"cutlery": "Bestik",
"sink": "Håndvask",
"bathtub": "Badekar",
"toothbrush": "Tandbørste",
"zipper": "Lynlås",
"coin": "Mønt",
"scissors": "Saks",
"typewriter": "Skrivemaskine",
"alarm": "Alarm",
"telephone": "Telefon",
"ringtone": "Ringetone",
"siren": "Sirene",
"foghorn": "Tågehorn",
"whistle": "Fløjte",
"clock": "Ur",
"printer": "Printer",
"camera": "Kamera",
"tools": "Værktøj",
"hammer": "Hammer",
"drill": "Bore",
"explosion": "Eksplosion",
"fireworks": "Nytårskrudt"
"speech": "Tale"
}

View File

@@ -5,9 +5,7 @@
"login": "Log ind",
"errors": {
"usernameRequired": "Brugernavn kræves",
"passwordRequired": "Kodeord kræves",
"loginFailed": "Login fejlede",
"unknownError": "Ukendt fejl. Tjek logs."
"passwordRequired": "Kodeord kræves"
}
}
}

View File

@@ -1,17 +1,6 @@
{
"group": {
"label": "Kamera Grupper",
"add": "Tilføj Kameragruppe",
"edit": "Rediger Kamera Gruppe",
"delete": {
"label": "Slet kamera gruppe",
"confirm": {
"title": "Bekræft sletning",
"desc": "Er du sikker på at du vil slette kamera gruppen <em>{{name}}</em>?"
}
},
"name": {
"label": "Navn"
}
"add": "Tilføj Kameragruppe"
}
}

View File

@@ -1,9 +1 @@
{
"restart": {
"title": "Er du sikker på at du vil genstarte Frigate?",
"button": "Genstart",
"restarting": {
"title": "Frigate genstarter"
}
}
}
{}

View File

@@ -1,17 +1 @@
{
"filter": "Filter",
"classes": {
"label": "Klasser",
"all": {
"title": "Alle klasser"
},
"count_one": "{{count}} Klasse",
"count_other": "{{count}} Klasser"
},
"labels": {
"all": {
"short": "Labels"
},
"count_one": "{{count}} Label"
}
}
{}

View File

@@ -1,8 +1,5 @@
{
"iconPicker": {
"selectIcon": "Vælg et ikon",
"search": {
"placeholder": "Søg efter ikoner…"
}
"selectIcon": "Vælg et ikon"
}
}

View File

@@ -1,7 +1 @@
{
"button": {
"downloadVideo": {
"label": "Download Video"
}
}
}
{}

View File

@@ -1,5 +1 @@
{
"noRecordingsFoundForThisTime": "Ingen optagelser fundet i det angivet tidsrum",
"noPreviewFound": "Ingen forhåndsvisning fundet",
"cameraDisabled": "Kamera er deaktiveret"
}
{}

View File

@@ -1,18 +1,3 @@
{
"person": "Person",
"bicycle": "Cykel",
"car": "Bil",
"motorcycle": "Motorcykel",
"airplane": "Flyvemaskine",
"bus": "Bus",
"train": "Tog",
"boat": "Båd",
"traffic_light": "Trafiklys",
"vehicle": "Køretøj",
"skateboard": "Skateboard",
"door": "Dør",
"sink": "Håndvask",
"toothbrush": "Tandbørste",
"scissors": "Saks",
"clock": "Ur"
"person": "Person"
}

View File

@@ -1,6 +1 @@
{
"documentTitle": "Konfigurationsstyring - Frigate",
"copyConfig": "Kopiér konfiguration",
"saveAndRestart": "Gem & Genstart",
"saveOnly": "Kun gem"
}
{}

View File

@@ -1,11 +1 @@
{
"alerts": "Alarmer",
"detections": "Detekteringer",
"motion": {
"label": "Bevægelse",
"only": "Kun bevægelse"
},
"allCameras": "Alle kameraer",
"timeline": "Tidslinje",
"camera": "Kamera"
}
{}

View File

@@ -9,11 +9,5 @@
"lifecycleItemDesc": {
"active": "{{label}} blev aktiv"
}
},
"exploreIsUnavailable": {
"embeddingsReindexing": {
"startingUp": "Starter…",
"estimatedTime": "Estimeret tid tilbage:"
}
}
}

View File

@@ -1,9 +1,4 @@
{
"documentTitle": "Eksporter - Frigate",
"search": "Søg",
"deleteExport.desc": "Er du sikker på at du vil slette {{exportName}}?",
"editExport": {
"title": "Omdøb Eksport",
"saveExport": "Gem Eksport"
}
"search": "Søg"
}

View File

@@ -1,10 +1,3 @@
{
"selectItem": "Vælg {{item}}",
"description": {
"addFace": "Gennemgang af tilføjelse til ansigts bibliotek",
"placeholder": "Angiv et navn for bibliotek"
},
"details": {
"person": "Person"
}
"selectItem": "Vælg {{item}}"
}

View File

@@ -1,12 +1 @@
{
"documentTitle": "Live - Frigate",
"documentTitle.withCamera": "{{camera}} - Live - Frigate",
"twoWayTalk": {
"enable": "Aktivér tovejskommunikation",
"disable": "Deaktiver tovejskommunikation"
},
"cameraAudio": {
"enable": "Aktivér kameralyd",
"disable": "Deaktivér kamera lyd"
}
}
{}

View File

@@ -1,11 +1 @@
{
"filter": "Filter",
"export": "Eksporter",
"calendar": "Kalender",
"filters": "Filtere",
"toast": {
"error": {
"endTimeMustAfterStartTime": "Sluttidspunkt skal være efter starttidspunkt"
}
}
}
{}

View File

@@ -1,11 +1,3 @@
{
"search": "Søg",
"savedSearches": "Gemte Søgninger",
"searchFor": "Søg efter {{inputValue}}",
"button": {
"save": "Gem søgning",
"delete": "Slet gemt søgning",
"filterInformation": "Filter information",
"filterActive": "Filtre aktiv"
}
"search": "Søg"
}

View File

@@ -1,8 +1,5 @@
{
"documentTitle": {
"default": "Indstillinger - Frigate",
"authentication": "Bruger Indstillinger - Frigate",
"camera": "Kamera indstillinger - Frigate",
"object": "Debug - Frigate"
"default": "Indstillinger - Frigate"
}
}

View File

@@ -1,12 +1 @@
{
"documentTitle": {
"cameras": "Kamera Statistik - Frigate",
"storage": "Lagrings Statistik - Frigate",
"logs": {
"frigate": "Frigate Logs - Frigate",
"go2rtc": "Go2RTC Logs - Frigate",
"nginx": "Nginx Logs - Frigate"
}
},
"title": "System"
}
{}

View File

@@ -1,26 +0,0 @@
{
"label": "Global Audio events configuration.",
"properties": {
"enabled": {
"label": "Enable audio events."
},
"max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event."
},
"min_volume": {
"label": "Min volume required to run audio detection."
},
"listen": {
"label": "Audio to listen for."
},
"filters": {
"label": "Audio filters."
},
"enabled_in_config": {
"label": "Keep track of original state of audio detection."
},
"num_threads": {
"label": "Number of detection threads"
}
}
}

View File

@@ -1,23 +0,0 @@
{
"label": "Audio transcription config.",
"properties": {
"enabled": {
"label": "Enable audio transcription."
},
"language": {
"label": "Language abbreviation to use for audio event transcription/translation."
},
"device": {
"label": "The device used for license plate recognition."
},
"model_size": {
"label": "The size of the embeddings model used."
},
"enabled_in_config": {
"label": "Keep track of original state of camera."
},
"live_enabled": {
"label": "Enable live transcriptions."
}
}
}

View File

@@ -1,35 +0,0 @@
{
"label": "Auth configuration.",
"properties": {
"enabled": {
"label": "Enable authentication"
},
"reset_admin_password": {
"label": "Reset the admin password on startup"
},
"cookie_name": {
"label": "Name for jwt token cookie"
},
"cookie_secure": {
"label": "Set secure flag on cookie"
},
"session_length": {
"label": "Session length for jwt session tokens"
},
"refresh_time": {
"label": "Refresh the session if it is going to expire in this many seconds"
},
"failed_login_rate_limit": {
"label": "Rate limits for failed login attempts."
},
"trusted_proxies": {
"label": "Trusted proxies for determining IP address to rate limit"
},
"hash_iterations": {
"label": "Password hash iterations"
},
"roles": {
"label": "Role to camera mappings. Empty list grants access to all cameras."
}
}
}

View File

@@ -1,37 +0,0 @@
{
"label": "Birdseye configuration.",
"properties": {
"enabled": {
"label": "Enable birdseye view."
},
"mode": {
"label": "Tracking mode."
},
"restream": {
"label": "Restream birdseye via RTSP."
},
"width": {
"label": "Birdseye width."
},
"height": {
"label": "Birdseye height."
},
"quality": {
"label": "Encoding quality."
},
"inactivity_threshold": {
"label": "Birdseye Inactivity Threshold"
},
"layout": {
"label": "Birdseye Layout Config",
"properties": {
"scaling_factor": {
"label": "Birdseye Scaling Factor"
},
"max_cameras": {
"label": "Max cameras"
}
}
}
}
}

View File

@@ -1,14 +0,0 @@
{
"label": "Camera group configuration",
"properties": {
"cameras": {
"label": "List of cameras in this group."
},
"icon": {
"label": "Icon that represents camera group."
},
"order": {
"label": "Sort order for group."
}
}
}
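
A hedged sketch of a camera group built from the keys above; the group name, camera names, and icon are hypothetical, and the top-level `camera_groups` key is assumed:

```yaml
camera_groups:
  front_yard:
    cameras:
      - front_door
      - driveway
    icon: LuCar    # icon shown for the group
    order: 0       # sort order in the UI
```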

View File

@@ -1,761 +0,0 @@
{
"label": "Camera configuration.",
"properties": {
"name": {
"label": "Camera name."
},
"friendly_name": {
"label": "Camera friendly name used in the Frigate UI."
},
"enabled": {
"label": "Enable camera."
},
"audio": {
"label": "Audio events configuration.",
"properties": {
"enabled": {
"label": "Enable audio events."
},
"max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event."
},
"min_volume": {
"label": "Min volume required to run audio detection."
},
"listen": {
"label": "Audio to listen for."
},
"filters": {
"label": "Audio filters."
},
"enabled_in_config": {
"label": "Keep track of original state of audio detection."
},
"num_threads": {
"label": "Number of detection threads"
}
}
},
"audio_transcription": {
"label": "Audio transcription config.",
"properties": {
"enabled": {
"label": "Enable audio transcription."
},
"language": {
"label": "Language abbreviation to use for audio event transcription/translation."
},
"device": {
"label": "The device used for license plate recognition."
},
"model_size": {
"label": "The size of the embeddings model used."
},
"enabled_in_config": {
"label": "Keep track of original state of camera."
},
"live_enabled": {
"label": "Enable live transcriptions."
}
}
},
"birdseye": {
"label": "Birdseye camera configuration.",
"properties": {
"enabled": {
"label": "Enable birdseye view for camera."
},
"mode": {
"label": "Tracking mode for camera."
},
"order": {
"label": "Position of the camera in the birdseye view."
}
}
},
"detect": {
"label": "Object detection configuration.",
"properties": {
"enabled": {
"label": "Detection Enabled."
},
"height": {
"label": "Height of the stream for the detect role."
},
"width": {
"label": "Width of the stream for the detect role."
},
"fps": {
"label": "Number of frames per second to process through detection."
},
"min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker."
},
"max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends."
},
"stationary": {
"label": "Stationary objects config.",
"properties": {
"interval": {
"label": "Frame interval for checking stationary objects."
},
"threshold": {
"label": "Number of frames without a position change for an object to be considered stationary"
},
"max_frames": {
"label": "Max frames for stationary objects.",
"properties": {
"default": {
"label": "Default max frames."
},
"objects": {
"label": "Object specific max frames."
}
}
},
"classifier": {
"label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary."
}
}
},
"annotation_offset": {
"label": "Milliseconds to offset detect annotations by."
}
}
},
"face_recognition": {
"label": "Face recognition config.",
"properties": {
"enabled": {
"label": "Enable face recognition."
},
"min_area": {
"label": "Min area of face box to consider running face recognition."
}
}
},
"ffmpeg": {
"label": "FFmpeg configuration for the camera.",
"properties": {
"path": {
"label": "FFmpeg path"
},
"global_args": {
"label": "Global FFmpeg arguments."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments."
},
"input_args": {
"label": "FFmpeg input arguments."
},
"output_args": {
"label": "FFmpeg output arguments per role.",
"properties": {
"detect": {
"label": "Detect role FFmpeg output arguments."
},
"record": {
"label": "Record role FFmpeg output arguments."
}
}
},
"retry_interval": {
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera."
},
"apple_compatibility": {
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players."
},
"inputs": {
"label": "Camera inputs."
}
}
},
"live": {
"label": "Live playback settings.",
"properties": {
"streams": {
"label": "Friendly names and restream names to use for live view."
},
"height": {
"label": "Live camera view height"
},
"quality": {
"label": "Live camera view quality"
}
}
},
"lpr": {
"label": "LPR config.",
"properties": {
"enabled": {
"label": "Enable license plate recognition."
},
"expire_time": {
"label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)."
},
"min_area": {
"label": "Minimum area of license plate to begin running recognition."
},
"enhancement": {
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition."
}
}
},
"motion": {
"label": "Motion detection configuration.",
"properties": {
"enabled": {
"label": "Enable motion on all cameras."
},
"threshold": {
"label": "Motion detection threshold (1-255)."
},
"lightning_threshold": {
"label": "Lightning detection threshold (0.3-1.0)."
},
"improve_contrast": {
"label": "Improve Contrast"
},
"contour_area": {
"label": "Contour Area"
},
"delta_alpha": {
"label": "Delta Alpha"
},
"frame_alpha": {
"label": "Frame Alpha"
},
"frame_height": {
"label": "Frame Height"
},
"mask": {
"label": "Coordinates polygon for the motion mask."
},
"mqtt_off_delay": {
"label": "Delay for updating MQTT with no motion detected."
},
"enabled_in_config": {
"label": "Keep track of original state of motion detection."
}
}
},
"objects": {
"label": "Object configuration.",
"properties": {
"track": {
"label": "Objects to track."
},
"filters": {
"label": "Object filters.",
"properties": {
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration."
}
}
},
"mask": {
"label": "Object mask."
},
"genai": {
"label": "Config for using genai to analyze objects.",
"properties": {
"enabled": {
"label": "Enable GenAI for camera."
},
"use_snapshot": {
"label": "Use snapshots for generating descriptions."
},
"prompt": {
"label": "Default caption prompt."
},
"object_prompts": {
"label": "Object specific prompts."
},
"objects": {
"label": "List of objects to run generative AI for."
},
"required_zones": {
"label": "List of required zones to be entered in order to run generative AI."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
},
"send_triggers": {
"label": "What triggers to use to send frames to generative AI for a tracked object.",
"properties": {
"tracked_object_end": {
"label": "Send once the object is no longer tracked."
},
"after_significant_updates": {
"label": "Send an early request to generative AI when X frames accumulated."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
}
}
}
}
},
"record": {
"label": "Record configuration.",
"properties": {
"enabled": {
"label": "Enable record on all cameras."
},
"sync_recordings": {
"label": "Sync recordings with disk on startup and once a day."
},
"expire_interval": {
"label": "Number of minutes to wait between cleanup runs."
},
"continuous": {
"label": "Continuous recording retention settings.",
"properties": {
"days": {
"label": "Default retention period."
}
}
},
"motion": {
"label": "Motion recording retention settings.",
"properties": {
"days": {
"label": "Default retention period."
}
}
},
"detections": {
"label": "Detection specific retention settings.",
"properties": {
"pre_capture": {
"label": "Seconds to retain before event starts."
},
"post_capture": {
"label": "Seconds to retain after event ends."
},
"retain": {
"label": "Event retention settings.",
"properties": {
"days": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
}
}
}
}
},
"alerts": {
"label": "Alert specific retention settings.",
"properties": {
"pre_capture": {
"label": "Seconds to retain before event starts."
},
"post_capture": {
"label": "Seconds to retain after event ends."
},
"retain": {
"label": "Event retention settings.",
"properties": {
"days": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
}
}
}
}
},
"export": {
"label": "Recording Export Config",
"properties": {
"timelapse_args": {
"label": "Timelapse Args"
}
}
},
"preview": {
"label": "Recording Preview Config",
"properties": {
"quality": {
"label": "Quality of recording preview."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of recording."
}
}
},
"review": {
"label": "Review configuration.",
"properties": {
"alerts": {
"label": "Review alerts config.",
"properties": {
"enabled": {
"label": "Enable alerts."
},
"labels": {
"label": "Labels to create alerts for."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as an alert."
},
"enabled_in_config": {
"label": "Keep track of original state of alerts."
},
"cutoff_time": {
"label": "Time to cutoff alerts after no alert-causing activity has occurred."
}
}
},
"detections": {
"label": "Review detections config.",
"properties": {
"enabled": {
"label": "Enable detections."
},
"labels": {
"label": "Labels to create detections for."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as a detection."
},
"cutoff_time": {
"label": "Time to cutoff detection after no detection-causing activity has occurred."
},
"enabled_in_config": {
"label": "Keep track of original state of detections."
}
}
},
"genai": {
"label": "Review description genai config.",
"properties": {
"enabled": {
"label": "Enable GenAI descriptions for review items."
},
"alerts": {
"label": "Enable GenAI for alerts."
},
"detections": {
"label": "Enable GenAI for detections."
},
"additional_concerns": {
"label": "Additional concerns that GenAI should make note of on this camera."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
},
"preferred_language": {
"label": "Preferred language for GenAI Response"
},
"activity_context_prompt": {
"label": "Custom activity context prompt defining normal activity patterns for this property."
}
}
}
}
},
"semantic_search": {
"label": "Semantic search configuration.",
"properties": {
"triggers": {
"label": "Trigger actions on tracked objects that match existing thumbnails or descriptions",
"properties": {
"enabled": {
"label": "Enable this trigger"
},
"type": {
"label": "Type of trigger"
},
"data": {
"label": "Trigger content (text phrase or image ID)"
},
"threshold": {
"label": "Confidence score required to run the trigger"
},
"actions": {
"label": "Actions to perform when trigger is matched"
}
}
}
}
},
"snapshots": {
"label": "Snapshot configuration.",
"properties": {
"enabled": {
"label": "Snapshots enabled."
},
"clean_copy": {
"label": "Create a clean copy of the snapshot image."
},
"timestamp": {
"label": "Add a timestamp overlay on the snapshot."
},
"bounding_box": {
"label": "Add a bounding box overlay on the snapshot."
},
"crop": {
"label": "Crop the snapshot to the detected object."
},
"required_zones": {
"label": "List of required zones to be entered in order to save a snapshot."
},
"height": {
"label": "Snapshot image height."
},
"retain": {
"label": "Snapshot retention.",
"properties": {
"default": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
},
"objects": {
"label": "Object retention period."
}
}
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)."
}
}
},
"timestamp_style": {
"label": "Timestamp style configuration.",
"properties": {
"position": {
"label": "Timestamp position."
},
"format": {
"label": "Timestamp format."
},
"color": {
"label": "Timestamp color.",
"properties": {
"red": {
"label": "Red"
},
"green": {
"label": "Green"
},
"blue": {
"label": "Blue"
}
}
},
"thickness": {
"label": "Timestamp thickness."
},
"effect": {
"label": "Timestamp effect."
}
}
},
"best_image_timeout": {
"label": "How long to wait for the image with the highest confidence score."
},
"mqtt": {
"label": "MQTT configuration.",
"properties": {
"enabled": {
"label": "Send image over MQTT."
},
"timestamp": {
"label": "Add timestamp to MQTT image."
},
"bounding_box": {
"label": "Add bounding box to MQTT image."
},
"crop": {
"label": "Crop MQTT image to detected object."
},
"height": {
"label": "MQTT image height."
},
"required_zones": {
"label": "List of required zones to be entered in order to send the image."
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)."
}
}
},
"notifications": {
"label": "Notifications configuration.",
"properties": {
"enabled": {
"label": "Enable notifications"
},
"email": {
"label": "Email required for push."
},
"cooldown": {
"label": "Cooldown period for notifications (time in seconds)."
},
"enabled_in_config": {
"label": "Keep track of original state of notifications."
}
}
},
"onvif": {
"label": "Camera Onvif Configuration.",
"properties": {
"host": {
"label": "Onvif Host"
},
"port": {
"label": "Onvif Port"
},
"user": {
"label": "Onvif Username"
},
"password": {
"label": "Onvif Password"
},
"tls_insecure": {
"label": "Onvif Disable TLS verification"
},
"autotracking": {
"label": "PTZ auto tracking config.",
"properties": {
"enabled": {
"label": "Enable PTZ object autotracking."
},
"calibrate_on_startup": {
"label": "Perform a camera calibration when Frigate starts."
},
"zooming": {
"label": "Autotracker zooming mode."
},
"zoom_factor": {
"label": "Zooming factor (0.1-0.75)."
},
"track": {
"label": "Objects to track."
},
"required_zones": {
"label": "List of required zones to be entered in order to begin autotracking."
},
"return_preset": {
"label": "Name of camera preset to return to when object tracking is over."
},
"timeout": {
"label": "Seconds to delay before returning to preset."
},
"movement_weights": {
"label": "Internal value used for PTZ movements based on the speed of your camera's motor."
},
"enabled_in_config": {
"label": "Keep track of original state of autotracking."
}
}
},
"ignore_time_mismatch": {
"label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server"
}
}
},
"type": {
"label": "Camera Type"
},
"ui": {
"label": "Camera UI Modifications.",
"properties": {
"order": {
"label": "Order of camera in UI."
},
"dashboard": {
"label": "Show this camera in Frigate dashboard UI."
}
}
},
"webui_url": {
"label": "URL to visit the camera directly from system page"
},
"zones": {
"label": "Zone configuration.",
"properties": {
"filters": {
"label": "Zone filters.",
"properties": {
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration."
}
}
},
"coordinates": {
"label": "Coordinates polygon for the defined zone."
},
"distances": {
"label": "Real-world distances for the sides of quadrilateral for the defined zone."
},
"inertia": {
"label": "Number of consecutive frames required for object to be considered present in the zone."
},
"loitering_time": {
"label": "Number of seconds that an object must loiter to be considered in the zone."
},
"speed_threshold": {
"label": "Minimum speed value for an object to be considered in the zone."
},
"objects": {
"label": "List of objects that can trigger the zone."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of camera."
}
}
}
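
The deleted file above labels every per-camera option. A minimal, hedged sketch wiring a handful of them into one camera definition; the camera name, stream URL, credentials, and zone coordinates are placeholders:

```yaml
cameras:
  front_door:
    enabled: true
    ffmpeg:
      inputs:
        - path: rtsp://user:password@192.168.1.10:554/stream   # placeholder stream
          roles:
            - detect
            - record
    detect:
      width: 1280
      height: 720
      fps: 5
    record:
      enabled: true
    snapshots:
      enabled: true
      bounding_box: true
      retain:
        default: 14        # days
    zones:
      porch:
        coordinates: 0.1,0.9,0.5,0.9,0.5,0.6,0.1,0.6   # placeholder polygon
        inertia: 3
        loitering_time: 0
```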

View File

@@ -1,58 +0,0 @@
{
"label": "Object classification config.",
"properties": {
"bird": {
"label": "Bird classification config.",
"properties": {
"enabled": {
"label": "Enable bird classification."
},
"threshold": {
"label": "Minimum classification score required to be considered a match."
}
}
},
"custom": {
"label": "Custom Classification Model Configs.",
"properties": {
"enabled": {
"label": "Enable running the model."
},
"name": {
"label": "Name of classification model."
},
"threshold": {
"label": "Classification score threshold to change the state."
},
"object_config": {
"properties": {
"objects": {
"label": "Object types to classify."
},
"classification_type": {
"label": "Type of classification that is applied."
}
}
},
"state_config": {
"properties": {
"cameras": {
"label": "Cameras to run classification on.",
"properties": {
"crop": {
"label": "Crop of image frame on this camera to run classification on."
}
}
},
"motion": {
"label": "If classification should be run when motion is detected in the crop."
},
"interval": {
"label": "Interval to run classification on in seconds."
}
}
}
}
}
}
}
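
The bird classifier options above correspond to a small config block; a hedged sketch, assuming the top-level key is `classification` (the custom model entries labeled above have their own nested structure and are omitted here):

```yaml
classification:
  bird:
    enabled: true
    threshold: 0.9   # minimum classification score to be considered a match
```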

View File

@@ -1,8 +0,0 @@
{
"label": "Database configuration.",
"properties": {
"path": {
"label": "Database path."
}
}
}
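
The database block only exposes the path; for reference:

```yaml
database:
  path: /config/frigate.db   # conventional location inside the container
```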

View File

@@ -1,51 +0,0 @@
{
"label": "Global object tracking configuration.",
"properties": {
"enabled": {
"label": "Detection Enabled."
},
"height": {
"label": "Height of the stream for the detect role."
},
"width": {
"label": "Width of the stream for the detect role."
},
"fps": {
"label": "Number of frames per second to process through detection."
},
"min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker."
},
"max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends."
},
"stationary": {
"label": "Stationary objects config.",
"properties": {
"interval": {
"label": "Frame interval for checking stationary objects."
},
"threshold": {
"label": "Number of frames without a position change for an object to be considered stationary"
},
"max_frames": {
"label": "Max frames for stationary objects.",
"properties": {
"default": {
"label": "Default max frames."
},
"objects": {
"label": "Object specific max frames."
}
}
},
"classifier": {
"label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary."
}
}
},
"annotation_offset": {
"label": "Milliseconds to offset detect annotations by."
}
}
}
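
The global detect options above map to a block like this; the values are illustrative, and width/height are usually auto-detected and omitted:

```yaml
detect:
  enabled: true
  fps: 5
  min_initialized: 2       # consecutive hits before the tracker initializes an object
  max_disappeared: 25      # frames an object may be missing before tracking ends
  stationary:
    interval: 50           # frame interval for re-checking stationary objects
    threshold: 50          # motionless frames before an object is considered stationary
    max_frames:
      default: 3000
      objects:
        person: 1000
  annotation_offset: 0     # milliseconds to offset detect annotations by
```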

Some files were not shown because too many files have changed in this diff.