Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot]
b48b749c4e Bump @docusaurus/plugin-content-docs from 3.8.1 to 3.9.1 in /docs
Bumps [@docusaurus/plugin-content-docs](https://github.com/facebook/docusaurus/tree/HEAD/packages/docusaurus-plugin-content-docs) from 3.8.1 to 3.9.1.
- [Release notes](https://github.com/facebook/docusaurus/releases)
- [Changelog](https://github.com/facebook/docusaurus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/facebook/docusaurus/commits/v3.9.1/packages/docusaurus-plugin-content-docs)

---
updated-dependencies:
- dependency-name: "@docusaurus/plugin-content-docs"
  dependency-version: 3.9.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-29 14:37:55 +00:00
182 changed files with 2102 additions and 8701 deletions

View File

@@ -50,38 +50,6 @@ function set_libva_version() {
export LIBAVFORMAT_VERSION_MAJOR
}
#######################################
# Ensure the HomeKit go2rtc config file exists and contains only the
# "homekit" section (strips everything else so go2rtc writebacks stay clean).
# Arguments: $1 - path to the HomeKit YAML config file
# Outputs:   INFO/WARNING messages to stdout
# Returns:   0 always (cleanup is best-effort)
#######################################
function setup_homekit_config() {
    local config_path="$1"

    if [[ ! -f "${config_path}" ]]; then
        echo "[INFO] Creating empty HomeKit config file..."
        echo '{}' > "${config_path}"
    fi

    # Use mktemp instead of fixed /tmp names: fixed paths are predictable
    # (symlink/clobber risk on a shared tmp dir) and the original leaked the
    # temp files when the early "return 0" path was taken.
    local temp_json cleaned_json
    temp_json="$(mktemp /tmp/cache/homekit_config.XXXXXX)" || return 0
    cleaned_json="$(mktemp /tmp/cache/homekit_cleaned.XXXXXX)" || {
        rm -f -- "${temp_json}"
        return 0
    }

    # Convert YAML to JSON for jq processing
    yq eval -o=json "${config_path}" > "${temp_json}" 2>/dev/null || {
        echo "[WARNING] Failed to convert HomeKit config to JSON, skipping cleanup"
        rm -f -- "${temp_json}" "${cleaned_json}"
        return 0
    }

    # Use jq to filter and keep only the homekit section
    jq '
    # Keep only the homekit section if it exists, otherwise empty object
    if has("homekit") then {homekit: .homekit} else {homekit: {}} end
    ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || echo '{"homekit": {}}' > "${cleaned_json}"

    # Convert back to YAML and write to the config file
    yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
        echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
        echo '{"homekit": {}}' > "${config_path}"
    }

    # Clean up temp files
    rm -f -- "${temp_json}" "${cleaned_json}"
}
set_libva_version
if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
@@ -102,10 +70,6 @@ else
echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi
# HomeKit configuration persistence setup
readonly homekit_config_path="/config/go2rtc_homekit.yml"
setup_homekit_config "${homekit_config_path}"
readonly config_path="/config"
if [[ -x "${config_path}/go2rtc" ]]; then
@@ -118,7 +82,5 @@ fi
echo "[INFO] Starting go2rtc..."
# Replace the bash process with the go2rtc process, redirecting stderr to stdout
# Use HomeKit config as the primary config so writebacks go there
# The main config from Frigate will be loaded as a secondary config
exec 2>&1
exec "${binary_path}" -config="${homekit_config_path}" -config=/dev/shm/go2rtc.yaml
exec "${binary_path}" -config=/dev/shm/go2rtc.yaml

View File

@@ -17,9 +17,7 @@ http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" '
'request_time="$request_time" upstream_response_time="$upstream_response_time"';
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;

View File

@@ -27,18 +27,6 @@ Threat-level definitions:
This will show in the UI as a list of concerns that each review item has along with the general description.
### Defining Typical Activity
Each installation and even camera can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, which allows you to define more specifically what should be considered normal activity. It is important that this is not overly specific as it can sway the output of the response. The default `activity_context_prompt` is below:
```
- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
```
### Additional Concerns
Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:

View File

@@ -250,7 +250,6 @@ Note that disabling a camera through the config file (`enabled: False`) removes
- Check go2rtc configuration for transcoding (e.g., audio to AAC/OPUS).
- Test with a different stream via the UI dropdown (if `live -> streams` is configured).
- For WebRTC-specific issues, ensure port 8555 is forwarded and candidates are set (see [WebRTC Extra Configuration](#webrtc-extra-configuration)).
- If your cameras are streaming at a high resolution, your browser may be struggling to load all of the streams before the buffering timeout occurs. Frigate prioritizes showing a true live view as quickly as possible. If the fallback occurs often, change your live view settings to use a lower bandwidth substream.
3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?**

View File

@@ -3,13 +3,15 @@ id: configuring_go2rtc
title: Configuring go2rtc
---
# Configuring go2rtc
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
- WebRTC or MSE for live viewing with audio, higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream and does not support audio
- Live stream support for cameras in Home Assistant Integration
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
## Setup a go2rtc stream
# Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
@@ -109,11 +111,11 @@ section.
:::
### Next steps
## Next steps
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
2. You can [set up WebRTC](/configuration/live#webrtc-extra-configuration) if your camera supports two-way talk. Note that WebRTC only supports specific audio formats and may require opening ports on your router.
## Homekit Configuration
## Important considerations
To add camera streams to HomeKit, Frigate must be configured in Docker to use `host` networking mode. Once that is done, you can use the go2rtc WebUI (accessed via port 1984, which is disabled by default) to export a camera to HomeKit. Any changes made will automatically be saved to `/config/go2rtc_homekit.yml`.
If you are configuring go2rtc to publish HomeKit camera streams, on pairing the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container. These changes must be manually copied across to the `go2rtc` section of your Frigate configuration in order to persist through restarts.

997
docs/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -18,7 +18,7 @@
},
"dependencies": {
"@docusaurus/core": "^3.7.0",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@docusaurus/plugin-content-docs": "^3.9.1",
"@docusaurus/preset-classic": "^3.7.0",
"@docusaurus/theme-mermaid": "^3.6.3",
"@inkeep/docusaurus": "^2.0.16",

View File

@@ -92,15 +92,6 @@ class GenAIReviewConfig(FrigateBaseModel):
title="Preferred language for GenAI Response",
default=None,
)
activity_context_prompt: str = Field(
default="""- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
""",
title="Custom activity context prompt defining normal activity patterns for this property.",
)
class ReviewConfig(FrigateBaseModel):

View File

@@ -1,349 +0,0 @@
"""Post processor for object descriptions using GenAI."""
import datetime
import logging
import os
import threading
from pathlib import Path
from typing import TYPE_CHECKING, Any
import cv2
import numpy as np
from peewee import DoesNotExist
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor
from frigate.data_processing.types import PostProcessDataEnum
from frigate.genai import GenAIClient
from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
from frigate.util.image import create_thumbnail, ensure_jpeg_bytes
from frigate.util.path import get_event_thumbnail_bytes
if TYPE_CHECKING:
from frigate.embeddings import Embeddings
from ..post.api import PostProcessorApi
from ..types import DataProcessorMetrics
logger = logging.getLogger(__name__)
MAX_THUMBNAILS = 10
class ObjectDescriptionProcessor(PostProcessorApi):
    """Post processor that generates object descriptions via GenAI.

    Collects thumbnails for tracked objects as they update, asks the GenAI
    client for a description when an object finalizes (or early, when
    configured), publishes the result, and optionally embeds it for
    semantic search.
    """

    def __init__(
        self,
        config: FrigateConfig,
        embeddings: "Embeddings",
        requestor: InterProcessRequestor,
        metrics: DataProcessorMetrics,
        client: GenAIClient,
        semantic_trigger_processor: SemanticTriggerProcessor | None,
    ):
        # Base class takes (config, metrics, model_runner); no runner here.
        super().__init__(config, metrics, None)
        self.config = config
        self.embeddings = embeddings
        self.requestor = requestor
        self.metrics = metrics
        self.genai_client = client
        self.semantic_trigger_processor = semantic_trigger_processor
        # Per-object thumbnail history, keyed by tracked object id.
        self.tracked_events: dict[str, list[Any]] = {}
        # Marks object ids that already triggered an early GenAI request.
        self.early_request_sent: dict[str, bool] = {}
        # Rolling inference-speed / throughput metrics for descriptions.
        self.object_desc_speed = InferenceSpeed(self.metrics.object_desc_speed)
        self.object_desc_dps = EventsPerSecond()
        self.object_desc_dps.start()
    def __handle_frame_update(
        self, camera: str, data: dict, yuv_frame: np.ndarray
    ) -> None:
        """Handle an update to a frame for an object.

        Maintains a bounded thumbnail history for the object and, when
        `send_triggers.after_significant_updates` is configured, fires an
        early GenAI request once enough updates have accumulated.
        """
        camera_config = self.config.cameras[camera]

        # no need to save our own thumbnails if genai is not enabled
        # or if the object has become stationary
        if not data["stationary"]:
            if data["id"] not in self.tracked_events:
                self.tracked_events[data["id"]] = []

            data["thumbnail"] = create_thumbnail(yuv_frame, data["box"])

            # Limit the number of thumbnails saved
            if len(self.tracked_events[data["id"]]) >= MAX_THUMBNAILS:
                # Always keep the first thumbnail for the event
                self.tracked_events[data["id"]].pop(1)

            self.tracked_events[data["id"]].append(data)

        # check if we're configured to send an early request after a minimum number of updates received
        if camera_config.objects.genai.send_triggers.after_significant_updates:
            if (
                len(self.tracked_events.get(data["id"], []))
                >= camera_config.objects.genai.send_triggers.after_significant_updates
                and data["id"] not in self.early_request_sent
            ):
                if data["has_clip"] and data["has_snapshot"]:
                    event: Event = Event.get(Event.id == data["id"])

                    # Empty filter lists mean "match everything"; otherwise
                    # the label / entered zones must match the camera config.
                    if (
                        not camera_config.objects.genai.objects
                        or event.label in camera_config.objects.genai.objects
                    ) and (
                        not camera_config.objects.genai.required_zones
                        or set(data["entered_zones"])
                        & set(camera_config.objects.genai.required_zones)
                    ):
                        logger.debug(f"{camera} sending early request to GenAI")

                        self.early_request_sent[data["id"]] = True
                        # GenAI call is network bound; run in a daemon thread.
                        threading.Thread(
                            target=self._genai_embed_description,
                            name=f"_genai_embed_description_{event.id}",
                            daemon=True,
                            args=(
                                event,
                                [
                                    data["thumbnail"]
                                    for data in self.tracked_events[data["id"]]
                                ],
                            ),
                        ).start()
def __handle_frame_finalize(
self, camera: str, event: Event, thumbnail: bytes
) -> None:
"""Handle the finalization of a frame."""
camera_config = self.config.cameras[camera]
if (
camera_config.objects.genai.enabled
and camera_config.objects.genai.send_triggers.tracked_object_end
and (
not camera_config.objects.genai.objects
or event.label in camera_config.objects.genai.objects
)
and (
not camera_config.objects.genai.required_zones
or set(event.zones) & set(camera_config.objects.genai.required_zones)
)
):
self._process_genai_description(event, camera_config, thumbnail)
    def __regenerate_description(self, event_id: str, source: str, force: bool) -> None:
        """Regenerate the description for an event.

        Args:
            event_id: id of the tracked-object event to regenerate.
            source: image source to use ("snapshot" or thumbnail-based).
            force: regenerate even when GenAI is disabled for the camera.
        """
        try:
            event: Event = Event.get(Event.id == event_id)
        except DoesNotExist:
            logger.error(f"Event {event_id} not found for description regeneration")
            return

        if self.genai_client is None:
            logger.error("GenAI not enabled")
            return

        camera_config = self.config.cameras[event.camera]

        if not camera_config.objects.genai.enabled and not force:
            logger.error(f"GenAI not enabled for camera {event.camera}")
            return

        thumbnail = get_event_thumbnail_bytes(event)

        # ensure we have a jpeg to pass to the model
        thumbnail = ensure_jpeg_bytes(thumbnail)

        logger.debug(
            f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"
        )

        if event.has_snapshot and source == "snapshot":
            snapshot_image = self._read_and_crop_snapshot(event)
            if not snapshot_image:
                return

        # Image preference: cropped snapshot, then the saved thumbnail
        # history, falling back to the single event thumbnail.
        embed_image = (
            [snapshot_image]
            if event.has_snapshot and source == "snapshot"
            else (
                [data["thumbnail"] for data in self.tracked_events[event_id]]
                if len(self.tracked_events.get(event_id, [])) > 0
                else [thumbnail]
            )
        )

        self._genai_embed_description(event, embed_image)
def process_data(self, frame_data: dict, data_type: PostProcessDataEnum) -> None:
"""Process a frame update."""
self.metrics.object_desc_dps.value = self.object_desc_dps.eps()
if data_type != PostProcessDataEnum.tracked_object:
return
state: str | None = frame_data.get("state", None)
if state is not None:
logger.debug(f"Processing {state} for {frame_data['camera']}")
if state == "update":
self.__handle_frame_update(
frame_data["camera"], frame_data["data"], frame_data["yuv_frame"]
)
elif state == "finalize":
self.__handle_frame_finalize(
frame_data["camera"], frame_data["event"], frame_data["thumbnail"]
)
def handle_request(self, topic: str, data: dict[str, Any]) -> str | None:
"""Handle a request."""
if topic == "regenerate_description":
self.__regenerate_description(
data["event_id"], data["source"], data["force"]
)
return None
def _read_and_crop_snapshot(self, event: Event) -> bytes | None:
"""Read, decode, and crop the snapshot image."""
snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")
if not os.path.isfile(snapshot_file):
logger.error(
f"Cannot load snapshot for {event.id}, file not found: {snapshot_file}"
)
return None
try:
with open(snapshot_file, "rb") as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8),
cv2.IMREAD_COLOR,
)
# Crop snapshot based on region
# provide full image if region doesn't exist (manual events)
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = event.data.get(
"region", [0, 0, 1, 1]
)
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height),
x1 : x1 + int(width_rel * width),
]
_, buffer = cv2.imencode(".jpg", cropped_image)
return buffer.tobytes()
except Exception:
return None
    def _process_genai_description(
        self, event: Event, camera_config: CameraConfig, thumbnail
    ) -> None:
        """Kick off GenAI description generation for a finalized event.

        Chooses the best available imagery (cropped snapshot, then saved
        thumbnail history, then the provided thumbnail), optionally saves
        debug copies of the thumbnails, starts the network-bound GenAI call
        in a daemon thread, and drops the event's thumbnail history.
        """
        if event.has_snapshot and camera_config.objects.genai.use_snapshot:
            snapshot_image = self._read_and_crop_snapshot(event)
            if not snapshot_image:
                return

        num_thumbnails = len(self.tracked_events.get(event.id, []))

        # ensure we have a jpeg to pass to the model
        thumbnail = ensure_jpeg_bytes(thumbnail)

        embed_image = (
            [snapshot_image]
            if event.has_snapshot and camera_config.objects.genai.use_snapshot
            else (
                [data["thumbnail"] for data in self.tracked_events[event.id]]
                if num_thumbnails > 0
                else [thumbnail]
            )
        )

        if camera_config.objects.genai.debug_save_thumbnails and num_thumbnails > 0:
            logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}")

            Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir(
                parents=True, exist_ok=True
            )

            # Thumbnails are numbered from 1 for easier manual inspection.
            for idx, data in enumerate(self.tracked_events[event.id], 1):
                jpg_bytes: bytes | None = data["thumbnail"]

                if jpg_bytes is None:
                    logger.warning(f"Unable to save thumbnail {idx} for {event.id}.")
                else:
                    with open(
                        os.path.join(
                            CLIPS_DIR,
                            f"genai-requests/{event.id}/{idx}.jpg",
                        ),
                        "wb",
                    ) as j:
                        j.write(jpg_bytes)

        # Generate the description. Call happens in a thread since it is network bound.
        threading.Thread(
            target=self._genai_embed_description,
            name=f"_genai_embed_description_{event.id}",
            daemon=True,
            args=(
                event,
                embed_image,
            ),
        ).start()

        # Delete tracked events based on the event_id
        if event.id in self.tracked_events:
            del self.tracked_events[event.id]
    def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
        """Embed the description for an event.

        Generates a description via the GenAI client, publishes it to other
        processes, optionally embeds it for semantic search, runs semantic
        triggers, and records inference-timing metrics. Intended to run on a
        worker thread (the GenAI call is network bound).
        """
        start = datetime.datetime.now().timestamp()
        camera_config = self.config.cameras[event.camera]
        description = self.genai_client.generate_object_description(
            camera_config, thumbnails, event
        )

        if not description:
            logger.debug("Failed to generate description for %s", event.id)
            return

        # fire and forget description update
        self.requestor.send_data(
            UPDATE_EVENT_DESCRIPTION,
            {
                "type": TrackedObjectUpdateTypesEnum.description,
                "id": event.id,
                "description": description,
                "camera": event.camera,
            },
        )

        # Embed the description
        if self.config.semantic_search.enabled:
            self.embeddings.embed_description(event.id, description)

        # Check semantic trigger for this description
        if self.semantic_trigger_processor is not None:
            self.semantic_trigger_processor.process_data(
                {"event_id": event.id, "camera": event.camera, "type": "text"},
                PostProcessDataEnum.tracked_object,
            )

        # Update inference timing metrics
        self.object_desc_speed.update(datetime.datetime.now().timestamp() - start)
        self.object_desc_dps.update()

        logger.debug(
            "Generated description for %s (%d images): %s",
            event.id,
            len(thumbnails),
            description,
        )

View File

@@ -43,21 +43,6 @@ class ReviewDescriptionProcessor(PostProcessorApi):
self.review_descs_dps = EventsPerSecond()
self.review_descs_dps.start()
def calculate_frame_count(self) -> int:
"""Calculate optimal number of frames based on context size."""
# With our preview images (height of 180px) each image should be ~100 tokens per image
# We want to be conservative to not have too long of query times with too many images
context_size = self.genai_client.get_context_size()
if context_size > 10000:
return 20
elif context_size > 6000:
return 16
elif context_size > 4000:
return 12
else:
return 8
def process_data(self, data, data_type):
self.metrics.review_desc_dps.value = self.review_descs_dps.eps()
@@ -191,6 +176,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
camera: str,
start_time: float,
end_time: float,
desired_frame_count: int = 12,
) -> list[str]:
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{camera}"
@@ -217,8 +203,6 @@ class ReviewDescriptionProcessor(PostProcessorApi):
all_frames.append(os.path.join(preview_dir, file))
frame_count = len(all_frames)
desired_frame_count = self.calculate_frame_count()
if frame_count <= desired_frame_count:
return all_frames
@@ -251,7 +235,7 @@ def run_analysis(
"start": datetime.datetime.fromtimestamp(final_data["start_time"]).strftime(
"%A, %I:%M %p"
),
"duration": round(final_data["end_time"] - final_data["start_time"]),
"duration": final_data["end_time"] - final_data["start_time"],
}
objects = []
@@ -275,7 +259,6 @@ def run_analysis(
genai_config.additional_concerns,
genai_config.preferred_language,
genai_config.debug_save_thumbnails,
genai_config.activity_context_prompt,
)
review_inference_speed.update(datetime.datetime.now().timestamp() - start)

View File

@@ -22,8 +22,6 @@ class DataProcessorMetrics:
yolov9_lpr_pps: Synchronized
review_desc_speed: Synchronized
review_desc_dps: Synchronized
object_desc_speed: Synchronized
object_desc_dps: Synchronized
classification_speeds: dict[str, Synchronized]
classification_cps: dict[str, Synchronized]
@@ -40,8 +38,6 @@ class DataProcessorMetrics:
self.yolov9_lpr_pps = manager.Value("d", 0.0)
self.review_desc_speed = manager.Value("d", 0.0)
self.review_desc_dps = manager.Value("d", 0.0)
self.object_desc_speed = manager.Value("d", 0.0)
self.object_desc_dps = manager.Value("d", 0.0)
self.classification_speeds = manager.dict()
self.classification_cps = manager.dict()

View File

@@ -424,8 +424,7 @@ def get_optimized_runner(
) -> BaseModelRunner:
"""Get an optimized runner for the hardware."""
device = device or "AUTO"
if device != "CPU" and is_rknn_compatible(model_path):
if is_rknn_compatible(model_path):
rknn_path = auto_convert_model(model_path)
if rknn_path:

View File

@@ -3,10 +3,14 @@
import base64
import datetime
import logging
import os
import threading
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
from pathlib import Path
from typing import Any, Optional
import cv2
import numpy as np
from peewee import DoesNotExist
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
@@ -26,12 +30,16 @@ from frigate.comms.recordings_updater import (
RecordingsDataTypeEnum,
)
from frigate.comms.review_updater import ReviewDataSubscriber
from frigate.config import FrigateConfig
from frigate.config import CameraConfig, FrigateConfig
from frigate.config.camera.camera import CameraTypeEnum
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber,
)
from frigate.const import (
CLIPS_DIR,
UPDATE_EVENT_DESCRIPTION,
)
from frigate.data_processing.common.license_plate.model import (
LicensePlateModelRunner,
)
@@ -42,7 +50,6 @@ from frigate.data_processing.post.audio_transcription import (
from frigate.data_processing.post.license_plate import (
LicensePlatePostProcessor,
)
from frigate.data_processing.post.object_descriptions import ObjectDescriptionProcessor
from frigate.data_processing.post.review_descriptions import ReviewDescriptionProcessor
from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor
from frigate.data_processing.real_time.api import RealTimeProcessorApi
@@ -60,8 +67,13 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import get_genai_client
from frigate.models import Event, Recordings, ReviewSegment, Trigger
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.image import (
SharedMemoryFrameManager,
calculate_region,
ensure_jpeg_bytes,
)
from frigate.util.path import get_event_thumbnail_bytes
from .embeddings import Embeddings
@@ -223,30 +235,20 @@ class EmbeddingMaintainer(threading.Thread):
AudioTranscriptionPostProcessor(self.config, self.requestor, metrics)
)
semantic_trigger_processor: SemanticTriggerProcessor | None = None
if self.config.semantic_search.enabled:
semantic_trigger_processor = SemanticTriggerProcessor(
db,
self.config,
self.requestor,
metrics,
self.embeddings,
)
self.post_processors.append(semantic_trigger_processor)
if any(c.objects.genai.enabled_in_config for c in self.config.cameras.values()):
self.post_processors.append(
ObjectDescriptionProcessor(
SemanticTriggerProcessor(
db,
self.config,
self.embeddings,
self.requestor,
self.metrics,
self.genai_client,
semantic_trigger_processor,
metrics,
self.embeddings,
)
)
self.stop_event = stop_event
self.tracked_events: dict[str, list[Any]] = {}
self.early_request_sent: dict[str, bool] = {}
# recordings data
self.recordings_available_through: dict[str, float] = {}
@@ -335,8 +337,11 @@ class EmbeddingMaintainer(threading.Thread):
camera_config = self.config.cameras[camera]
# no need to process updated objects if no processors are active
if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
# no need to process updated objects if face recognition, lpr, genai are disabled
if (
not camera_config.objects.genai.enabled
and len(self.realtime_processors) == 0
):
return
# Create our own thumbnail based on the bounding box and the frame time
@@ -356,17 +361,57 @@ class EmbeddingMaintainer(threading.Thread):
for processor in self.realtime_processors:
processor.process_frame(data, yuv_frame)
for processor in self.post_processors:
if isinstance(processor, ObjectDescriptionProcessor):
processor.process_data(
{
"camera": camera,
"data": data,
"state": "update",
"yuv_frame": yuv_frame,
},
PostProcessDataEnum.tracked_object,
)
# no need to save our own thumbnails if genai is not enabled
# or if the object has become stationary
if self.genai_client is not None and not data["stationary"]:
if data["id"] not in self.tracked_events:
self.tracked_events[data["id"]] = []
data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
# Limit the number of thumbnails saved
if len(self.tracked_events[data["id"]]) >= MAX_THUMBNAILS:
# Always keep the first thumbnail for the event
self.tracked_events[data["id"]].pop(1)
self.tracked_events[data["id"]].append(data)
# check if we're configured to send an early request after a minimum number of updates received
if (
self.genai_client is not None
and camera_config.objects.genai.send_triggers.after_significant_updates
):
if (
len(self.tracked_events.get(data["id"], []))
>= camera_config.objects.genai.send_triggers.after_significant_updates
and data["id"] not in self.early_request_sent
):
if data["has_clip"] and data["has_snapshot"]:
event: Event = Event.get(Event.id == data["id"])
if (
not camera_config.objects.genai.objects
or event.label in camera_config.objects.genai.objects
) and (
not camera_config.objects.genai.required_zones
or set(data["entered_zones"])
& set(camera_config.objects.genai.required_zones)
):
logger.debug(f"{camera} sending early request to GenAI")
self.early_request_sent[data["id"]] = True
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
[
data["thumbnail"]
for data in self.tracked_events[data["id"]]
],
),
).start()
self.frame_manager.close(frame_name)
@@ -379,13 +424,12 @@ class EmbeddingMaintainer(threading.Thread):
break
event_id, camera, updated_db = ended
camera_config = self.config.cameras[camera]
# expire in realtime processors
for processor in self.realtime_processors:
processor.expire_object(event_id, camera)
thumbnail: bytes | None = None
if updated_db:
try:
event: Event = Event.get(Event.id == event_id)
@@ -402,6 +446,23 @@ class EmbeddingMaintainer(threading.Thread):
# Embed the thumbnail
self._embed_thumbnail(event_id, thumbnail)
# Run GenAI
if (
camera_config.objects.genai.enabled
and camera_config.objects.genai.send_triggers.tracked_object_end
and self.genai_client is not None
and (
not camera_config.objects.genai.objects
or event.label in camera_config.objects.genai.objects
)
and (
not camera_config.objects.genai.required_zones
or set(event.zones)
& set(camera_config.objects.genai.required_zones)
)
):
self._process_genai_description(event, camera_config, thumbnail)
# call any defined post processors
for processor in self.post_processors:
if isinstance(processor, LicensePlatePostProcessor):
@@ -431,25 +492,16 @@ class EmbeddingMaintainer(threading.Thread):
{"event_id": event_id, "camera": camera, "type": "image"},
PostProcessDataEnum.tracked_object,
)
elif isinstance(processor, ObjectDescriptionProcessor):
if not updated_db:
continue
processor.process_data(
{
"event": event,
"camera": camera,
"state": "finalize",
"thumbnail": thumbnail,
},
PostProcessDataEnum.tracked_object,
)
else:
processor.process_data(
{"event_id": event_id, "camera": camera},
PostProcessDataEnum.tracked_object,
)
# Delete tracked events based on the event_id
if event_id in self.tracked_events:
del self.tracked_events[event_id]
def _expire_dedicated_lpr(self) -> None:
"""Remove plates not seen for longer than expiration timeout for dedicated lpr cameras."""
now = datetime.datetime.now().timestamp()
@@ -518,16 +570,9 @@ class EmbeddingMaintainer(threading.Thread):
event_id, source, force = payload
if event_id:
for processor in self.post_processors:
if isinstance(processor, ObjectDescriptionProcessor):
processor.handle_request(
"regenerate_description",
{
"event_id": event_id,
"source": RegenerateDescriptionEnum(source),
"force": force,
},
)
self.handle_regenerate_description(
event_id, RegenerateDescriptionEnum(source), force
)
def _process_frame_updates(self) -> None:
"""Process event updates"""
@@ -577,9 +622,208 @@ class EmbeddingMaintainer(threading.Thread):
self.frame_manager.close(frame_name)
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame."""
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
region = calculate_region(
frame.shape, box[0], box[1], box[2], box[3], height, multiplier=1.4
)
frame = frame[region[1] : region[3], region[0] : region[2]]
width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if ret:
return jpg.tobytes()
return None
def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
"""Embed the thumbnail for an event."""
if not self.config.semantic_search.enabled:
return
self.embeddings.embed_thumbnail(event_id, thumbnail)
def _process_genai_description(
    self, event: Event, camera_config: CameraConfig, thumbnail
) -> None:
    """Kick off generative-AI description generation for a finalized event.

    Selects the image set to send to the model (cropped snapshot,
    accumulated per-frame thumbnails, or the single provided thumbnail),
    optionally saves the thumbnails for debugging, then spawns a daemon
    thread for the network-bound GenAI call.

    Args:
        event: The finalized tracked-object event.
        camera_config: Configuration for the event's camera.
        thumbnail: Raw thumbnail image bytes (converted to JPEG below).
    """
    # Prefer the cropped snapshot when configured; abort if it can't be read.
    if event.has_snapshot and camera_config.objects.genai.use_snapshot:
        snapshot_image = self._read_and_crop_snapshot(event, camera_config)
        if not snapshot_image:
            return

    num_thumbnails = len(self.tracked_events.get(event.id, []))

    # ensure we have a jpeg to pass to the model
    thumbnail = ensure_jpeg_bytes(thumbnail)

    # Image priority: snapshot > accumulated thumbnails > fallback thumbnail.
    embed_image = (
        [snapshot_image]
        if event.has_snapshot and camera_config.objects.genai.use_snapshot
        else (
            [data["thumbnail"] for data in self.tracked_events[event.id]]
            if num_thumbnails > 0
            else [thumbnail]
        )
    )

    if camera_config.objects.genai.debug_save_thumbnails and num_thumbnails > 0:
        logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}")

        Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir(
            parents=True, exist_ok=True
        )

        # Thumbnails are written 1-indexed so file names read naturally.
        for idx, data in enumerate(self.tracked_events[event.id], 1):
            jpg_bytes: bytes = data["thumbnail"]

            if jpg_bytes is None:
                logger.warning(f"Unable to save thumbnail {idx} for {event.id}.")
            else:
                with open(
                    os.path.join(
                        CLIPS_DIR,
                        f"genai-requests/{event.id}/{idx}.jpg",
                    ),
                    "wb",
                ) as j:
                    j.write(jpg_bytes)

    # Generate the description. Call happens in a thread since it is network bound.
    threading.Thread(
        target=self._genai_embed_description,
        name=f"_genai_embed_description_{event.id}",
        daemon=True,
        args=(
            event,
            embed_image,
        ),
    ).start()
def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
    """Generate, publish, and optionally embed a GenAI description.

    Runs on a worker thread (network bound). On success the description is
    broadcast via the requestor, embedded when semantic search is enabled,
    and fed to any SemanticTriggerProcessor for text-trigger checks.

    Args:
        event: The event being described.
        thumbnails: JPEG images supplied to the model.
    """
    camera_config = self.config.cameras[event.camera]
    description = self.genai_client.generate_object_description(
        camera_config, thumbnails, event
    )

    if not description:
        logger.debug("Failed to generate description for %s", event.id)
        return

    # fire and forget description update
    self.requestor.send_data(
        UPDATE_EVENT_DESCRIPTION,
        {
            "type": TrackedObjectUpdateTypesEnum.description,
            "id": event.id,
            "description": description,
            "camera": event.camera,
        },
    )

    # Embed the description
    if self.config.semantic_search.enabled:
        self.embeddings.embed_description(event.id, description)

    # Check semantic trigger for this description.
    # (The previous `else: continue` arm was a no-op and has been removed.)
    for processor in self.post_processors:
        if isinstance(processor, SemanticTriggerProcessor):
            processor.process_data(
                {"event_id": event.id, "camera": event.camera, "type": "text"},
                PostProcessDataEnum.tracked_object,
            )

    logger.debug(
        "Generated description for %s (%d images): %s",
        event.id,
        len(thumbnails),
        description,
    )
def _read_and_crop_snapshot(self, event: Event, camera_config) -> bytes | None:
    """Read, decode, and crop the snapshot image for an event.

    The snapshot is cropped to the event's recorded relative region when
    one exists; manual events without a region get the full frame.

    Returns:
        JPEG bytes of the cropped snapshot, or None when the file is
        missing or cannot be decoded.
    """
    snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")

    if not os.path.isfile(snapshot_file):
        logger.error(
            f"Cannot load snapshot for {event.id}, file not found: {snapshot_file}"
        )
        return None

    try:
        with open(snapshot_file, "rb") as image_file:
            snapshot_image = image_file.read()

        # JPEG data is raw bytes: decode from an unsigned byte buffer
        # (np.uint8, not np.int8, is what cv2.imdecode expects).
        img = cv2.imdecode(
            np.frombuffer(snapshot_image, dtype=np.uint8),
            cv2.IMREAD_COLOR,
        )

        if img is None:
            logger.warning(f"Failed to decode snapshot for {event.id}")
            return None

        # Crop snapshot based on region
        # provide full image if region doesn't exist (manual events)
        height, width = img.shape[:2]
        x1_rel, y1_rel, width_rel, height_rel = event.data.get(
            "region", [0, 0, 1, 1]
        )
        x1, y1 = int(x1_rel * width), int(y1_rel * height)

        cropped_image = img[
            y1 : y1 + int(height_rel * height),
            x1 : x1 + int(width_rel * width),
        ]

        _, buffer = cv2.imencode(".jpg", cropped_image)
        return buffer.tobytes()
    except Exception:
        # Keep the best-effort contract (return None) but don't swallow
        # the failure silently — log it for debugging.
        logger.exception(f"Error cropping snapshot for {event.id}")
        return None
def handle_regenerate_description(
    self, event_id: str, source: str, force: bool
) -> None:
    """Regenerate the GenAI description for an existing event on demand.

    Args:
        event_id: ID of the event to regenerate the description for.
        source: Image source selector; compared against "snapshot" below
            (callers pass a RegenerateDescriptionEnum — presumably a
            str-valued enum so equality with "snapshot" holds; verify).
        force: Regenerate even when GenAI is disabled for the camera.
    """
    try:
        event: Event = Event.get(Event.id == event_id)
    except DoesNotExist:
        logger.error(f"Event {event_id} not found for description regeneration")
        return

    if self.genai_client is None:
        logger.error("GenAI not enabled")
        return

    camera_config = self.config.cameras[event.camera]
    if not camera_config.objects.genai.enabled and not force:
        logger.error(f"GenAI not enabled for camera {event.camera}")
        return

    thumbnail = get_event_thumbnail_bytes(event)

    # ensure we have a jpeg to pass to the model
    thumbnail = ensure_jpeg_bytes(thumbnail)

    logger.debug(
        f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"
    )

    # Abort if the configured snapshot cannot be read/cropped.
    if event.has_snapshot and source == "snapshot":
        snapshot_image = self._read_and_crop_snapshot(event, camera_config)
        if not snapshot_image:
            return

    # Image priority: snapshot > accumulated thumbnails > current thumbnail.
    embed_image = (
        [snapshot_image]
        if event.has_snapshot and source == "snapshot"
        else (
            [data["thumbnail"] for data in self.tracked_events[event_id]]
            if len(self.tracked_events.get(event_id, [])) > 0
            else [thumbnail]
        )
    )

    self._genai_embed_description(event, embed_image)

View File

@@ -32,7 +32,7 @@ def register_genai_provider(key: GenAIProviderEnum):
class GenAIClient:
"""Generative AI client for Frigate."""
def __init__(self, genai_config: GenAIConfig, timeout: int = 120) -> None:
def __init__(self, genai_config: GenAIConfig, timeout: int = 60) -> None:
self.genai_config: GenAIConfig = genai_config
self.timeout = timeout
self.provider = self._init_provider()
@@ -44,7 +44,6 @@ class GenAIClient:
concerns: list[str],
preferred_language: str | None,
debug_save: bool,
activity_context_prompt: str,
) -> ReviewMetadata | None:
"""Generate a description for the review item activity."""
@@ -66,36 +65,29 @@ class GenAIClient:
context_prompt = f"""
Please analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
**Normal activity patterns for this property:**
{activity_context_prompt}
Your task is to provide a clear, accurate description of the scene that:
Your task is to provide a clear, security-focused description of the scene that:
1. States exactly what is happening based on observable actions and movements.
2. Evaluates whether the observable evidence suggests normal activity for this property or genuine security concerns.
2. Identifies and emphasizes behaviors that match patterns of suspicious activity.
3. Assigns a potential_threat_level based on the definitions below, applying them consistently.
**IMPORTANT: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider higher threat levels if the activity clearly deviates from normal patterns or shows genuine security concerns.**
Facts come first, but identifying security risks is the primary goal.
When forming your description:
- **CRITICAL: Only describe objects explicitly listed in "Detected objects" below.** Do not infer or mention additional people, vehicles, or objects not present in the detected objects list, even if visual patterns suggest them. If only a car is detected, do not describe a person interacting with it unless "person" is also in the detected objects list.
- **Only describe actions actually visible in the frames.** Do not assume or infer actions that you don't observe happening. If someone walks toward furniture but you never see them sit, do not say they sat. Stick to what you can see across the sequence.
- Describe what you observe: actions, movements, interactions with objects and the environment. Include any observable environmental changes (e.g., lighting changes triggered by activity).
- Note visible details such as clothing, items being carried or placed, tools or equipment present, and how they interact with the property or objects.
- Consider the full sequence chronologically: what happens from start to finish, how duration and actions relate to the location and objects involved.
- **Use the actual timestamp provided in "Activity started at"** below for time of day context—do not infer time from image brightness or darkness. Unusual hours (late night/early morning) should increase suspicion when the observable behavior itself appears questionable. However, recognize that some legitimate activities can occur at any hour.
- Identify patterns that suggest genuine security concerns: testing doors/windows on vehicles or buildings, accessing unauthorized areas, attempting to conceal actions, extended loitering without apparent purpose, taking items, behavior that clearly doesn't align with the zone context and detected objects.
- **Weigh all evidence holistically**: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider Level 1 if the activity clearly deviates from normal patterns or shows genuine security concerns that warrant attention.
- Describe the people and objects exactly as seen. Include any observable environmental changes (e.g., lighting changes triggered by activity).
- Time of day should **increase suspicion only when paired with unusual or security-relevant behaviors**. Do not raise the threat level for common residential activities (e.g., residents walking pets, retrieving mail, gardening, playing with pets, supervising children) even at unusual hours, unless other suspicious indicators are present.
- Focus on behaviors that are uncharacteristic of innocent activity: loitering without clear purpose, avoiding cameras, inspecting vehicles/doors, changing behavior when lights activate, scanning surroundings without an apparent benign reason.
- **Benign context override**: If scanning or looking around is clearly part of an innocent activity (such as playing with a dog, gardening, supervising children, or watching for a pet), do not treat it as suspicious.
Your response MUST be a flat JSON object with:
- `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
- `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
- `scene` (string): A full description including setting, entities, actions, and any plausible supported inferences.
- `confidence` (float): 0-1 confidence in the analysis.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below.
{get_concern_prompt()}
Threat-level definitions:
- 0 — **Normal activity (DEFAULT)**: What you observe matches the normal activity patterns above or is consistent with expected activity for this property type. The observable evidence—considering zone context, detected objects, and timing together—supports a benign explanation. **Use this level for routine activities even if minor ambiguous elements exist.**
- 1 — **Potentially suspicious**: Observable behavior raises genuine security concerns that warrant human review. The evidence doesn't support a routine explanation and clearly deviates from the normal patterns above. Examples: testing doors/windows on vehicles or structures, accessing areas that don't align with the activity, taking items that likely don't belong to them, behavior clearly inconsistent with the zone and context, or activity that lacks any visible legitimate indicators. **Only use this level when the activity clearly doesn't match normal patterns.**
- 2 — **Immediate threat**: Clear evidence of forced entry, break-in, vandalism, aggression, weapons, theft in progress, or active property damage.
- 0 — Typical or expected activity for this location/time (includes residents, guests, or known animals engaged in normal activities, even if they glance around or scan surroundings).
- 1 — Unusual or suspicious activity: At least one security-relevant behavior is present **and not explainable by a normal residential activity**.
- 2 — Active or immediate threat: Breaking in, vandalism, aggression, weapon display.
Sequence details:
- Frame 1 = earliest, Frame {len(thumbnails)} = latest
@@ -106,7 +98,6 @@ Sequence details:
**IMPORTANT:**
- Values must be plain strings, floats, or integers — no nested objects, no extra commentary.
- Only describe objects from the "Detected objects" list above. Do not hallucinate additional objects.
{get_language_prompt()}
"""
logger.debug(
@@ -234,9 +225,9 @@ Rules for the report:
) -> Optional[str]:
"""Generate a description for the frame."""
try:
prompt = camera_config.objects.genai.object_prompts.get(
prompt = camera_config.genai.object_prompts.get(
event.label,
camera_config.objects.genai.prompt,
camera_config.genai.prompt,
).format(**model_to_dict(event))
except KeyError as e:
logger.error(f"Invalid key in GenAI prompt: {e}")
@@ -253,10 +244,6 @@ Rules for the report:
"""Submit a request to the provider."""
return None
def get_context_size(self) -> int:
"""Get the context window size for this provider in tokens."""
return 4096
def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
"""Get the GenAI client."""

View File

@@ -71,7 +71,3 @@ class OpenAIClient(GenAIClient):
if len(result.choices) > 0:
return result.choices[0].message.content.strip()
return None
def get_context_size(self) -> int:
"""Get the context window size for Azure OpenAI."""
return 128000

View File

@@ -53,8 +53,3 @@ class GeminiClient(GenAIClient):
# No description was generated
return None
return description
def get_context_size(self) -> int:
"""Get the context window size for Gemini."""
# Gemini Pro Vision has a 1M token context window
return 1000000

View File

@@ -54,9 +54,3 @@ class OllamaClient(GenAIClient):
except (TimeoutException, ResponseError) as e:
logger.warning("Ollama returned an error: %s", str(e))
return None
def get_context_size(self) -> int:
"""Get the context window size for Ollama."""
return self.genai_config.provider_options.get("options", {}).get(
"num_ctx", 4096
)

View File

@@ -66,8 +66,3 @@ class OpenAIClient(GenAIClient):
except (TimeoutException, Exception) as e:
logger.warning("OpenAI returned an error: %s", str(e))
return None
def get_context_size(self) -> int:
"""Get the context window size for OpenAI."""
# OpenAI GPT-4 Vision models have 128K token context window
return 128000

View File

@@ -361,14 +361,6 @@ def stats_snapshot(
embeddings_metrics.review_desc_dps.value, 2
)
if embeddings_metrics.object_desc_speed.value > 0.0:
stats["embeddings"]["object_description_speed"] = round(
embeddings_metrics.object_desc_speed.value * 1000, 2
)
stats["embeddings"]["object_descriptions"] = round(
embeddings_metrics.object_desc_dps.value, 2
)
for key in embeddings_metrics.classification_speeds.keys():
stats["embeddings"][f"{key}_classification_speed"] = round(
embeddings_metrics.classification_speeds[key].value * 1000, 2

View File

@@ -17,11 +17,7 @@ from frigate.camera import PTZMetrics
from frigate.config import CameraConfig
from frigate.ptz.autotrack import PtzMotionEstimator
from frigate.track import ObjectTracker
from frigate.track.stationary_classifier import (
StationaryMotionClassifier,
StationaryThresholds,
get_stationary_threshold,
)
from frigate.track.stationary_classifier import StationaryMotionClassifier
from frigate.util.image import (
SharedMemoryFrameManager,
get_histogram,
@@ -32,6 +28,12 @@ from frigate.util.object import average_boxes, median_of_boxes
logger = logging.getLogger(__name__)
THRESHOLD_KNOWN_ACTIVE_IOU = 0.2
THRESHOLD_STATIONARY_CHECK_IOU = 0.6
THRESHOLD_ACTIVE_CHECK_IOU = 0.9
MAX_STATIONARY_HISTORY = 10
# Normalizes distance from estimate relative to object size
# Other ideas:
# - if estimates are inaccurate for first N detections, compare with last_detection (may be fine)
@@ -326,7 +328,6 @@ class NorfairTracker(ObjectTracker):
id: str,
box: list[int],
stationary: bool,
thresholds: StationaryThresholds,
yuv_frame: np.ndarray | None,
) -> bool:
def reset_position(xmin: int, ymin: int, xmax: int, ymax: int) -> None:
@@ -345,9 +346,9 @@ class NorfairTracker(ObjectTracker):
position = self.positions[id]
self.stationary_box_history[id].append(box)
if len(self.stationary_box_history[id]) > thresholds.max_stationary_history:
if len(self.stationary_box_history[id]) > MAX_STATIONARY_HISTORY:
self.stationary_box_history[id] = self.stationary_box_history[id][
-thresholds.max_stationary_history :
-MAX_STATIONARY_HISTORY:
]
avg_box = average_boxes(self.stationary_box_history[id])
@@ -366,7 +367,7 @@ class NorfairTracker(ObjectTracker):
# object has minimal or zero iou
# assume object is active
if avg_iou < thresholds.known_active_iou:
if avg_iou < THRESHOLD_KNOWN_ACTIVE_IOU:
if stationary and yuv_frame is not None:
if not self.stationary_classifier.evaluate(
id, yuv_frame, cast(tuple[int, int, int, int], tuple(box))
@@ -378,9 +379,7 @@ class NorfairTracker(ObjectTracker):
return False
threshold = (
thresholds.stationary_check_iou
if stationary
else thresholds.active_check_iou
THRESHOLD_STATIONARY_CHECK_IOU if stationary else THRESHOLD_ACTIVE_CHECK_IOU
)
# object has iou below threshold, check median and optionally crop similarity
@@ -448,7 +447,6 @@ class NorfairTracker(ObjectTracker):
self,
track_id: str,
obj: dict[str, Any],
thresholds: StationaryThresholds,
yuv_frame: np.ndarray | None,
) -> None:
id = self.track_id_map[track_id]
@@ -458,7 +456,7 @@ class NorfairTracker(ObjectTracker):
>= self.detect_config.stationary.threshold
)
# update the motionless count if the object has not moved to a new position
if self.update_position(id, obj["box"], stationary, thresholds, yuv_frame):
if self.update_position(id, obj["box"], stationary, yuv_frame):
self.tracked_objects[id]["motionless_count"] += 1
if self.is_expired(id):
self.deregister(id, track_id)
@@ -504,9 +502,9 @@ class NorfairTracker(ObjectTracker):
detections_by_type: dict[str, list[Detection]] = {}
yuv_frame: np.ndarray | None = None
if (
self.ptz_metrics.autotracker_enabled.value
or self.detect_config.stationary.classifier
if self.ptz_metrics.autotracker_enabled.value or (
self.detect_config.stationary.classifier
and any(obj[0] == "car" for obj in detections)
):
yuv_frame = self.frame_manager.get(
frame_name, self.camera_config.frame_shape_yuv
@@ -616,12 +614,10 @@ class NorfairTracker(ObjectTracker):
self.tracked_objects[id]["estimate"] = new_obj["estimate"]
# else update it
else:
thresholds = get_stationary_threshold(new_obj["label"])
self.update(
str(t.global_id),
new_obj,
thresholds,
yuv_frame if thresholds.motion_classifier_enabled else None,
yuv_frame if new_obj["label"] == "car" else None,
)
# clear expired tracks

View File

@@ -1,7 +1,6 @@
"""Tools for determining if an object is stationary."""
import logging
from dataclasses import dataclass, field
from typing import Any, cast
import cv2
@@ -11,61 +10,10 @@ from scipy.ndimage import gaussian_filter
logger = logging.getLogger(__name__)
@dataclass
class StationaryThresholds:
"""IOU thresholds and history parameters for stationary object classification.
This allows different sensitivity settings for different object types.
"""
# Objects to apply these thresholds to
# If None, apply to all objects
objects: list[str] = field(default_factory=list)
# Threshold of IoU that causes the object to immediately be considered active
# Below this threshold, assume object is active
known_active_iou: float = 0.2
# IOU threshold for checking if stationary object has moved
# If mean and median IOU drops below this, assume object is no longer stationary
stationary_check_iou: float = 0.6
# IOU threshold for checking if active object has changed position
# Higher threshold makes it more difficult for the object to be considered stationary
active_check_iou: float = 0.9
# Maximum number of bounding boxes to keep in stationary history
max_stationary_history: int = 10
# Whether to use the motion classifier
motion_classifier_enabled: bool = False
# Thresholds for objects that are expected to be stationary
STATIONARY_OBJECT_THRESHOLDS = StationaryThresholds(
objects=["bbq_grill", "package", "waste_bin"],
known_active_iou=0.0,
motion_classifier_enabled=True,
)
# Thresholds for objects that are active but can be stationary for longer periods of time
DYNAMIC_OBJECT_THRESHOLDS = StationaryThresholds(
objects=["bicycle", "boat", "car", "motorcycle", "tractor", "truck"],
active_check_iou=0.75,
motion_classifier_enabled=True,
)
def get_stationary_threshold(label: str) -> StationaryThresholds:
"""Get the stationary thresholds for a given object label."""
if label in STATIONARY_OBJECT_THRESHOLDS.objects:
return STATIONARY_OBJECT_THRESHOLDS
if label in DYNAMIC_OBJECT_THRESHOLDS.objects:
return DYNAMIC_OBJECT_THRESHOLDS
return StationaryThresholds()
THRESHOLD_KNOWN_ACTIVE_IOU = 0.2
THRESHOLD_STATIONARY_CHECK_IOU = 0.6
THRESHOLD_ACTIVE_CHECK_IOU = 0.9
MAX_STATIONARY_HISTORY = 10
class StationaryMotionClassifier:

View File

@@ -995,26 +995,7 @@ def get_histogram(image, x_min, y_min, x_max, y_max):
return cv2.normalize(hist, hist).flatten()
def create_thumbnail(
yuv_frame: np.ndarray, box: tuple[int, int, int, int], height=500
) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame."""
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
region = calculate_region(
frame.shape, box[0], box[1], box[2], box[3], height, multiplier=1.4
)
frame = frame[region[1] : region[3], region[0] : region[2]]
width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if ret:
return jpg.tobytes()
return None
def ensure_jpeg_bytes(image_data: bytes) -> bytes:
def ensure_jpeg_bytes(image_data):
"""Ensure image data is jpeg bytes for genai"""
try:
img_array = np.frombuffer(image_data, dtype=np.uint8)

View File

@@ -1,163 +0,0 @@
#!/usr/bin/env python3
"""
Generate English translation JSON files from Pydantic config models.
This script dynamically extracts all top-level config sections from FrigateConfig
and generates JSON translation files with titles and descriptions for the web UI.
"""
import json
import logging
import shutil
from pathlib import Path
from typing import Any, Dict, Optional, get_args, get_origin
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from frigate.config.config import FrigateConfig
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def get_field_translations(field_info: FieldInfo) -> Dict[str, str]:
    """Map a Pydantic field's title/description onto translation keys.

    Returns a dict containing "label" (from the field title) and
    "description" (from the field description); empty/None values are
    omitted entirely.
    """
    result: Dict[str, str] = {}

    label = field_info.title
    if label:
        result["label"] = label

    description = field_info.description
    if description:
        result["description"] = description

    return result
def process_model_fields(model: type[BaseModel]) -> Dict[str, Any]:
    """
    Recursively process a Pydantic model to extract translations.

    Returns a nested dictionary structure matching the config schema,
    with title and description for each field. Nested BaseModel fields
    and Dict[str, Model] fields are emitted under a "properties" key.
    """
    translations: Dict[str, Any] = {}
    model_fields = model.model_fields

    for field_name, field_info in model_fields.items():
        field_translations = get_field_translations(field_info)

        # Get the field's type annotation
        field_type = field_info.annotation

        # Unwrap Optional[X] / X | None to the non-None member. Checking
        # for NoneType among get_args covers both typing.Union and PEP 604
        # unions; the previous `origin is Optional` test never matched
        # because get_origin(Optional[X]) is typing.Union.
        union_args = get_args(field_type)
        if type(None) in union_args:
            field_type = next(
                (arg for arg in union_args if arg is not type(None)), field_type
            )

        # Handle Dict types (like Dict[str, CameraConfig]) by documenting
        # the value model's fields.
        if get_origin(field_type) is dict:
            dict_args = get_args(field_type)
            if len(dict_args) >= 2:
                value_type = dict_args[1]
                if isinstance(value_type, type) and issubclass(value_type, BaseModel):
                    nested_translations = process_model_fields(value_type)
                    if nested_translations:
                        field_translations["properties"] = nested_translations
        elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
            nested_translations = process_model_fields(field_type)
            if nested_translations:
                field_translations["properties"] = nested_translations

        if field_translations:
            translations[field_name] = field_translations

    return translations
def generate_section_translation(
    section_name: str, field_info: FieldInfo
) -> Dict[str, Any]:
    """
    Generate translation structure for a top-level config section.

    Args:
        section_name: Name of the top-level config field (not used in the
            output; kept for caller symmetry).
        field_info: Pydantic FieldInfo describing the section.

    Returns:
        Dict with optional "label"/"description" keys plus nested
        "properties" for model-typed (or Dict[str, Model]) sections.
    """
    section_translations = get_field_translations(field_info)

    field_type = field_info.annotation

    # Unwrap Optional[X] / X | None to the non-None member. NoneType
    # membership in get_args covers both typing.Union and PEP 604 unions;
    # the old `origin is Optional` check never matched because
    # get_origin(Optional[X]) is typing.Union.
    union_args = get_args(field_type)
    if type(None) in union_args:
        field_type = next(
            (arg for arg in union_args if arg is not type(None)), field_type
        )

    # Handle Dict types (like detectors, cameras, camera_groups)
    if get_origin(field_type) is dict:
        dict_args = get_args(field_type)
        if len(dict_args) >= 2:
            value_type = dict_args[1]
            if isinstance(value_type, type) and issubclass(value_type, BaseModel):
                nested = process_model_fields(value_type)
                if nested:
                    section_translations["properties"] = nested
    # If the field itself is a BaseModel, process it
    elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
        nested = process_model_fields(field_type)
        if nested:
            section_translations["properties"] = nested

    return section_translations
def main():
    """Main function to generate config translations."""
    # Per-section JSON files live under the web UI's English locale tree.
    target_dir = Path(__file__).parent / "web" / "public" / "locales" / "en" / "config"
    logger.info(f"Output directory: {target_dir}")

    # Start from a clean directory so stale section files don't linger.
    if target_dir.exists():
        logger.info(f"Removing existing directory: {target_dir}")
        shutil.rmtree(target_dir)

    logger.info(f"Creating directory: {target_dir}")
    target_dir.mkdir(parents=True, exist_ok=True)

    config_fields = FrigateConfig.model_fields
    logger.info(f"Found {len(config_fields)} top-level config sections")

    for field_name, field_info in config_fields.items():
        # Private/internal sections are not user-facing config.
        if field_name.startswith("_"):
            continue

        logger.info(f"Processing section: {field_name}")
        payload = generate_section_translation(field_name, field_info)
        if not payload:
            logger.warning(f"No translations found for section: {field_name}")
            continue

        target = target_dir / f"{field_name}.json"
        with target.open("w", encoding="utf-8") as f:
            json.dump(payload, f, indent=2, ensure_ascii=False)
        logger.info(f"Generated: {target}")

    logger.info("Translation generation complete!")
if __name__ == "__main__":
main()

View File

@@ -4,8 +4,8 @@
"rsc": false,
"tsx": true,
"tailwind": {
"config": "tailwind.config.cjs",
"css": "src/index.css",
"config": "tailwind.config.js",
"css": "index.css",
"baseColor": "slate",
"cssVariables": true
},

752
web/package-lock.json generated
View File

@@ -15,7 +15,7 @@
"@radix-ui/react-aspect-ratio": "^1.1.2",
"@radix-ui/react-checkbox": "^1.1.4",
"@radix-ui/react-context-menu": "^2.2.6",
"@radix-ui/react-dialog": "^1.1.15",
"@radix-ui/react-dialog": "^1.1.6",
"@radix-ui/react-dropdown-menu": "^2.1.6",
"@radix-ui/react-hover-card": "^1.1.6",
"@radix-ui/react-label": "^2.1.2",
@@ -23,14 +23,14 @@
"@radix-ui/react-radio-group": "^1.2.3",
"@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-select": "^2.1.6",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-separator": "^1.1.2",
"@radix-ui/react-slider": "^1.2.3",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-slot": "^1.2.2",
"@radix-ui/react-switch": "^1.1.3",
"@radix-ui/react-tabs": "^1.1.3",
"@radix-ui/react-toggle": "^1.1.2",
"@radix-ui/react-toggle-group": "^1.1.2",
"@radix-ui/react-tooltip": "^1.2.8",
"@radix-ui/react-tooltip": "^1.1.8",
"apexcharts": "^3.52.0",
"axios": "^1.7.7",
"class-variance-authority": "^0.7.1",
@@ -1250,42 +1250,6 @@
}
}
},
"node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-dialog": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.6.tgz",
"integrity": "sha512-/IVhJV5AceX620DUJ4uYVMymzsipdKBzo3edo+omeskCKGm9FRHM0ebIdbPnlQVJqyuHbuBltQUOG2mOTq2IYw==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.1",
"@radix-ui/react-compose-refs": "1.1.1",
"@radix-ui/react-context": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.5",
"@radix-ui/react-focus-guards": "1.1.1",
"@radix-ui/react-focus-scope": "1.1.2",
"@radix-ui/react-id": "1.1.0",
"@radix-ui/react-portal": "1.1.4",
"@radix-ui/react-presence": "1.1.2",
"@radix-ui/react-primitive": "2.0.2",
"@radix-ui/react-slot": "1.1.2",
"@radix-ui/react-use-controllable-state": "1.1.0",
"aria-hidden": "^1.2.4",
"react-remove-scroll": "^2.6.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
@@ -1483,23 +1447,23 @@
}
},
"node_modules/@radix-ui/react-dialog": {
"version": "1.1.15",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz",
"integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==",
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.6.tgz",
"integrity": "sha512-/IVhJV5AceX620DUJ4uYVMymzsipdKBzo3edo+omeskCKGm9FRHM0ebIdbPnlQVJqyuHbuBltQUOG2mOTq2IYw==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-dismissable-layer": "1.1.11",
"@radix-ui/react-focus-guards": "1.1.3",
"@radix-ui/react-focus-scope": "1.1.7",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-portal": "1.1.9",
"@radix-ui/react-presence": "1.1.5",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-use-controllable-state": "1.2.2",
"@radix-ui/primitive": "1.1.1",
"@radix-ui/react-compose-refs": "1.1.1",
"@radix-ui/react-context": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.5",
"@radix-ui/react-focus-guards": "1.1.1",
"@radix-ui/react-focus-scope": "1.1.2",
"@radix-ui/react-id": "1.1.0",
"@radix-ui/react-portal": "1.1.4",
"@radix-ui/react-presence": "1.1.2",
"@radix-ui/react-primitive": "2.0.2",
"@radix-ui/react-slot": "1.1.2",
"@radix-ui/react-use-controllable-state": "1.1.0",
"aria-hidden": "^1.2.4",
"react-remove-scroll": "^2.6.3"
},
@@ -1518,255 +1482,14 @@
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/primitive": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
"integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==",
"license": "MIT"
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-compose-refs": {
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz",
"integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-context": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz",
"integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-dismissable-layer": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz",
"integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
"integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-escape-keydown": "1.1.1"
"@radix-ui/react-compose-refs": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-guards": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz",
"integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-scope": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz",
"integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-id": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz",
"integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-portal": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
"integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-presence": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz",
"integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz",
"integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-controllable-state": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz",
"integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-effect-event": "0.0.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-escape-keydown": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz",
"integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-layout-effect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
@@ -2350,35 +2073,12 @@
}
},
"node_modules/@radix-ui/react-separator": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz",
"integrity": "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==",
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.2.tgz",
"integrity": "sha512-oZfHcaAp2Y6KFBX6I5P1u7CQoy4lheCGiYj+pGFrHy8E/VNRb5E39TkTr3JrV520csPBTZjkuKFdEsjS5EUNKQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
"@radix-ui/react-primitive": "2.0.2"
},
"peerDependencies": {
"@types/react": "*",
@@ -2429,9 +2129,9 @@
}
},
"node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.2.tgz",
"integrity": "sha512-y7TBO4xN4Y94FvcWIOIh18fM4R1A8S4q1jhoz4PNzOoHsFcN8pogcFmZrTYAm4F9VRUrWP/Mw7xSKybIeRI+CQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
@@ -2575,23 +2275,23 @@
}
},
"node_modules/@radix-ui/react-tooltip": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz",
"integrity": "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==",
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.8.tgz",
"integrity": "sha512-YAA2cu48EkJZdAMHC0dqo9kialOcRStbtiY4nJPaht7Ptrhcvpo+eDChaM6BIs8kL6a8Z5l5poiqLnXcNduOkA==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-dismissable-layer": "1.1.11",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-popper": "1.2.8",
"@radix-ui/react-portal": "1.1.9",
"@radix-ui/react-presence": "1.1.5",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-use-controllable-state": "1.2.2",
"@radix-ui/react-visually-hidden": "1.2.3"
"@radix-ui/primitive": "1.1.1",
"@radix-ui/react-compose-refs": "1.1.1",
"@radix-ui/react-context": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.5",
"@radix-ui/react-id": "1.1.0",
"@radix-ui/react-popper": "1.2.2",
"@radix-ui/react-portal": "1.1.4",
"@radix-ui/react-presence": "1.1.2",
"@radix-ui/react-primitive": "2.0.2",
"@radix-ui/react-slot": "1.1.2",
"@radix-ui/react-use-controllable-state": "1.1.0",
"@radix-ui/react-visually-hidden": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
@@ -2608,99 +2308,13 @@
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/primitive": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
"integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==",
"license": "MIT"
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-arrow": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz",
"integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-compose-refs": {
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz",
"integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-context": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz",
"integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-dismissable-layer": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz",
"integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
"integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-escape-keydown": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-id": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz",
"integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
"@radix-ui/react-compose-refs": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
@@ -2712,241 +2326,6 @@
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-popper": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz",
"integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==",
"license": "MIT",
"dependencies": {
"@floating-ui/react-dom": "^2.0.0",
"@radix-ui/react-arrow": "1.1.7",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-layout-effect": "1.1.1",
"@radix-ui/react-use-rect": "1.1.1",
"@radix-ui/react-use-size": "1.1.1",
"@radix-ui/rect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-portal": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
"integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-presence": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz",
"integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz",
"integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-controllable-state": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz",
"integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-effect-event": "0.0.2",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-escape-keydown": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz",
"integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-layout-effect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-rect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz",
"integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==",
"license": "MIT",
"dependencies": {
"@radix-ui/rect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-use-size": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz",
"integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-visually-hidden": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz",
"integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/rect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz",
"integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==",
"license": "MIT"
},
"node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz",
@@ -2980,39 +2359,6 @@
}
}
},
"node_modules/@radix-ui/react-use-effect-event": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz",
"integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-use-effect-event/node_modules/@radix-ui/react-use-layout-effect": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz",
"integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-use-escape-keydown": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz",

View File

@@ -21,7 +21,7 @@
"@radix-ui/react-aspect-ratio": "^1.1.2",
"@radix-ui/react-checkbox": "^1.1.4",
"@radix-ui/react-context-menu": "^2.2.6",
"@radix-ui/react-dialog": "^1.1.15",
"@radix-ui/react-dialog": "^1.1.6",
"@radix-ui/react-dropdown-menu": "^2.1.6",
"@radix-ui/react-hover-card": "^1.1.6",
"@radix-ui/react-label": "^2.1.2",
@@ -29,14 +29,14 @@
"@radix-ui/react-radio-group": "^1.2.3",
"@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-select": "^2.1.6",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-separator": "^1.1.2",
"@radix-ui/react-slider": "^1.2.3",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-slot": "^1.2.2",
"@radix-ui/react-switch": "^1.1.3",
"@radix-ui/react-tabs": "^1.1.3",
"@radix-ui/react-toggle": "^1.1.2",
"@radix-ui/react-toggle-group": "^1.1.2",
"@radix-ui/react-tooltip": "^1.2.8",
"@radix-ui/react-tooltip": "^1.1.8",
"apexcharts": "^3.52.0",
"axios": "^1.7.7",
"class-variance-authority": "^0.7.1",

View File

@@ -56,14 +56,7 @@
"formattedTimestampMonthDayYear": {
"12hour": "МММ д, гггг",
"24hour": "МММ д, гггг"
},
"ago": "Преди {{timeAgo}}",
"untilForTime": "До {{time}}",
"untilForRestart": "Докато Frigate рестартира.",
"untilRestart": "До рестарт",
"mo": "{{time}}мес",
"m": "{{time}}м",
"s": "{{time}}с"
}
},
"button": {
"apply": "Приложи",

View File

@@ -423,7 +423,7 @@
"paths": {
"title": "Cesty",
"desc": "Zobrazit významné body trasy sledovaného objektu",
"tips": "<p><strong>Cesty</strong></p><br><p>Čáry a kruhy označují významné body, kterými se sledovaný objekt během svého životního cyklu pohyboval.</p>"
"tips": "<p><strong>Cesty</strong></p><br><p>Čáry a kruhy označují významné body, kterými se sledovaný objekt během svého životního cyklu pohyboval."
}
},
"camera": {
@@ -604,8 +604,7 @@
"admin": "Správce",
"adminDesc": "Plný přístup ke všem funkcím.",
"viewer": "Divák",
"viewerDesc": "Omezení pouze na Živé dashboardy, Revize, Průzkumníka a Exporty.",
"customDesc": "Vlastní role s konkrétním přístupem ke kameře."
"viewerDesc": "Omezení pouze na Živé dashboardy, Revize, Průzkumníka a Exporty."
},
"title": "Změnit Roli Uživatele",
"desc": "Aktualizovat oprávnění pro <strong>{{username}}</strong>",
@@ -795,99 +794,9 @@
"title": "Obsah",
"imagePlaceholder": "Vybrat obrázek",
"textPlaceholder": "Zadat textový obsah",
"imageDesc": "Vybrat obrázek, který spustí tuto akci, když bude detekován podobný obrázek.",
"textDesc": "Zadejte text, který spustí tuto akci, když bude zjištěn podobný popis sledovaného objektu.",
"error": {
"required": "Obsah je povinný."
}
},
"actions": {
"title": "Akce",
"desc": "Ve výchozím nastavení Frigate odesílá MQTT zprávu pro všechny spouštěče. Zvolte dodatečnou akci, která se má provést, když se tento spouštěč aktivuje.",
"error": {
"min": "Musí být vybrána alespoň jedna akce."
}
},
"threshold": {
"title": "Práh",
"error": {
"min": "Práh musí být alespoň 0",
"max": "Práh musí být nanejvýš 1"
}
"imageDesc": "Vybrat obrázek, který spustí tuto akci, když bude detekován podobný obrázek."
}
}
},
"toast": {
"success": {
"createTrigger": "Spouštěč {{name}} byl úspěšně vytvořen.",
"updateTrigger": "Spouštěč {{name}} byl úspěšně aktualizován.",
"deleteTrigger": "Spouštěč {{name}} byl úspěšně smazán."
},
"error": {
"createTriggerFailed": "Nepodařilo se vytvořit spouštěč: {{errorMessage}}",
"updateTriggerFailed": "Nepodařilo se aktualizovat spouštěč: {{errorMessage}}",
"deleteTriggerFailed": "Nepodařilo se smazat spouštěč: {{errorMessage}}"
}
}
},
"roles": {
"addRole": "Přidat roli",
"table": {
"role": "Role",
"cameras": "Kamery",
"actions": "Akce",
"noRoles": "Nebyly nalezeny žádné vlastní role.",
"editCameras": "Upravit kamery",
"deleteRole": "Smazat roli"
},
"toast": {
"success": {
"createRole": "Role {{role}} byla úspěšně vytvořena",
"updateCameras": "Kamery byly aktualizovány pro roli {{role}}",
"deleteRole": "Role {{role}} byla úspěšně smazána",
"userRolesUpdated": "{{count}} uživatel(ů) přiřazených k této roli bylo aktualizováno na „Divák“, který má přístup ke všem kamerám."
},
"error": {
"createRoleFailed": "Nepodařilo se vytvořit roli: {{errorMessage}}",
"updateCamerasFailed": "Nepodařilo se aktualizovat kamery: {{errorMessage}}",
"deleteRoleFailed": "Nepodařilo se smazat roli: {{errorMessage}}",
"userUpdateFailed": "Nepodařilo se aktualizovat role uživatele: {{errorMessage}}"
}
},
"dialog": {
"createRole": {
"title": "Vytvořit novou roli",
"desc": "Přidejte novou roli a určete oprávnění k přístupu ke kamerám."
},
"deleteRole": {
"title": "Smazat roli",
"warn": "Opravdu chcete smazat roli <strong>{{role}}</strong>?",
"deleting": "Mazání...",
"desc": "Tuto akci nelze vrátit zpět. Role bude trvale smazána a všichni uživatelé s touto rolí budou přeřazeni do role „Divák“, která poskytne přístup ke všem kamerám."
},
"form": {
"role": {
"title": "Název role",
"placeholder": "Zadejte název role",
"desc": "Povolena jsou pouze písmena, čísla, tečky a podtržítka.",
"roleIsRequired": "Název role je povinný",
"roleOnlyInclude": "Název role smí obsahovat pouze písmena, čísla, . nebo _",
"roleExists": "Role s tímto názvem již existuje."
},
"cameras": {
"title": "Kamery",
"desc": "Vyberte kamery, ke kterým má tato role přístup. Je vyžadována alespoň jedna kamera.",
"required": "Musí být vybrána alespoň jedna kamera."
}
},
"editCameras": {
"desc": "Aktualizujte přístup ke kamerám pro roli <strong>{{role}}</strong>.",
"title": "Upravit kamery role"
}
},
"management": {
"title": "Správa role diváka",
"desc": "Spravujte vlastní role diváků a jejich oprávnění k přístupu ke kamerám pro tuto instanci Frigate."
}
}
}

View File

@@ -5,80 +5,5 @@
"moo": "Bučanie",
"cowbell": "Kravský zvonec",
"pig": "Prasa",
"speech": "Tale",
"bicycle": "Cykel",
"car": "Bil",
"bellow": "Under",
"motorcycle": "Motorcykel",
"whispering": "Hvisker",
"bus": "Bus",
"laughter": "Latter",
"train": "Tog",
"boat": "Båd",
"crying": "Græder",
"tambourine": "Tambourin",
"marimba": "Marimba",
"trumpet": "Trumpet",
"trombone": "Trombone",
"violin": "Violin",
"flute": "Fløjte",
"saxophone": "Saxofon",
"clarinet": "Klarinet",
"harp": "Harpe",
"bell": "Klokke",
"harmonica": "Harmonika",
"bagpipes": "Sækkepibe",
"didgeridoo": "Didgeridoo",
"jazz": "Jazz",
"opera": "Opera",
"dubstep": "Dubstep",
"blues": "Blues",
"song": "Sang",
"lullaby": "Vuggevise",
"wind": "Vind",
"thunderstorm": "Tordenvejr",
"thunder": "Torden",
"water": "Vand",
"rain": "Regn",
"raindrop": "Regndråbe",
"waterfall": "Vandfald",
"waves": "Bølger",
"fire": "Ild",
"vehicle": "Køretøj",
"sailboat": "Sejlbåd",
"rowboat": "Robåd",
"motorboat": "Motorbåd",
"ship": "Skib",
"ambulance": "Ambulance",
"helicopter": "Helikopter",
"skateboard": "Skateboard",
"chainsaw": "Motorsav",
"door": "Dør",
"doorbell": "Dørklokke",
"slam": "Smæk",
"knock": "Bank",
"squeak": "Knirke",
"dishes": "Tallerkener",
"cutlery": "Bestik",
"sink": "Håndvask",
"bathtub": "Badekar",
"toothbrush": "Tandbørste",
"zipper": "Lynlås",
"coin": "Mønt",
"scissors": "Saks",
"typewriter": "Skrivemaskine",
"alarm": "Alarm",
"telephone": "Telefon",
"ringtone": "Ringetone",
"siren": "Sirene",
"foghorn": "Tågehorn",
"whistle": "Fløjte",
"clock": "Ur",
"printer": "Printer",
"camera": "Kamera",
"tools": "Værktøj",
"hammer": "Hammer",
"drill": "Bore",
"explosion": "Eksplosion",
"fireworks": "Nytårskrudt"
"speech": "Tale"
}

View File

@@ -5,9 +5,7 @@
"login": "Log ind",
"errors": {
"usernameRequired": "Brugernavn kræves",
"passwordRequired": "Kodeord kræves",
"loginFailed": "Login fejlede",
"unknownError": "Ukendt fejl. Tjek logs."
"passwordRequired": "Kodeord kræves"
}
}
}

View File

@@ -1,17 +1,6 @@
{
"group": {
"label": "Kamera Grupper",
"add": "Tilføj Kameragruppe",
"edit": "Rediger Kamera Gruppe",
"delete": {
"label": "Slet kamera gruppe",
"confirm": {
"title": "Bekræft sletning",
"desc": "Er du sikker på at du vil slette kamera gruppen <em>{{name}}</em>?"
}
},
"name": {
"label": "Navn"
}
"add": "Tilføj Kameragruppe"
}
}

View File

@@ -1,9 +1 @@
{
"restart": {
"title": "Er du sikker på at du vil genstarte Frigate?",
"button": "Genstart",
"restarting": {
"title": "Frigate genstarter"
}
}
}
{}

View File

@@ -1,17 +1 @@
{
"filter": "Filter",
"classes": {
"label": "Klasser",
"all": {
"title": "Alle klasser"
},
"count_one": "{{count}} Klasse",
"count_other": "{{count}} Klasser"
},
"labels": {
"all": {
"short": "Labels"
},
"count_one": "{{count}} Label"
}
}
{}

View File

@@ -1,8 +1,5 @@
{
"iconPicker": {
"selectIcon": "Vælg et ikon",
"search": {
"placeholder": "Søg efter ikoner…"
}
"selectIcon": "Vælg et ikon"
}
}

View File

@@ -1,7 +1 @@
{
"button": {
"downloadVideo": {
"label": "Download Video"
}
}
}
{}

View File

@@ -1,5 +1 @@
{
"noRecordingsFoundForThisTime": "Ingen optagelser fundet i det angivet tidsrum",
"noPreviewFound": "Ingen forhåndsvisning fundet",
"cameraDisabled": "Kamera er deaktiveret"
}
{}

View File

@@ -1,18 +1,3 @@
{
"person": "Person",
"bicycle": "Cykel",
"car": "Bil",
"motorcycle": "Motorcykel",
"airplane": "Flyvemaskine",
"bus": "Bus",
"train": "Tog",
"boat": "Båd",
"traffic_light": "Trafiklys",
"vehicle": "Køretøj",
"skateboard": "Skateboard",
"door": "Dør",
"sink": "Håndvask",
"toothbrush": "Tandbørste",
"scissors": "Saks",
"clock": "Ur"
"person": "Person"
}

View File

@@ -1,6 +1 @@
{
"documentTitle": "Konfigurationsstyring - Frigate",
"copyConfig": "Kopiér konfiguration",
"saveAndRestart": "Gem & Genstart",
"saveOnly": "Kun gem"
}
{}

View File

@@ -1,11 +1 @@
{
"alerts": "Alarmer",
"detections": "Detekteringer",
"motion": {
"label": "Bevægelse",
"only": "Kun bevægelse"
},
"allCameras": "Alle kameraer",
"timeline": "Tidslinje",
"camera": "Kamera"
}
{}

View File

@@ -9,11 +9,5 @@
"lifecycleItemDesc": {
"active": "{{label}} blev aktiv"
}
},
"exploreIsUnavailable": {
"embeddingsReindexing": {
"startingUp": "Starter…",
"estimatedTime": "Estimeret tid tilbage:"
}
}
}

View File

@@ -1,9 +1,4 @@
{
"documentTitle": "Eksporter - Frigate",
"search": "Søg",
"deleteExport.desc": "Er du sikker på at du vil slette {{exportName}}?",
"editExport": {
"title": "Omdøb Eksport",
"saveExport": "Gem Eksport"
}
"search": "Søg"
}

View File

@@ -1,10 +1,3 @@
{
"selectItem": "Vælg {{item}}",
"description": {
"addFace": "Gennemgang af tilføjelse til ansigts bibliotek",
"placeholder": "Angiv et navn for bibliotek"
},
"details": {
"person": "Person"
}
"selectItem": "Vælg {{item}}"
}

View File

@@ -1,12 +1 @@
{
"documentTitle": "Live - Frigate",
"documentTitle.withCamera": "{{camera}} - Live - Frigate",
"twoWayTalk": {
"enable": "Aktivér tovejskommunikation",
"disable": "Deaktiver tovejskommunikation"
},
"cameraAudio": {
"enable": "Aktivér kameralyd",
"disable": "Deaktivér kamera lyd"
}
}
{}

View File

@@ -1,11 +1 @@
{
"filter": "Filter",
"export": "Eksporter",
"calendar": "Kalender",
"filters": "Filtere",
"toast": {
"error": {
"endTimeMustAfterStartTime": "Sluttidspunkt skal være efter starttidspunkt"
}
}
}
{}

View File

@@ -1,11 +1,3 @@
{
"search": "Søg",
"savedSearches": "Gemte Søgninger",
"searchFor": "Søg efter {{inputValue}}",
"button": {
"save": "Gem søgning",
"delete": "Slet gemt søgning",
"filterInformation": "Filter information",
"filterActive": "Filtre aktiv"
}
"search": "Søg"
}

View File

@@ -1,8 +1,5 @@
{
"documentTitle": {
"default": "Indstillinger - Frigate",
"authentication": "Bruger Indstillinger - Frigate",
"camera": "Kamera indstillinger - Frigate",
"object": "Debug - Frigate"
"default": "Indstillinger - Frigate"
}
}

View File

@@ -1,12 +1 @@
{
"documentTitle": {
"cameras": "Kamera Statistik - Frigate",
"storage": "Lagrings Statistik - Frigate",
"logs": {
"frigate": "Frigate Logs - Frigate",
"go2rtc": "Go2RTC Logs - Frigate",
"nginx": "Nginx Logs - Frigate"
}
},
"title": "System"
}
{}

View File

@@ -1,26 +0,0 @@
{
"label": "Global Audio events configuration.",
"properties": {
"enabled": {
"label": "Enable audio events."
},
"max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event."
},
"min_volume": {
"label": "Min volume required to run audio detection."
},
"listen": {
"label": "Audio to listen for."
},
"filters": {
"label": "Audio filters."
},
"enabled_in_config": {
"label": "Keep track of original state of audio detection."
},
"num_threads": {
"label": "Number of detection threads"
}
}
}

View File

@@ -1,23 +0,0 @@
{
"label": "Audio transcription config.",
"properties": {
"enabled": {
"label": "Enable audio transcription."
},
"language": {
"label": "Language abbreviation to use for audio event transcription/translation."
},
"device": {
      "label": "The device used for audio transcription."
},
"model_size": {
"label": "The size of the embeddings model used."
},
"enabled_in_config": {
"label": "Keep track of original state of camera."
},
"live_enabled": {
"label": "Enable live transcriptions."
}
}
}

View File

@@ -1,35 +0,0 @@
{
"label": "Auth configuration.",
"properties": {
"enabled": {
"label": "Enable authentication"
},
"reset_admin_password": {
"label": "Reset the admin password on startup"
},
"cookie_name": {
"label": "Name for jwt token cookie"
},
"cookie_secure": {
"label": "Set secure flag on cookie"
},
"session_length": {
"label": "Session length for jwt session tokens"
},
"refresh_time": {
"label": "Refresh the session if it is going to expire in this many seconds"
},
"failed_login_rate_limit": {
"label": "Rate limits for failed login attempts."
},
"trusted_proxies": {
"label": "Trusted proxies for determining IP address to rate limit"
},
"hash_iterations": {
"label": "Password hash iterations"
},
"roles": {
"label": "Role to camera mappings. Empty list grants access to all cameras."
}
}
}

View File

@@ -1,37 +0,0 @@
{
"label": "Birdseye configuration.",
"properties": {
"enabled": {
"label": "Enable birdseye view."
},
"mode": {
"label": "Tracking mode."
},
"restream": {
"label": "Restream birdseye via RTSP."
},
"width": {
"label": "Birdseye width."
},
"height": {
"label": "Birdseye height."
},
"quality": {
"label": "Encoding quality."
},
"inactivity_threshold": {
"label": "Birdseye Inactivity Threshold"
},
"layout": {
"label": "Birdseye Layout Config",
"properties": {
"scaling_factor": {
"label": "Birdseye Scaling Factor"
},
"max_cameras": {
"label": "Max cameras"
}
}
}
}
}

View File

@@ -1,14 +0,0 @@
{
"label": "Camera group configuration",
"properties": {
"cameras": {
"label": "List of cameras in this group."
},
"icon": {
"label": "Icon that represents camera group."
},
"order": {
"label": "Sort order for group."
}
}
}

View File

@@ -1,761 +0,0 @@
{
"label": "Camera configuration.",
"properties": {
"name": {
"label": "Camera name."
},
"friendly_name": {
"label": "Camera friendly name used in the Frigate UI."
},
"enabled": {
"label": "Enable camera."
},
"audio": {
"label": "Audio events configuration.",
"properties": {
"enabled": {
"label": "Enable audio events."
},
"max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event."
},
"min_volume": {
"label": "Min volume required to run audio detection."
},
"listen": {
"label": "Audio to listen for."
},
"filters": {
"label": "Audio filters."
},
"enabled_in_config": {
"label": "Keep track of original state of audio detection."
},
"num_threads": {
"label": "Number of detection threads"
}
}
},
"audio_transcription": {
"label": "Audio transcription config.",
"properties": {
"enabled": {
"label": "Enable audio transcription."
},
"language": {
"label": "Language abbreviation to use for audio event transcription/translation."
},
"device": {
          "label": "The device used for audio transcription."
},
"model_size": {
"label": "The size of the embeddings model used."
},
"enabled_in_config": {
"label": "Keep track of original state of camera."
},
"live_enabled": {
"label": "Enable live transcriptions."
}
}
},
"birdseye": {
"label": "Birdseye camera configuration.",
"properties": {
"enabled": {
"label": "Enable birdseye view for camera."
},
"mode": {
"label": "Tracking mode for camera."
},
"order": {
"label": "Position of the camera in the birdseye view."
}
}
},
"detect": {
"label": "Object detection configuration.",
"properties": {
"enabled": {
"label": "Detection Enabled."
},
"height": {
"label": "Height of the stream for the detect role."
},
"width": {
"label": "Width of the stream for the detect role."
},
"fps": {
"label": "Number of frames per second to process through detection."
},
"min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker."
},
"max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends."
},
"stationary": {
"label": "Stationary objects config.",
"properties": {
"interval": {
"label": "Frame interval for checking stationary objects."
},
"threshold": {
"label": "Number of frames without a position change for an object to be considered stationary"
},
"max_frames": {
"label": "Max frames for stationary objects.",
"properties": {
"default": {
"label": "Default max frames."
},
"objects": {
"label": "Object specific max frames."
}
}
},
"classifier": {
        "label": "Enable visual classifier for determining if objects with jittery bounding boxes are stationary."
}
}
},
"annotation_offset": {
"label": "Milliseconds to offset detect annotations by."
}
}
},
"face_recognition": {
"label": "Face recognition config.",
"properties": {
"enabled": {
"label": "Enable face recognition."
},
"min_area": {
"label": "Min area of face box to consider running face recognition."
}
}
},
"ffmpeg": {
"label": "FFmpeg configuration for the camera.",
"properties": {
"path": {
"label": "FFmpeg path"
},
"global_args": {
"label": "Global FFmpeg arguments."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments."
},
"input_args": {
"label": "FFmpeg input arguments."
},
"output_args": {
"label": "FFmpeg output arguments per role.",
"properties": {
"detect": {
"label": "Detect role FFmpeg output arguments."
},
"record": {
"label": "Record role FFmpeg output arguments."
}
}
},
"retry_interval": {
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera."
},
"apple_compatibility": {
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players."
},
"inputs": {
"label": "Camera inputs."
}
}
},
"live": {
"label": "Live playback settings.",
"properties": {
"streams": {
"label": "Friendly names and restream names to use for live view."
},
"height": {
"label": "Live camera view height"
},
"quality": {
"label": "Live camera view quality"
}
}
},
"lpr": {
"label": "LPR config.",
"properties": {
"enabled": {
"label": "Enable license plate recognition."
},
"expire_time": {
"label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)."
},
"min_area": {
"label": "Minimum area of license plate to begin running recognition."
},
"enhancement": {
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition."
}
}
},
"motion": {
"label": "Motion detection configuration.",
"properties": {
"enabled": {
"label": "Enable motion on all cameras."
},
"threshold": {
"label": "Motion detection threshold (1-255)."
},
"lightning_threshold": {
"label": "Lightning detection threshold (0.3-1.0)."
},
"improve_contrast": {
"label": "Improve Contrast"
},
"contour_area": {
"label": "Contour Area"
},
"delta_alpha": {
"label": "Delta Alpha"
},
"frame_alpha": {
"label": "Frame Alpha"
},
"frame_height": {
"label": "Frame Height"
},
"mask": {
"label": "Coordinates polygon for the motion mask."
},
"mqtt_off_delay": {
"label": "Delay for updating MQTT with no motion detected."
},
"enabled_in_config": {
"label": "Keep track of original state of motion detection."
}
}
},
"objects": {
"label": "Object configuration.",
"properties": {
"track": {
"label": "Objects to track."
},
"filters": {
"label": "Object filters.",
"properties": {
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration."
}
}
},
"mask": {
"label": "Object mask."
},
"genai": {
"label": "Config for using genai to analyze objects.",
"properties": {
"enabled": {
"label": "Enable GenAI for camera."
},
"use_snapshot": {
"label": "Use snapshots for generating descriptions."
},
"prompt": {
"label": "Default caption prompt."
},
"object_prompts": {
"label": "Object specific prompts."
},
"objects": {
"label": "List of objects to run generative AI for."
},
"required_zones": {
"label": "List of required zones to be entered in order to run generative AI."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
},
"send_triggers": {
"label": "What triggers to use to send frames to generative AI for a tracked object.",
"properties": {
"tracked_object_end": {
"label": "Send once the object is no longer tracked."
},
"after_significant_updates": {
"label": "Send an early request to generative AI when X frames accumulated."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
}
}
}
}
},
"record": {
"label": "Record configuration.",
"properties": {
"enabled": {
"label": "Enable record on all cameras."
},
"sync_recordings": {
"label": "Sync recordings with disk on startup and once a day."
},
"expire_interval": {
"label": "Number of minutes to wait between cleanup runs."
},
"continuous": {
"label": "Continuous recording retention settings.",
"properties": {
"days": {
"label": "Default retention period."
}
}
},
"motion": {
"label": "Motion recording retention settings.",
"properties": {
"days": {
"label": "Default retention period."
}
}
},
"detections": {
"label": "Detection specific retention settings.",
"properties": {
"pre_capture": {
"label": "Seconds to retain before event starts."
},
"post_capture": {
"label": "Seconds to retain after event ends."
},
"retain": {
"label": "Event retention settings.",
"properties": {
"days": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
}
}
}
}
},
"alerts": {
"label": "Alert specific retention settings.",
"properties": {
"pre_capture": {
"label": "Seconds to retain before event starts."
},
"post_capture": {
"label": "Seconds to retain after event ends."
},
"retain": {
"label": "Event retention settings.",
"properties": {
"days": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
}
}
}
}
},
"export": {
"label": "Recording Export Config",
"properties": {
"timelapse_args": {
"label": "Timelapse Args"
}
}
},
"preview": {
"label": "Recording Preview Config",
"properties": {
"quality": {
"label": "Quality of recording preview."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of recording."
}
}
},
"review": {
"label": "Review configuration.",
"properties": {
"alerts": {
"label": "Review alerts config.",
"properties": {
"enabled": {
"label": "Enable alerts."
},
"labels": {
"label": "Labels to create alerts for."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as an alert."
},
"enabled_in_config": {
"label": "Keep track of original state of alerts."
},
"cutoff_time": {
"label": "Time to cutoff alerts after no alert-causing activity has occurred."
}
}
},
"detections": {
"label": "Review detections config.",
"properties": {
"enabled": {
"label": "Enable detections."
},
"labels": {
"label": "Labels to create detections for."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as a detection."
},
"cutoff_time": {
"label": "Time to cutoff detection after no detection-causing activity has occurred."
},
"enabled_in_config": {
"label": "Keep track of original state of detections."
}
}
},
"genai": {
"label": "Review description genai config.",
"properties": {
"enabled": {
"label": "Enable GenAI descriptions for review items."
},
"alerts": {
"label": "Enable GenAI for alerts."
},
"detections": {
"label": "Enable GenAI for detections."
},
"additional_concerns": {
"label": "Additional concerns that GenAI should make note of on this camera."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
},
"preferred_language": {
"label": "Preferred language for GenAI Response"
},
"activity_context_prompt": {
"label": "Custom activity context prompt defining normal activity patterns for this property."
}
}
}
}
},
"semantic_search": {
"label": "Semantic search configuration.",
"properties": {
"triggers": {
"label": "Trigger actions on tracked objects that match existing thumbnails or descriptions",
"properties": {
"enabled": {
"label": "Enable this trigger"
},
"type": {
"label": "Type of trigger"
},
"data": {
"label": "Trigger content (text phrase or image ID)"
},
"threshold": {
"label": "Confidence score required to run the trigger"
},
"actions": {
"label": "Actions to perform when trigger is matched"
}
}
}
}
},
"snapshots": {
"label": "Snapshot configuration.",
"properties": {
"enabled": {
"label": "Snapshots enabled."
},
"clean_copy": {
"label": "Create a clean copy of the snapshot image."
},
"timestamp": {
"label": "Add a timestamp overlay on the snapshot."
},
"bounding_box": {
"label": "Add a bounding box overlay on the snapshot."
},
"crop": {
"label": "Crop the snapshot to the detected object."
},
"required_zones": {
"label": "List of required zones to be entered in order to save a snapshot."
},
"height": {
"label": "Snapshot image height."
},
"retain": {
"label": "Snapshot retention.",
"properties": {
"default": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
},
"objects": {
"label": "Object retention period."
}
}
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)."
}
}
},
"timestamp_style": {
"label": "Timestamp style configuration.",
"properties": {
"position": {
"label": "Timestamp position."
},
"format": {
"label": "Timestamp format."
},
"color": {
"label": "Timestamp color.",
"properties": {
"red": {
"label": "Red"
},
"green": {
"label": "Green"
},
"blue": {
"label": "Blue"
}
}
},
"thickness": {
"label": "Timestamp thickness."
},
"effect": {
"label": "Timestamp effect."
}
}
},
"best_image_timeout": {
"label": "How long to wait for the image with the highest confidence score."
},
"mqtt": {
"label": "MQTT configuration.",
"properties": {
"enabled": {
"label": "Send image over MQTT."
},
"timestamp": {
"label": "Add timestamp to MQTT image."
},
"bounding_box": {
"label": "Add bounding box to MQTT image."
},
"crop": {
"label": "Crop MQTT image to detected object."
},
"height": {
"label": "MQTT image height."
},
"required_zones": {
"label": "List of required zones to be entered in order to send the image."
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)."
}
}
},
"notifications": {
"label": "Notifications configuration.",
"properties": {
"enabled": {
"label": "Enable notifications"
},
"email": {
"label": "Email required for push."
},
"cooldown": {
"label": "Cooldown period for notifications (time in seconds)."
},
"enabled_in_config": {
"label": "Keep track of original state of notifications."
}
}
},
"onvif": {
"label": "Camera Onvif Configuration.",
"properties": {
"host": {
"label": "Onvif Host"
},
"port": {
"label": "Onvif Port"
},
"user": {
"label": "Onvif Username"
},
"password": {
"label": "Onvif Password"
},
"tls_insecure": {
"label": "Onvif Disable TLS verification"
},
"autotracking": {
"label": "PTZ auto tracking config.",
"properties": {
"enabled": {
"label": "Enable PTZ object autotracking."
},
"calibrate_on_startup": {
"label": "Perform a camera calibration when Frigate starts."
},
"zooming": {
"label": "Autotracker zooming mode."
},
"zoom_factor": {
"label": "Zooming factor (0.1-0.75)."
},
"track": {
"label": "Objects to track."
},
"required_zones": {
"label": "List of required zones to be entered in order to begin autotracking."
},
"return_preset": {
"label": "Name of camera preset to return to when object tracking is over."
},
"timeout": {
"label": "Seconds to delay before returning to preset."
},
"movement_weights": {
"label": "Internal value used for PTZ movements based on the speed of your camera's motor."
},
"enabled_in_config": {
"label": "Keep track of original state of autotracking."
}
}
},
"ignore_time_mismatch": {
"label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server"
}
}
},
"type": {
"label": "Camera Type"
},
"ui": {
"label": "Camera UI Modifications.",
"properties": {
"order": {
"label": "Order of camera in UI."
},
"dashboard": {
"label": "Show this camera in Frigate dashboard UI."
}
}
},
"webui_url": {
"label": "URL to visit the camera directly from system page"
},
"zones": {
"label": "Zone configuration.",
"properties": {
"filters": {
"label": "Zone filters.",
"properties": {
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration."
}
}
},
"coordinates": {
"label": "Coordinates polygon for the defined zone."
},
"distances": {
"label": "Real-world distances for the sides of quadrilateral for the defined zone."
},
"inertia": {
"label": "Number of consecutive frames required for object to be considered present in the zone."
},
"loitering_time": {
"label": "Number of seconds that an object must loiter to be considered in the zone."
},
"speed_threshold": {
"label": "Minimum speed value for an object to be considered in the zone."
},
"objects": {
"label": "List of objects that can trigger the zone."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of camera."
}
}
}

View File

@@ -1,58 +0,0 @@
{
"label": "Object classification config.",
"properties": {
"bird": {
"label": "Bird classification config.",
"properties": {
"enabled": {
"label": "Enable bird classification."
},
"threshold": {
"label": "Minimum classification score required to be considered a match."
}
}
},
"custom": {
"label": "Custom Classification Model Configs.",
"properties": {
"enabled": {
"label": "Enable running the model."
},
"name": {
"label": "Name of classification model."
},
"threshold": {
"label": "Classification score threshold to change the state."
},
"object_config": {
"properties": {
"objects": {
"label": "Object types to classify."
},
"classification_type": {
"label": "Type of classification that is applied."
}
}
},
"state_config": {
"properties": {
"cameras": {
"label": "Cameras to run classification on.",
"properties": {
"crop": {
"label": "Crop of image frame on this camera to run classification on."
}
}
},
"motion": {
"label": "If classification should be run when motion is detected in the crop."
},
"interval": {
"label": "Interval to run classification on in seconds."
}
}
}
}
}
}
}

View File

@@ -1,8 +0,0 @@
{
"label": "Database configuration.",
"properties": {
"path": {
"label": "Database path."
}
}
}

View File

@@ -1,51 +0,0 @@
{
"label": "Global object tracking configuration.",
"properties": {
"enabled": {
"label": "Detection Enabled."
},
"height": {
"label": "Height of the stream for the detect role."
},
"width": {
"label": "Width of the stream for the detect role."
},
"fps": {
"label": "Number of frames per second to process through detection."
},
"min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker."
},
"max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends."
},
"stationary": {
"label": "Stationary objects config.",
"properties": {
"interval": {
"label": "Frame interval for checking stationary objects."
},
"threshold": {
"label": "Number of frames without a position change for an object to be considered stationary"
},
"max_frames": {
"label": "Max frames for stationary objects.",
"properties": {
"default": {
"label": "Default max frames."
},
"objects": {
"label": "Object specific max frames."
}
}
},
"classifier": {
        "label": "Enable visual classifier for determining if objects with jittery bounding boxes are stationary."
}
}
},
"annotation_offset": {
"label": "Milliseconds to offset detect annotations by."
}
}
}

View File

@@ -1,14 +0,0 @@
{
"label": "Detector hardware configuration.",
"properties": {
"type": {
"label": "Detector Type"
},
"model": {
"label": "Detector specific model configuration."
},
"model_path": {
"label": "Detector specific model path."
}
}
}

View File

@@ -1,3 +0,0 @@
{
"label": "Frigate environment variables."
}

View File

@@ -1,36 +0,0 @@
{
"label": "Face recognition config.",
"properties": {
"enabled": {
"label": "Enable face recognition."
},
"model_size": {
"label": "The size of the embeddings model used."
},
"unknown_score": {
"label": "Minimum face distance score required to be marked as a potential match."
},
"detection_threshold": {
"label": "Minimum face detection score required to be considered a face."
},
"recognition_threshold": {
"label": "Minimum face distance score required to be considered a match."
},
"min_area": {
"label": "Min area of face box to consider running face recognition."
},
"min_faces": {
"label": "Min face recognitions for the sub label to be applied to the person object."
},
"save_attempts": {
"label": "Number of face attempts to save in the train tab."
},
"blur_confidence_filter": {
"label": "Apply blur quality filter to face confidence."
},
"device": {
"label": "The device key to use for face recognition.",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
}
}
}

View File

@@ -1,34 +0,0 @@
{
"label": "Global FFmpeg configuration.",
"properties": {
"path": {
"label": "FFmpeg path"
},
"global_args": {
"label": "Global FFmpeg arguments."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments."
},
"input_args": {
"label": "FFmpeg input arguments."
},
"output_args": {
"label": "FFmpeg output arguments per role.",
"properties": {
"detect": {
"label": "Detect role FFmpeg output arguments."
},
"record": {
"label": "Record role FFmpeg output arguments."
}
}
},
"retry_interval": {
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera."
},
"apple_compatibility": {
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players."
}
}
}

View File

@@ -1,20 +0,0 @@
{
"label": "Generative AI configuration.",
"properties": {
"api_key": {
"label": "Provider API key."
},
"base_url": {
"label": "Provider base url."
},
"model": {
"label": "GenAI model."
},
"provider": {
"label": "GenAI provider."
},
"provider_options": {
"label": "GenAI Provider extra options."
}
}
}

View File

@@ -1,3 +0,0 @@
{
"label": "Global restream configuration."
}

View File

@@ -1,14 +0,0 @@
{
"label": "Live playback settings.",
"properties": {
"streams": {
"label": "Friendly names and restream names to use for live view."
},
"height": {
"label": "Live camera view height"
},
"quality": {
"label": "Live camera view quality"
}
}
}

View File

@@ -1,11 +0,0 @@
{
"label": "Logging configuration.",
"properties": {
"default": {
"label": "Default logging level."
},
"logs": {
"label": "Log level for specified processes."
}
}
}

View File

@@ -1,45 +0,0 @@
{
"label": "License Plate recognition config.",
"properties": {
"enabled": {
"label": "Enable license plate recognition."
},
"model_size": {
"label": "The size of the embeddings model used."
},
"detection_threshold": {
"label": "License plate object confidence score required to begin running recognition."
},
"min_area": {
"label": "Minimum area of license plate to begin running recognition."
},
"recognition_threshold": {
"label": "Recognition confidence score required to add the plate to the object as a sub label."
},
"min_plate_length": {
"label": "Minimum number of characters a license plate must have to be added to the object as a sub label."
},
"format": {
"label": "Regular expression for the expected format of license plate."
},
"match_distance": {
"label": "Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate."
},
"known_plates": {
"label": "Known plates to track (strings or regular expressions)."
},
"enhancement": {
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition."
},
"debug_save_plates": {
"label": "Save plates captured for LPR for debugging purposes."
},
"device": {
"label": "The device key to use for LPR.",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
},
"replace_rules": {
"label": "List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'."
}
}
}

View File

@@ -1,35 +0,0 @@
{
"label": "Detection model configuration.",
"properties": {
"path": {
"label": "Custom Object detection model path."
},
"labelmap_path": {
"label": "Label map for custom object detector."
},
"width": {
"label": "Object detection model input width."
},
"height": {
"label": "Object detection model input height."
},
"labelmap": {
"label": "Labelmap customization."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels."
},
"input_tensor": {
"label": "Model Input Tensor Shape"
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format"
},
"input_dtype": {
"label": "Model Input D Type"
},
"model_type": {
"label": "Object Detection Model Type"
}
}
}

View File

@@ -1,3 +0,0 @@
{
"label": "Global motion detection configuration."
}

View File

@@ -1,44 +0,0 @@
{
"label": "MQTT configuration.",
"properties": {
"enabled": {
"label": "Enable MQTT Communication."
},
"host": {
"label": "MQTT Host"
},
"port": {
"label": "MQTT Port"
},
"topic_prefix": {
"label": "MQTT Topic Prefix"
},
"client_id": {
"label": "MQTT Client ID"
},
"stats_interval": {
"label": "MQTT Camera Stats Interval"
},
"user": {
"label": "MQTT Username"
},
"password": {
"label": "MQTT Password"
},
"tls_ca_certs": {
"label": "MQTT TLS CA Certificates"
},
"tls_client_cert": {
"label": "MQTT TLS Client Certificate"
},
"tls_client_key": {
"label": "MQTT TLS Client Key"
},
"tls_insecure": {
"label": "MQTT TLS Insecure"
},
"qos": {
"label": "MQTT QoS"
}
}
}

View File

@@ -1,13 +0,0 @@
{
"label": "Networking configuration",
"properties": {
"ipv6": {
"label": "Network configuration",
"properties": {
"enabled": {
"label": "Enable IPv6 for port 5000 and/or 8971"
}
}
}
}
}

View File

@@ -1,17 +0,0 @@
{
"label": "Global notification configuration.",
"properties": {
"enabled": {
"label": "Enable notifications"
},
"email": {
"label": "Email required for push."
},
"cooldown": {
"label": "Cooldown period for notifications (time in seconds)."
},
"enabled_in_config": {
"label": "Keep track of original state of notifications."
}
}
}

View File

@@ -1,77 +0,0 @@
{
"label": "Global object configuration.",
"properties": {
"track": {
"label": "Objects to track."
},
"filters": {
"label": "Object filters.",
"properties": {
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration."
}
}
},
"mask": {
"label": "Object mask."
},
"genai": {
"label": "Config for using genai to analyze objects.",
"properties": {
"enabled": {
"label": "Enable GenAI for camera."
},
"use_snapshot": {
"label": "Use snapshots for generating descriptions."
},
"prompt": {
"label": "Default caption prompt."
},
"object_prompts": {
"label": "Object specific prompts."
},
"objects": {
"label": "List of objects to run generative AI for."
},
"required_zones": {
"label": "List of required zones to be entered in order to run generative AI."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
},
"send_triggers": {
"label": "What triggers to use to send frames to generative AI for a tracked object.",
"properties": {
"tracked_object_end": {
"label": "Send once the object is no longer tracked."
},
"after_significant_updates": {
"label": "Send an early request to generative AI when X frames accumulated."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
}
}
}
}
}

View File

@@ -1,31 +0,0 @@
{
"label": "Proxy configuration.",
"properties": {
"header_map": {
"label": "Header mapping definitions for proxy user passing.",
"properties": {
"user": {
"label": "Header name from upstream proxy to identify user."
},
"role": {
"label": "Header name from upstream proxy to identify user role."
},
"role_map": {
"label": "Mapping of Frigate roles to upstream group values. "
}
}
},
"logout_url": {
"label": "Redirect url for logging out with proxy."
},
"auth_secret": {
"label": "Secret value for proxy authentication."
},
"default_role": {
"label": "Default role for proxy users."
},
"separator": {
"label": "The character used to separate values in a mapped header."
}
}
}

View File

@@ -1,93 +0,0 @@
{
"label": "Global record configuration.",
"properties": {
"enabled": {
"label": "Enable record on all cameras."
},
"sync_recordings": {
"label": "Sync recordings with disk on startup and once a day."
},
"expire_interval": {
"label": "Number of minutes to wait between cleanup runs."
},
"continuous": {
"label": "Continuous recording retention settings.",
"properties": {
"days": {
"label": "Default retention period."
}
}
},
"motion": {
"label": "Motion recording retention settings.",
"properties": {
"days": {
"label": "Default retention period."
}
}
},
"detections": {
"label": "Detection specific retention settings.",
"properties": {
"pre_capture": {
"label": "Seconds to retain before event starts."
},
"post_capture": {
"label": "Seconds to retain after event ends."
},
"retain": {
"label": "Event retention settings.",
"properties": {
"days": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
}
}
}
}
},
"alerts": {
"label": "Alert specific retention settings.",
"properties": {
"pre_capture": {
"label": "Seconds to retain before event starts."
},
"post_capture": {
"label": "Seconds to retain after event ends."
},
"retain": {
"label": "Event retention settings.",
"properties": {
"days": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
}
}
}
}
},
"export": {
"label": "Recording Export Config",
"properties": {
"timelapse_args": {
"label": "Timelapse Args"
}
}
},
"preview": {
"label": "Recording Preview Config",
"properties": {
"quality": {
"label": "Quality of recording preview."
}
}
},
"enabled_in_config": {
"label": "Keep track of original state of recording."
}
}
}

View File

@@ -1,74 +0,0 @@
{
"label": "Review configuration.",
"properties": {
"alerts": {
"label": "Review alerts config.",
"properties": {
"enabled": {
"label": "Enable alerts."
},
"labels": {
"label": "Labels to create alerts for."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as an alert."
},
"enabled_in_config": {
"label": "Keep track of original state of alerts."
},
"cutoff_time": {
"label": "Time to cutoff alerts after no alert-causing activity has occurred."
}
}
},
"detections": {
"label": "Review detections config.",
"properties": {
"enabled": {
"label": "Enable detections."
},
"labels": {
"label": "Labels to create detections for."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as a detection."
},
"cutoff_time": {
"label": "Time to cutoff detection after no detection-causing activity has occurred."
},
"enabled_in_config": {
"label": "Keep track of original state of detections."
}
}
},
"genai": {
"label": "Review description genai config.",
"properties": {
"enabled": {
"label": "Enable GenAI descriptions for review items."
},
"alerts": {
"label": "Enable GenAI for alerts."
},
"detections": {
"label": "Enable GenAI for detections."
},
"additional_concerns": {
"label": "Additional concerns that GenAI should make note of on this camera."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
},
"preferred_language": {
"label": "Preferred language for GenAI Response"
},
"activity_context_prompt": {
"label": "Custom activity context prompt defining normal activity patterns for this property."
}
}
}
}
}

View File

@@ -1,3 +0,0 @@
{
"label": "If Frigate should be started in safe mode."
}

View File

@@ -1,21 +0,0 @@
{
"label": "Semantic search configuration.",
"properties": {
"enabled": {
"label": "Enable semantic search."
},
"reindex": {
"label": "Reindex all tracked objects on startup."
},
"model": {
"label": "The CLIP model to use for semantic search."
},
"model_size": {
"label": "The size of the embeddings model used."
},
"device": {
"label": "The device key to use for semantic search.",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
}
}
}

View File

@@ -1,43 +0,0 @@
{
"label": "Global snapshots configuration.",
"properties": {
"enabled": {
"label": "Snapshots enabled."
},
"clean_copy": {
"label": "Create a clean copy of the snapshot image."
},
"timestamp": {
"label": "Add a timestamp overlay on the snapshot."
},
"bounding_box": {
"label": "Add a bounding box overlay on the snapshot."
},
"crop": {
"label": "Crop the snapshot to the detected object."
},
"required_zones": {
"label": "List of required zones to be entered in order to save a snapshot."
},
"height": {
"label": "Snapshot image height."
},
"retain": {
"label": "Snapshot retention.",
"properties": {
"default": {
"label": "Default retention period."
},
"mode": {
"label": "Retain mode."
},
"objects": {
"label": "Object retention period."
}
}
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)."
}
}
}

View File

@@ -1,28 +0,0 @@
{
"label": "Telemetry configuration.",
"properties": {
"network_interfaces": {
"label": "Enabled network interfaces for bandwidth calculation."
},
"stats": {
"label": "System Stats Configuration",
"properties": {
"amd_gpu_stats": {
"label": "Enable AMD GPU stats."
},
"intel_gpu_stats": {
"label": "Enable Intel GPU stats."
},
"network_bandwidth": {
"label": "Enable network bandwidth for ffmpeg processes."
},
"intel_gpu_device": {
"label": "Define the device to use when gathering SR-IOV stats."
}
}
},
"version_check": {
"label": "Enable latest version check."
}
}
}

View File

@@ -1,31 +0,0 @@
{
"label": "Global timestamp style configuration.",
"properties": {
"position": {
"label": "Timestamp position."
},
"format": {
"label": "Timestamp format."
},
"color": {
"label": "Timestamp color.",
"properties": {
"red": {
"label": "Red"
},
"green": {
"label": "Green"
},
"blue": {
"label": "Blue"
}
}
},
"thickness": {
"label": "Timestamp thickness."
},
"effect": {
"label": "Timestamp effect."
}
}
}

View File

@@ -1,8 +0,0 @@
{
"label": "TLS configuration.",
"properties": {
"enabled": {
"label": "Enable TLS for port 8971"
}
}
}

View File

@@ -1,23 +0,0 @@
{
"label": "UI configuration.",
"properties": {
"timezone": {
"label": "Override UI timezone."
},
"time_format": {
"label": "Override UI time format."
},
"date_style": {
"label": "Override UI dateStyle."
},
"time_style": {
"label": "Override UI timeStyle."
},
"strftime_fmt": {
"label": "Override date and time format using strftime syntax."
},
"unit_system": {
"label": "The unit system to use for measurements."
}
}
}

View File

@@ -1,3 +0,0 @@
{
"label": "Current config version."
}

View File

@@ -66,7 +66,7 @@
"selectImage": "Please select an image file."
},
"dropActive": "Drop the image here…",
"dropInstructions": "Drag and drop or paste an image here, or click to select",
"dropInstructions": "Drag and drop an image here, or click to select",
"maxSize": "Max size: {{size}}MB"
},
"nofaces": "No faces available",

View File

@@ -124,9 +124,6 @@
"available": "Audio is available for this stream",
"unavailable": "Audio is not available for this stream"
},
"debug": {
"picker": "Stream selection unavailable in debug mode. Debug view always uses the stream assigned the detect role."
},
"twoWayTalk": {
"tips": "Your device must support the feature and WebRTC must be configured for two-way talk.",
"available": "Two-way talk is available for this stream",

View File

@@ -192,10 +192,6 @@
"audioTranscription": {
"label": "Transcribir",
"aria": "Solicitar transcripción de audio"
},
"addTrigger": {
"label": "Añadir disparador",
"aria": "Añadir disparador para el objeto seguido"
}
},
"dialog": {

View File

@@ -134,9 +134,6 @@
"playInBackground": {
"label": "Reproducir en segundo plano",
"tips": "Habilita esta opción para continuar la transmisión cuando el reproductor esté oculto."
},
"debug": {
"picker": "Selección de transmisión no disponible en mode de debug. La vista de debug siempre usa la transmisión con el rol de deteccción asignado."
}
},
"cameraSettings": {

View File

@@ -562,8 +562,7 @@
"adminDesc": "Acceso completo a todas las funciones.",
"viewerDesc": "Limitado a paneles en vivo, revisión, exploración y exportaciones únicamente.",
"viewer": "Espectador",
"admin": "Administrador",
"customDesc": "Rol personalizado con acceso a cámaras."
"admin": "Administrador"
},
"select": "Selecciona un rol"
},
@@ -742,99 +741,6 @@
"management": {
"title": "Gestión de disparadores",
"desc": "Gestionar disparadores para {{camera}}. Usa el tipo de miniatura para activar en miniaturas similares al objeto rastreado seleccionado, y el tipo de descripción para activar en descripciones similares al texto que especifiques."
},
"addTrigger": "Añadir Disparador",
"table": {
"name": "Nombre",
"type": "Tipo",
"content": "Contenido",
"threshold": "Umbral",
"actions": "Acciones",
"noTriggers": "No hay disparadores configurados para esta cámara.",
"edit": "Editar",
"deleteTrigger": "Eliminar Disparador",
"lastTriggered": "Última activación"
},
"type": {
"description": "Descripción",
"thumbnail": "Miniatura"
},
"actions": {
"alert": "Marcar como Alerta",
"notification": "Enviar Notificación"
},
"dialog": {
"createTrigger": {
"title": "Crear Disparador",
"desc": "Crear un disparador par la cámara {{camera}}"
},
"editTrigger": {
"title": "Editar Disparador",
"desc": "Editar configuractión del disparador para cámara {{camera}}"
},
"deleteTrigger": {
"title": "Eliminar Disparador"
}
}
},
"roles": {
"management": {
"title": "Administración del rol de visor",
"desc": "Administra roles de visor personalizados y sus permisos de acceso a cámaras para esta instancia de Frigate."
},
"addRole": "Añade un rol",
"table": {
"role": "Rol",
"cameras": "Cámaras",
"actions": "Acciones",
"noRoles": "No se encontraron roles personalizados.",
"editCameras": "Edita Cámaras",
"deleteRole": "Eliminar Rol"
},
"toast": {
"success": {
"createRole": "Rol {{role}} creado exitosamente",
"updateCameras": "Cámara actualizada para el rol {{role}}",
"deleteRole": "Rol {{role}} eliminado exitosamente",
"userRolesUpdated": "{{count}} usuarios asignados a este rol han sido actualizados a 'visor', que tiene acceso a todas las cámaras."
},
"error": {
"createRoleFailed": "Creación de rol fallida: {{errorMessage}}",
"updateCamerasFailed": "Actualización de cámaras fallida: {{errorMessage}}",
"deleteRoleFailed": "Eliminación de rol fallida: {{errorMessage}}",
"userUpdateFailed": "Actualización de roles de usuario fallida: {{errorMessage}}"
}
},
"dialog": {
"createRole": {
"title": "Crear Nuevo Rol",
"desc": "Añadir nuevo rol y especificar permisos de acceso a cámaras."
},
"deleteRole": {
"title": "Eliminar Rol",
"deleting": "Eliminando...",
"desc": "Esta acción no se puede deshacer. El rol va a ser eliminado permanentemente y usuarios associados serán asignados a rol de 'Visor', que les da acceso a ver todas las cámaras.",
"warn": "Estás seguro de que quieres eliminar <strong>{{role}}</strong>?"
},
"editCameras": {
"title": "Editar cámaras de rol",
"desc": "Actualizar acceso de cámara para el rol <strong>{{role}}</strong>."
},
"form": {
"role": {
"title": "Nombre de rol",
"placeholder": "Entre el nombre del rol",
"desc": "Solo se permiten letras, números, puntos y guión bajo.",
"roleIsRequired": "El nombre del rol es requerido",
"roleOnlyInclude": "El nombre del rol solo incluye letras, números, . o _",
"roleExists": "Un rol con este nombre ya existe."
},
"cameras": {
"title": "Cámaras",
"desc": "Seleccione las cámaras a las que este rol tiene accceso. Al menos una cámara es requerida.",
"required": "Al menos una cámara debe ser seleccionada."
}
}
}
}
}

View File

@@ -125,10 +125,7 @@
"tips": "Activer cette option pour continuer le streaming lorsque le lecteur est masqué.",
"label": "Jouer en arrière plan"
},
"title": "Flux",
"debug": {
"picker": "La sélection de flux est indisponible en mode débogage. La vue de débogage utilise systématiquement le flux attribué au rôle de détection."
}
"title": "Flux"
},
"cameraSettings": {
"objectDetection": "Détection d'objets",

View File

@@ -1,3 +0,0 @@
{
"speech": "Govor"
}

View File

@@ -1,5 +0,0 @@
{
"time": {
"untilForTime": "Do {{time}}"
}
}

View File

@@ -1,5 +0,0 @@
{
"form": {
"user": "Korisničko ime"
}
}

View File

@@ -1,40 +0,0 @@
{
"group": {
"label": "Grupe kamera",
"add": "Dodaj grupu kamera",
"edit": "Uredi grupu kamera",
"delete": {
"label": "Izbriši grupu kamera",
"confirm": {
"title": "Potvrda brisanja",
"desc": "Da li ste sigurni da želite obrisati grupu kamera <em>{{name}}</em>?"
}
},
"name": {
"label": "Ime",
"placeholder": "Unesite ime…",
"errorMessage": {
"mustLeastCharacters": "Ime grupe kamera mora sadržavati barem 2 karaktera.",
"exists": "Grupa kamera sa ovim imenom već postoji.",
"nameMustNotPeriod": "Naziv grupe kamera ne smije sadržavati točku.",
"invalid": "Nevažeći naziv grupe kamera."
}
},
"cameras": {
"label": "Kamere",
"desc": "Izaberite kamere za ovu grupu."
},
"icon": "Ikona",
"success": "Grupa kamera ({{name}}) je pohranjena.",
"camera": {
"birdseye": "Ptičja perspektiva",
"setting": {
"label": "Postavke streamanja kamere",
"title": "{{cameraName}} Streaming Postavke",
"desc": "Promijenite opcije streamanja uživo za nadzornu ploču ove grupe kamera. <em>Ove postavke su specifične za uređaj/preglednik.</em>",
"audioIsAvailable": "Za ovaj prijenos dostupan je zvuk",
"audioIsUnavailable": "Za ovaj prijenos zvuk nije dostupan"
}
}
}
}

View File

@@ -1,5 +0,0 @@
{
"restart": {
"title": "Jeste li sigurni da želite ponovno pokrenuti Frigate?"
}
}

View File

@@ -1,6 +0,0 @@
{
"filter": "Filtar",
"classes": {
"label": "Klase"
}
}

View File

@@ -1,5 +0,0 @@
{
"iconPicker": {
"selectIcon": "Odaberite ikonu"
}
}

View File

@@ -1,7 +0,0 @@
{
"button": {
"downloadVideo": {
"label": "Preuzmi video"
}
}
}

View File

@@ -1,3 +0,0 @@
{
"noRecordingsFoundForThisTime": "Nisu pronađene snimke za ovo vrijeme"
}

View File

@@ -1,3 +0,0 @@
{
"person": "Osoba"
}

View File

@@ -1,3 +0,0 @@
{
"documentTitle": "Uređivač konfiguracije - Frigate"
}

View File

@@ -1,3 +0,0 @@
{
"alerts": "Upozorenja"
}

Some files were not shown because too many files have changed in this diff Show More