mirror of
https://github.com/blakeblackshear/frigate.git
synced 2025-10-28 01:41:53 +08:00
Compare commits
71 Commits
dependabot
...
dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
710a77679b | ||
|
|
893fe79d22 | ||
|
|
5ff7a47ba9 | ||
|
|
5715ed62ad | ||
|
|
43706eb48d | ||
|
|
190925375b | ||
|
|
094a0a6e05 | ||
|
|
840d567d22 | ||
|
|
2c480b9a89 | ||
|
|
1fb21a4dac | ||
|
|
63042b9c08 | ||
|
|
0a6b9f98ed | ||
|
|
32875fb4cc | ||
|
|
9ec65d7aa9 | ||
|
|
83fa651ada | ||
|
|
eb51eb3c9d | ||
|
|
49f5d595ea | ||
|
|
e2da8aa04c | ||
|
|
f5a57edcc9 | ||
|
|
4df7793587 | ||
|
|
ac5de290ab | ||
|
|
8c3c596dee | ||
|
|
c5def83e08 | ||
|
|
81df534784 | ||
|
|
0d5cfa2e38 | ||
|
|
b38f830b3b | ||
|
|
2387dccc19 | ||
|
|
d6f5d2b0fa | ||
|
|
9638e85a1f | ||
|
|
c5fe354552 | ||
|
|
007371019a | ||
|
|
21ff257705 | ||
|
|
adb45e318f | ||
|
|
5225d599b9 | ||
|
|
da2f414f83 | ||
|
|
795bd9908c | ||
|
|
bbc130de18 | ||
|
|
ce07dae4ec | ||
|
|
cb7009102e | ||
|
|
2a7a9323c3 | ||
|
|
b55615196e | ||
|
|
d384f2fc32 | ||
|
|
3eac652ba2 | ||
|
|
45fabab417 | ||
|
|
8928b03497 | ||
|
|
7fc4822594 | ||
|
|
33725ddae9 | ||
|
|
b336bdec03 | ||
|
|
06cbdf6cce | ||
|
|
2ae4203dac | ||
|
|
d420816376 | ||
|
|
daa78361c5 | ||
|
|
6bc75e72b2 | ||
|
|
534db717c4 | ||
|
|
6220f337d9 | ||
|
|
32781af2a5 | ||
|
|
bd10b13bc3 | ||
|
|
c5fec3271f | ||
|
|
0743cb57c2 | ||
|
|
5dc8a85f2f | ||
|
|
0302db1c43 | ||
|
|
a4764563a5 | ||
|
|
942a61ddfb | ||
|
|
4d582062fb | ||
|
|
e0a8445bac | ||
|
|
2a271c0f5e | ||
|
|
925bf78811 | ||
|
|
59102794e8 | ||
|
|
20e5e3bdc0 | ||
|
|
b94ebda9e5 | ||
|
|
8cdaef307a |
6
.cursor/rules/frontend-always-use-translation-files.mdc
Normal file
6
.cursor/rules/frontend-always-use-translation-files.mdc
Normal file
@@ -0,0 +1,6 @@
|
||||
---
|
||||
globs: ["**/*.ts", "**/*.tsx"]
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
Never write strings in the frontend directly, always write to and reference the relevant translations file.
|
||||
@@ -1,2 +1 @@
|
||||
scikit-build == 0.18.*
|
||||
nvidia-pyindex
|
||||
|
||||
@@ -73,6 +73,8 @@ http {
|
||||
vod_manifest_segment_durations_mode accurate;
|
||||
vod_ignore_edit_list on;
|
||||
vod_segment_duration 10000;
|
||||
|
||||
# MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
|
||||
vod_hls_mpegts_align_frames off;
|
||||
vod_hls_mpegts_interleave_frames on;
|
||||
|
||||
@@ -105,6 +107,10 @@ http {
|
||||
aio threads;
|
||||
vod hls;
|
||||
|
||||
# Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
|
||||
# Smaller segments, faster generation, better browser compatibility
|
||||
vod_hls_container_format fmp4;
|
||||
|
||||
secure_token $args;
|
||||
secure_token_types application/vnd.apple.mpegurl;
|
||||
|
||||
@@ -274,6 +280,18 @@ http {
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
# Allow unauthenticated access to the first_time_login endpoint
|
||||
# so the login page can load help text before authentication.
|
||||
location /api/auth/first_time_login {
|
||||
auth_request off;
|
||||
limit_except GET {
|
||||
deny all;
|
||||
}
|
||||
rewrite ^/api(/.*)$ $1 break;
|
||||
proxy_pass http://frigate_api;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /api/stats {
|
||||
include auth_request.conf;
|
||||
access_log off;
|
||||
|
||||
@@ -164,13 +164,35 @@ According to [this discussion](https://github.com/blakeblackshear/frigate/issues
|
||||
Cameras connected via a Reolink NVR can be connected with the http stream, use `channel[0..15]` in the stream url for the additional channels.
|
||||
The setup of main stream can be also done via RTSP, but isn't always reliable on all hardware versions. The example configuration is working with the oldest HW version RLN16-410 device with multiple types of cameras.
|
||||
|
||||
<details>
|
||||
<summary>Example Config</summary>
|
||||
|
||||
:::tip
|
||||
|
||||
Reolink's latest cameras support two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
|
||||
|
||||
NOTE: The RTSP stream can not be prefixed with `ffmpeg:`, as go2rtc needs to handle the stream to support two way audio.
|
||||
|
||||
Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
|
||||
|
||||
:::
|
||||
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
# example for connecting to a standard Reolink camera
|
||||
your_reolink_camera:
|
||||
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
|
||||
your_reolink_camera_sub:
|
||||
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
|
||||
      # example for connecting to a Reolink camera that supports two way talk
|
||||
your_reolink_camera_twt:
|
||||
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
|
||||
        - "rtsp://username:password@reolink_ip/Preview_01_sub"
|
||||
your_reolink_camera_twt_sub:
|
||||
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
|
||||
        - "rtsp://username:password@reolink_ip/Preview_01_sub"
|
||||
# example for connecting to a Reolink NVR
|
||||
your_reolink_camera_via_nvr:
|
||||
- "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_main.bcs&user=username&password=password" # channel numbers are 0-15
|
||||
- "ffmpeg:your_reolink_camera_via_nvr#audio=aac"
|
||||
@@ -201,22 +223,7 @@ cameras:
|
||||
roles:
|
||||
- detect
|
||||
```
|
||||
|
||||
#### Reolink Doorbell
|
||||
|
||||
The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
|
||||
|
||||
Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
|
||||
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
your_reolink_doorbell:
|
||||
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
|
||||
- rtsp://reolink_ip/Preview_01_sub
|
||||
your_reolink_doorbell_sub:
|
||||
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
|
||||
```
|
||||
</details>
|
||||
|
||||
### Unifi Protect Cameras
|
||||
|
||||
|
||||
@@ -12,7 +12,18 @@ Object classification models are lightweight and run very fast on CPU. Inference
|
||||
Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
|
||||
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
|
||||
|
||||
### Sub label vs Attribute
|
||||
## Classes
|
||||
|
||||
Classes are the categories your model will learn to distinguish between. Each class represents a distinct visual category that the model will predict.
|
||||
|
||||
For object classification:
|
||||
|
||||
- Define classes that represent different types or attributes of the detected object
|
||||
- Examples: For `person` objects, classes might be `delivery_person`, `resident`, `stranger`
|
||||
- Include a `none` class for objects that don't fit any specific category
|
||||
- Keep classes visually distinct to improve accuracy
|
||||
|
||||
### Classification Type
|
||||
|
||||
- **Sub label**:
|
||||
|
||||
@@ -67,7 +78,7 @@ When choosing which objects to classify, start with a small number of visually d
|
||||
### Improving the Model
|
||||
|
||||
- **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
|
||||
- **Data collection**: Use the model’s Train tab to gather balanced examples across times of day, weather, and distances.
|
||||
- **Data collection**: Use the model’s Recent Classification tab to gather balanced examples across times of day, weather, and distances.
|
||||
- **Preprocessing**: Ensure examples reflect object crops similar to Frigate’s boxes; keep the subject centered.
|
||||
- **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
|
||||
- **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.
|
||||
|
||||
@@ -12,6 +12,17 @@ State classification models are lightweight and run very fast on CPU. Inference
|
||||
Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
|
||||
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
|
||||
|
||||
## Classes
|
||||
|
||||
Classes are the different states an area on your camera can be in. Each class represents a distinct visual state that the model will learn to recognize.
|
||||
|
||||
For state classification:
|
||||
|
||||
- Define classes that represent mutually exclusive states
|
||||
- Examples: `open` and `closed` for a garage door, `on` and `off` for lights
|
||||
- Use at least 2 classes (typically binary states work best)
|
||||
- Keep class names clear and descriptive
|
||||
|
||||
## Example use cases
|
||||
|
||||
- **Door state**: Detect if a garage or front door is open vs closed.
|
||||
@@ -49,4 +60,4 @@ When choosing a portion of the camera frame for state classification, it is impo
|
||||
### Improving the Model
|
||||
|
||||
- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
|
||||
- **Data collection**: Use the model’s Train tab to gather balanced examples across times of day and weather.
|
||||
- **Data collection**: Use the model’s Recent Classifications tab to gather balanced examples across times of day and weather.
|
||||
|
||||
@@ -70,7 +70,7 @@ Fine-tune face recognition with these optional parameters at the global level of
|
||||
- `min_faces`: Min face recognitions for the sub label to be applied to the person object.
|
||||
- Default: `1`
|
||||
- `save_attempts`: Number of images of recognized faces to save for training.
|
||||
- Default: `100`.
|
||||
- Default: `200`.
|
||||
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
|
||||
- Default: `True`.
|
||||
- `device`: Target a specific device to run the face recognition model on (multi-GPU installation).
|
||||
@@ -114,9 +114,9 @@ When choosing images to include in the face training set it is recommended to al
|
||||
|
||||
:::
|
||||
|
||||
### Understanding the Train Tab
|
||||
### Understanding the Recent Recognitions Tab
|
||||
|
||||
The Train tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching.
|
||||
The Recent Recognitions tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching.
|
||||
|
||||
Each face image is labeled with a name (or `Unknown`) along with the confidence score of the recognition attempt. While each image can be used to train the system for a specific person, not all images are suitable for training.
|
||||
|
||||
@@ -140,7 +140,7 @@ Once front-facing images are performing well, start choosing slightly off-angle
|
||||
|
||||
Start with the [Usage](#usage) section and re-read the [Model Requirements](#model-requirements) above.
|
||||
|
||||
1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Train tab in the Frigate UI's Face Library.
|
||||
1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Recent Recognitions tab in the Frigate UI's Face Library.
|
||||
|
||||
If you are using a Frigate+ or `face` detecting model:
|
||||
|
||||
@@ -161,6 +161,8 @@ Start with the [Usage](#usage) section and re-read the [Model Requirements](#mod
|
||||
|
||||
Accuracy is definitely going to be improved with higher quality cameras / streams. It is important to look at the DORI (Detection Observation Recognition Identification) range of your camera, if that specification is posted. This specification explains the distance from the camera that a person can be detected, observed, recognized, and identified. The identification range is the most relevant here, and the distance listed by the camera is the furthest that face recognition will realistically work.
|
||||
|
||||
Some users have also noted that setting the stream in camera firmware to a constant bit rate (CBR) leads to better image clarity than with a variable bit rate (VBR).
|
||||
|
||||
### Why can't I bulk upload photos?
|
||||
|
||||
It is important to methodically add photos to the library, bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance.
|
||||
@@ -186,7 +188,7 @@ Avoid training on images that already score highly, as this can lead to over-fit
|
||||
No, face recognition does not support negative training (i.e., explicitly telling it who someone is _not_). Instead, the best approach is to improve the training data by using a more diverse and representative set of images for each person.
|
||||
For more guidance, refer to the section above on improving recognition accuracy.
|
||||
|
||||
### I see scores above the threshold in the train tab, but a sub label wasn't assigned?
|
||||
### I see scores above the threshold in the Recent Recognitions tab, but a sub label wasn't assigned?
|
||||
|
||||
Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person if that person is confidently recognized consistently. This avoids cases where a single high confidence recognition would throw off the results.
|
||||
|
||||
|
||||
@@ -17,18 +17,17 @@ To use Generative AI, you must define a single provider at the global level of y
|
||||
genai:
|
||||
provider: gemini
|
||||
api_key: "{FRIGATE_GEMINI_API_KEY}"
|
||||
model: gemini-1.5-flash
|
||||
model: gemini-2.0-flash
|
||||
|
||||
cameras:
|
||||
front_camera:
|
||||
objects:
|
||||
genai:
|
||||
enabled: True # <- enable GenAI for your front camera
|
||||
use_snapshot: True
|
||||
objects:
|
||||
- person
|
||||
required_zones:
|
||||
- steps
|
||||
enabled: True # <- enable GenAI for your front camera
|
||||
use_snapshot: True
|
||||
objects:
|
||||
- person
|
||||
required_zones:
|
||||
- steps
|
||||
indoor_camera:
|
||||
objects:
|
||||
genai:
|
||||
@@ -80,7 +79,7 @@ Google Gemini has a free tier allowing [15 queries per minute](https://ai.google
|
||||
|
||||
### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). At the time of writing, this includes `gemini-1.5-pro` and `gemini-1.5-flash`.
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini).
|
||||
|
||||
### Get API Key
|
||||
|
||||
@@ -97,7 +96,7 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt
|
||||
genai:
|
||||
provider: gemini
|
||||
api_key: "{FRIGATE_GEMINI_API_KEY}"
|
||||
model: gemini-1.5-flash
|
||||
model: gemini-2.0-flash
|
||||
```
|
||||
|
||||
:::note
|
||||
@@ -112,7 +111,7 @@ OpenAI does not have a free tier for their API. With the release of gpt-4o, pric
|
||||
|
||||
### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models).
|
||||
|
||||
### Get API Key
|
||||
|
||||
@@ -139,18 +138,19 @@ Microsoft offers several vision models through Azure OpenAI. A subscription is r
|
||||
|
||||
### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).
|
||||
|
||||
### Create Resource and Get API Key
|
||||
|
||||
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource.
|
||||
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key, model name, and resource URL, which must include the `api-version` parameter (see the example below).
|
||||
|
||||
### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: azure_openai
|
||||
base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
|
||||
base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview
|
||||
model: gpt-5-mini
|
||||
api_key: "{FRIGATE_OPENAI_API_KEY}"
|
||||
```
|
||||
|
||||
@@ -196,10 +196,10 @@ genai:
|
||||
model: llava
|
||||
|
||||
objects:
|
||||
prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
|
||||
object_prompts:
|
||||
person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
|
||||
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
|
||||
prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
|
||||
object_prompts:
|
||||
person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
|
||||
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
|
||||
```
|
||||
|
||||
Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
|
||||
|
||||
@@ -39,6 +39,26 @@ Each installation and even camera can have different parameters for what is cons
|
||||
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
|
||||
```
|
||||
|
||||
### Image Source
|
||||
|
||||
By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, you can configure Frigate to extract frames directly from recordings at a higher resolution:
|
||||
|
||||
```yaml
|
||||
review:
|
||||
genai:
|
||||
enabled: true
|
||||
image_source: recordings # Options: "preview" (default) or "recordings"
|
||||
```
|
||||
|
||||
When using `recordings`, frames are extracted at 480p resolution (480px height), providing better detail for the LLM while being mindful of context window size. This is particularly useful for scenarios where fine details matter, such as identifying license plates, reading text, or analyzing distant objects. Note that using recordings will:
|
||||
|
||||
- Provide higher quality images to the LLM (480p vs 180p preview images)
|
||||
- Use more tokens per image (~200-300 tokens vs ~100 tokens for preview)
|
||||
- Result in fewer frames being sent to stay within context limits (typically 6-12 frames vs 8-20 frames)
|
||||
- Require that recordings are enabled for the camera
|
||||
|
||||
If recordings are not available for a given time period, the system will automatically fall back to using preview frames.
|
||||
|
||||
### Additional Concerns
|
||||
|
||||
Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:
|
||||
|
||||
@@ -30,8 +30,7 @@ In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle`
|
||||
|
||||
## Minimum System Requirements
|
||||
|
||||
License plate recognition works by running AI models locally on your system. The models are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
|
||||
|
||||
License plate recognition works by running AI models locally on your system. The YOLOv9 plate detector model and the OCR models ([PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
|
||||
## Configuration
|
||||
|
||||
License plate recognition is disabled by default. Enable it in your config file:
|
||||
|
||||
@@ -174,7 +174,7 @@ For devices that support two way talk, Frigate can be configured to use the feat
|
||||
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
|
||||
- For the Home Assistant Frigate card, [follow the docs](http://card.camera/#/usage/2-way-audio) for the correct source.
|
||||
|
||||
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
|
||||
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-cameras)
|
||||
|
||||
As a starting point to check compatibility for your camera, view the list of cameras supported for two-way talk on the [go2rtc repository](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#two-way-audio). For cameras in the category `ONVIF Profile T`, you can use the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/)'s FeatureList to check for the presence of `AudioOutput`. A camera that supports `ONVIF Profile T` _usually_ supports this, but due to inconsistent support, a camera that explicitly lists this feature may still not work. If no entry for your camera exists on the database, it is recommended not to buy it or to consult with the manufacturer's support on the feature availability.
|
||||
|
||||
|
||||
@@ -1455,7 +1455,7 @@ COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
|
||||
WORKDIR /dfine
|
||||
RUN git clone https://github.com/Peterande/D-FINE.git .
|
||||
RUN uv pip install --system -r requirements.txt
|
||||
RUN uv pip install --system onnx onnxruntime onnxsim
|
||||
RUN uv pip install --system onnx onnxruntime onnxsim onnxscript
|
||||
# Create output directory and download checkpoint
|
||||
RUN mkdir -p output
|
||||
ARG MODEL_SIZE
|
||||
@@ -1479,9 +1479,9 @@ FROM python:3.11 AS build
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
|
||||
WORKDIR /rfdetr
|
||||
RUN uv pip install --system rfdetr onnx onnxruntime onnxsim onnx-graphsurgeon
|
||||
RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnxscript
|
||||
ARG MODEL_SIZE
|
||||
RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export()"
|
||||
RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export(simplify=True)"
|
||||
FROM scratch
|
||||
ARG MODEL_SIZE
|
||||
COPY --from=build /rfdetr/output/inference_model.onnx /rfdetr-${MODEL_SIZE}.onnx
|
||||
@@ -1529,7 +1529,7 @@ COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
|
||||
WORKDIR /yolov9
|
||||
ADD https://github.com/WongKinYiu/yolov9.git .
|
||||
RUN uv pip install --system -r requirements.txt
|
||||
RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1
|
||||
RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1 onnxscript
|
||||
ARG MODEL_SIZE
|
||||
ARG IMG_SIZE
|
||||
ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt
|
||||
|
||||
@@ -429,6 +429,10 @@ review:
|
||||
alerts: True
|
||||
# Optional: Enable GenAI review summaries for detections (default: shown below)
|
||||
detections: False
|
||||
# Optional: Image source for GenAI (default: preview)
|
||||
# Options: "preview" (uses cached preview frames at 180p) or "recordings" (extracts frames from recordings at 480p)
|
||||
# Using "recordings" provides better image quality but uses ~2-3x more tokens per image (~200-300 vs ~100 tokens)
|
||||
image_source: preview
|
||||
# Optional: Additional concerns that the GenAI should make note of (default: None)
|
||||
additional_concerns:
|
||||
- Animals in the garden
|
||||
@@ -630,7 +634,7 @@ face_recognition:
|
||||
# Optional: Min face recognitions for the sub label to be applied to the person object (default: shown below)
|
||||
min_faces: 1
|
||||
# Optional: Number of images of recognized faces to save for training (default: shown below)
|
||||
save_attempts: 100
|
||||
save_attempts: 200
|
||||
# Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
|
||||
blur_confidence_filter: True
|
||||
# Optional: Set the model size used face recognition. (default: shown below)
|
||||
@@ -671,20 +675,18 @@ lpr:
|
||||
# Optional: List of regex replacement rules to normalize detected plates (default: shown below)
|
||||
replace_rules: {}
|
||||
|
||||
# Optional: Configuration for AI generated tracked object descriptions
|
||||
# Optional: Configuration for AI / LLM provider
|
||||
# WARNING: Depending on the provider, this will send thumbnails over the internet
|
||||
# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at
|
||||
# the camera level (enabled: False) to enhance privacy for indoor cameras.
|
||||
# to Google or OpenAI's LLMs to generate descriptions. GenAI features can be configured at
|
||||
# the camera level to enhance privacy for indoor cameras.
|
||||
genai:
|
||||
# Optional: Enable AI description generation (default: shown below)
|
||||
enabled: False
|
||||
# Required if enabled: Provider must be one of ollama, gemini, or openai
|
||||
# Required: Provider must be one of ollama, gemini, or openai
|
||||
provider: ollama
|
||||
# Required if provider is ollama. May also be used for an OpenAI API compatible backend with the openai provider.
|
||||
  base_url: http://localhost:11434
|
||||
# Required if gemini or openai
|
||||
api_key: "{FRIGATE_GENAI_API_KEY}"
|
||||
# Required if enabled: The model to use with the provider.
|
||||
# Required: The model to use with the provider.
|
||||
model: gemini-1.5-flash
|
||||
# Optional additional args to pass to the GenAI Provider (default: None)
|
||||
provider_options:
|
||||
|
||||
@@ -5,7 +5,7 @@ title: Updating
|
||||
|
||||
# Updating Frigate
|
||||
|
||||
The current stable version of Frigate is **0.16.1**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.1).
|
||||
The current stable version of Frigate is **0.16.2**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.2).
|
||||
|
||||
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
|
||||
|
||||
@@ -33,21 +33,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
2. **Update and Pull the Latest Image**:
|
||||
|
||||
- If using Docker Compose:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.1` instead of `0.15.2`). For example:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.2` instead of `0.15.2`). For example:
|
||||
```yaml
|
||||
services:
|
||||
frigate:
|
||||
image: ghcr.io/blakeblackshear/frigate:0.16.1
|
||||
image: ghcr.io/blakeblackshear/frigate:0.16.2
|
||||
```
|
||||
- Then pull the image:
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.2
|
||||
```
|
||||
- **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
|
||||
- If using `docker run`:
|
||||
- Pull the image with the appropriate tag (e.g., `0.16.1`, `0.16.1-tensorrt`, or `stable`):
|
||||
- Pull the image with the appropriate tag (e.g., `0.16.2`, `0.16.2-tensorrt`, or `stable`):
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.16.2
|
||||
```
|
||||
|
||||
3. **Start the Container**:
|
||||
|
||||
@@ -161,7 +161,14 @@ Message published for updates to tracked object metadata, for example:
|
||||
|
||||
### `frigate/reviews`
|
||||
|
||||
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish a, `update` message with the same id. When the review activity has ended a final `end` message is published.
|
||||
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated.
|
||||
|
||||
An `update` with the same ID will be published when:
|
||||
- The severity changes from `detection` to `alert`
|
||||
- Additional objects are detected
|
||||
- An object is recognized via face, lpr, etc.
|
||||
|
||||
When the review activity has ended a final `end` message is published.
|
||||
|
||||
```json
|
||||
{
|
||||
|
||||
@@ -42,6 +42,7 @@ Misidentified objects should have a correct label added. For example, if a perso
|
||||
| `w` | Add box |
|
||||
| `d` | Toggle difficult |
|
||||
| `s` | Switch to the next label |
|
||||
| `Shift + s` | Switch to the previous label |
|
||||
| `tab` | Select next largest box |
|
||||
| `del` | Delete current box |
|
||||
| `esc` | Deselect/Cancel |
|
||||
|
||||
998
docs/package-lock.json
generated
998
docs/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -18,7 +18,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@docusaurus/core": "^3.7.0",
|
||||
"@docusaurus/plugin-content-docs": "^3.9.2",
|
||||
"@docusaurus/plugin-content-docs": "^3.6.3",
|
||||
"@docusaurus/preset-classic": "^3.7.0",
|
||||
"@docusaurus/theme-mermaid": "^3.6.3",
|
||||
"@inkeep/docusaurus": "^2.0.16",
|
||||
|
||||
@@ -387,20 +387,28 @@ def config_set(request: Request, body: AppConfigSetBody):
|
||||
old_config: FrigateConfig = request.app.frigate_config
|
||||
request.app.frigate_config = config
|
||||
|
||||
if body.update_topic and body.update_topic.startswith("config/cameras/"):
|
||||
_, _, camera, field = body.update_topic.split("/")
|
||||
if body.update_topic:
|
||||
if body.update_topic.startswith("config/cameras/"):
|
||||
_, _, camera, field = body.update_topic.split("/")
|
||||
|
||||
if field == "add":
|
||||
settings = config.cameras[camera]
|
||||
elif field == "remove":
|
||||
settings = old_config.cameras[camera]
|
||||
if field == "add":
|
||||
settings = config.cameras[camera]
|
||||
elif field == "remove":
|
||||
settings = old_config.cameras[camera]
|
||||
else:
|
||||
settings = config.get_nested_object(body.update_topic)
|
||||
|
||||
request.app.config_publisher.publish_update(
|
||||
CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
|
||||
settings,
|
||||
)
|
||||
else:
|
||||
# Handle nested config updates (e.g., config/classification/custom/{name})
|
||||
settings = config.get_nested_object(body.update_topic)
|
||||
|
||||
request.app.config_publisher.publish_update(
|
||||
CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
|
||||
settings,
|
||||
)
|
||||
if settings:
|
||||
request.app.config_publisher.publisher.publish(
|
||||
body.update_topic, settings
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
content=(
|
||||
@@ -688,7 +696,11 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
|
||||
clauses.append((Timeline.camera == camera))
|
||||
|
||||
if source_id:
|
||||
clauses.append((Timeline.source_id == source_id))
|
||||
source_ids = [sid.strip() for sid in source_id.split(",")]
|
||||
if len(source_ids) == 1:
|
||||
clauses.append((Timeline.source_id == source_ids[0]))
|
||||
else:
|
||||
clauses.append((Timeline.source_id.in_(source_ids)))
|
||||
|
||||
if len(clauses) == 0:
|
||||
clauses.append((True))
|
||||
|
||||
@@ -35,6 +35,23 @@ logger = logging.getLogger(__name__)
|
||||
router = APIRouter(tags=[Tags.auth])
|
||||
|
||||
|
||||
@router.get("/auth/first_time_login")
|
||||
def first_time_login(request: Request):
|
||||
"""Return whether the admin first-time login help flag is set in config.
|
||||
|
||||
This endpoint is intentionally unauthenticated so the login page can
|
||||
query it before a user is authenticated.
|
||||
"""
|
||||
auth_config = request.app.frigate_config.auth
|
||||
|
||||
return JSONResponse(
|
||||
content={
|
||||
"admin_first_time_login": auth_config.admin_first_time_login
|
||||
or auth_config.reset_admin_password
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class RateLimiter:
|
||||
_limit = ""
|
||||
|
||||
@@ -515,6 +532,11 @@ def login(request: Request, body: AppPostLoginBody):
|
||||
set_jwt_cookie(
|
||||
response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
|
||||
)
|
||||
# Clear admin_first_time_login flag after successful admin login so the
|
||||
# UI stops showing the first-time login documentation link.
|
||||
if role == "admin":
|
||||
request.app.frigate_config.auth.admin_first_time_login = False
|
||||
|
||||
return response
|
||||
return JSONResponse(content={"message": "Login failed"}, status_code=401)
|
||||
|
||||
|
||||
@@ -199,19 +199,30 @@ def ffprobe(request: Request, paths: str = "", detailed: bool = False):
|
||||
request.app.frigate_config.ffmpeg, path.strip(), detailed=detailed
|
||||
)
|
||||
|
||||
result = {
|
||||
"return_code": ffprobe.returncode,
|
||||
"stderr": (
|
||||
ffprobe.stderr.decode("unicode_escape").strip()
|
||||
if ffprobe.returncode != 0
|
||||
else ""
|
||||
),
|
||||
"stdout": (
|
||||
json.loads(ffprobe.stdout.decode("unicode_escape").strip())
|
||||
if ffprobe.returncode == 0
|
||||
else ""
|
||||
),
|
||||
}
|
||||
if ffprobe.returncode != 0:
|
||||
try:
|
||||
stderr_decoded = ffprobe.stderr.decode("utf-8")
|
||||
except UnicodeDecodeError:
|
||||
try:
|
||||
stderr_decoded = ffprobe.stderr.decode("unicode_escape")
|
||||
except Exception:
|
||||
stderr_decoded = str(ffprobe.stderr)
|
||||
|
||||
stderr_lines = [
|
||||
line.strip() for line in stderr_decoded.split("\n") if line.strip()
|
||||
]
|
||||
|
||||
result = {
|
||||
"return_code": ffprobe.returncode,
|
||||
"stderr": stderr_lines,
|
||||
"stdout": "",
|
||||
}
|
||||
else:
|
||||
result = {
|
||||
"return_code": ffprobe.returncode,
|
||||
"stderr": [],
|
||||
"stdout": json.loads(ffprobe.stdout.decode("unicode_escape").strip()),
|
||||
}
|
||||
|
||||
# Add detailed metadata if requested and probe was successful
|
||||
if detailed and ffprobe.returncode == 0 and result["stdout"]:
|
||||
|
||||
@@ -3,7 +3,9 @@
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import shutil
|
||||
import string
|
||||
from typing import Any
|
||||
|
||||
import cv2
|
||||
@@ -17,6 +19,8 @@ from frigate.api.auth import require_role
|
||||
from frigate.api.defs.request.classification_body import (
|
||||
AudioTranscriptionBody,
|
||||
DeleteFaceImagesBody,
|
||||
GenerateObjectExamplesBody,
|
||||
GenerateStateExamplesBody,
|
||||
RenameFaceBody,
|
||||
)
|
||||
from frigate.api.defs.response.classification_response import (
|
||||
@@ -30,6 +34,10 @@ from frigate.config.camera import DetectConfig
|
||||
from frigate.const import CLIPS_DIR, FACE_DIR
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.models import Event
|
||||
from frigate.util.classification import (
|
||||
collect_object_classification_examples,
|
||||
collect_state_classification_examples,
|
||||
)
|
||||
from frigate.util.path import get_event_snapshot
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -159,8 +167,7 @@ def train_face(request: Request, name: str, body: dict = None):
|
||||
new_name = f"{sanitized_name}-{datetime.datetime.now().timestamp()}.webp"
|
||||
new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}")
|
||||
|
||||
if not os.path.exists(new_file_folder):
|
||||
os.mkdir(new_file_folder)
|
||||
os.makedirs(new_file_folder, exist_ok=True)
|
||||
|
||||
if training_file_name:
|
||||
shutil.move(training_file, os.path.join(new_file_folder, new_name))
|
||||
@@ -701,13 +708,14 @@ def categorize_classification_image(request: Request, name: str, body: dict = No
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
new_name = f"{category}-{datetime.datetime.now().timestamp()}.png"
|
||||
random_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
|
||||
timestamp = datetime.datetime.now().timestamp()
|
||||
new_name = f"{category}-{timestamp}-{random_id}.png"
|
||||
new_file_folder = os.path.join(
|
||||
CLIPS_DIR, sanitize_filename(name), "dataset", category
|
||||
)
|
||||
|
||||
if not os.path.exists(new_file_folder):
|
||||
os.mkdir(new_file_folder)
|
||||
os.makedirs(new_file_folder, exist_ok=True)
|
||||
|
||||
# use opencv because webp images can not be used to train
|
||||
img = cv2.imread(training_file)
|
||||
@@ -756,3 +764,43 @@ def delete_classification_train_images(request: Request, name: str, body: dict =
|
||||
content=({"success": True, "message": "Successfully deleted faces."}),
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/classification/generate_examples/state",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Generate state classification examples",
|
||||
)
|
||||
async def generate_state_examples(request: Request, body: GenerateStateExamplesBody):
|
||||
"""Generate examples for state classification."""
|
||||
model_name = sanitize_filename(body.model_name)
|
||||
cameras_normalized = {
|
||||
camera_name: tuple(crop)
|
||||
for camera_name, crop in body.cameras.items()
|
||||
if camera_name in request.app.frigate_config.cameras
|
||||
}
|
||||
|
||||
collect_state_classification_examples(model_name, cameras_normalized)
|
||||
|
||||
return JSONResponse(
|
||||
content={"success": True, "message": "Example generation completed"},
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/classification/generate_examples/object",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Generate object classification examples",
|
||||
)
|
||||
async def generate_object_examples(request: Request, body: GenerateObjectExamplesBody):
|
||||
"""Generate examples for object classification."""
|
||||
model_name = sanitize_filename(body.model_name)
|
||||
collect_object_classification_examples(model_name, body.label)
|
||||
|
||||
return JSONResponse(
|
||||
content={"success": True, "message": "Example generation completed"},
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
@@ -1,17 +1,31 @@
|
||||
from typing import List
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class RenameFaceBody(BaseModel):
|
||||
new_name: str
|
||||
new_name: str = Field(description="New name for the face")
|
||||
|
||||
|
||||
class AudioTranscriptionBody(BaseModel):
|
||||
event_id: str
|
||||
event_id: str = Field(description="ID of the event to transcribe audio for")
|
||||
|
||||
|
||||
class DeleteFaceImagesBody(BaseModel):
|
||||
ids: List[str] = Field(
|
||||
description="List of image filenames to delete from the face folder"
|
||||
)
|
||||
|
||||
|
||||
class GenerateStateExamplesBody(BaseModel):
|
||||
model_name: str = Field(description="Name of the classification model")
|
||||
cameras: Dict[str, Tuple[float, float, float, float]] = Field(
|
||||
description="Dictionary mapping camera names to normalized crop coordinates in [x1, y1, x2, y2] format (values 0-1)"
|
||||
)
|
||||
|
||||
|
||||
class GenerateObjectExamplesBody(BaseModel):
|
||||
model_name: str = Field(description="Name of the classification model")
|
||||
label: str = Field(
|
||||
description="Object label to collect examples for (e.g., 'person', 'car')"
|
||||
)
|
||||
|
||||
@@ -9,6 +9,7 @@ from typing import List
|
||||
import psutil
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
from pathvalidate import sanitize_filepath
|
||||
from peewee import DoesNotExist
|
||||
from playhouse.shortcuts import model_to_dict
|
||||
|
||||
@@ -26,7 +27,7 @@ from frigate.api.defs.response.export_response import (
|
||||
)
|
||||
from frigate.api.defs.response.generic_response import GenericResponse
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.const import EXPORT_DIR
|
||||
from frigate.const import CLIPS_DIR, EXPORT_DIR
|
||||
from frigate.models import Export, Previews, Recordings
|
||||
from frigate.record.export import (
|
||||
PlaybackFactorEnum,
|
||||
@@ -88,7 +89,14 @@ def export_recording(
|
||||
playback_factor = body.playback
|
||||
playback_source = body.source
|
||||
friendly_name = body.name
|
||||
existing_image = body.image_path
|
||||
existing_image = sanitize_filepath(body.image_path) if body.image_path else None
|
||||
|
||||
# Ensure that existing_image is a valid path
|
||||
if existing_image and not existing_image.startswith(CLIPS_DIR):
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": "Invalid image path"}),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
if playback_source == "recordings":
|
||||
recordings_count = (
|
||||
|
||||
@@ -488,6 +488,8 @@ class FrigateApp:
|
||||
}
|
||||
).execute()
|
||||
|
||||
self.config.auth.admin_first_time_login = True
|
||||
|
||||
logger.info("********************************************************")
|
||||
logger.info("********************************************************")
|
||||
logger.info("*** Auth is enabled, but no users exist. ***")
|
||||
|
||||
@@ -38,6 +38,13 @@ class AuthConfig(FrigateBaseModel):
|
||||
default_factory=dict,
|
||||
title="Role to camera mappings. Empty list grants access to all cameras.",
|
||||
)
|
||||
admin_first_time_login: Optional[bool] = Field(
|
||||
default=False,
|
||||
title="Internal field to expose first-time admin login flag to the UI",
|
||||
description=(
|
||||
"When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. "
|
||||
),
|
||||
)
|
||||
|
||||
@field_validator("roles")
|
||||
@classmethod
|
||||
|
||||
@@ -1,10 +1,18 @@
|
||||
from enum import Enum
|
||||
from typing import Optional, Union
|
||||
|
||||
from pydantic import Field, field_validator
|
||||
|
||||
from ..base import FrigateBaseModel
|
||||
|
||||
__all__ = ["ReviewConfig", "DetectionsConfig", "AlertsConfig"]
|
||||
__all__ = ["ReviewConfig", "DetectionsConfig", "AlertsConfig", "ImageSourceEnum"]
|
||||
|
||||
|
||||
class ImageSourceEnum(str, Enum):
|
||||
"""Image source options for GenAI Review."""
|
||||
|
||||
preview = "preview"
|
||||
recordings = "recordings"
|
||||
|
||||
|
||||
DEFAULT_ALERT_OBJECTS = ["person", "car"]
|
||||
@@ -77,6 +85,10 @@ class GenAIReviewConfig(FrigateBaseModel):
|
||||
)
|
||||
alerts: bool = Field(default=True, title="Enable GenAI for alerts.")
|
||||
detections: bool = Field(default=False, title="Enable GenAI for detections.")
|
||||
image_source: ImageSourceEnum = Field(
|
||||
default=ImageSourceEnum.preview,
|
||||
title="Image source for review descriptions.",
|
||||
)
|
||||
additional_concerns: list[str] = Field(
|
||||
default=[],
|
||||
title="Additional concerns that GenAI should make note of on this camera.",
|
||||
@@ -93,13 +105,34 @@ class GenAIReviewConfig(FrigateBaseModel):
|
||||
default=None,
|
||||
)
|
||||
activity_context_prompt: str = Field(
|
||||
default="""- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
|
||||
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
|
||||
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
|
||||
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
|
||||
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
|
||||
""",
|
||||
title="Custom activity context prompt defining normal activity patterns for this property.",
|
||||
default="""### Normal Activity Indicators (Level 0)
|
||||
- Known/verified people in any zone
|
||||
- People with pets in residential areas
|
||||
- Deliveries: carrying packages to porches/doors, placing packages, leaving
|
||||
- Access to private areas: entering back yards, garages, or homes
|
||||
- Brief movement through semi-public areas (driveways, front yards) with clear purpose (carrying items, going to/from vehicles)
|
||||
- Activity on public areas only (sidewalks, streets) without entering property
|
||||
- Services/maintenance with visible indicators (tools, uniforms, work vehicles)
|
||||
|
||||
### Suspicious Activity Indicators (Level 1)
|
||||
- Testing doors or windows on vehicles or buildings
|
||||
- Standing near vehicles or in private zones without clear purpose or direct movement to destination
|
||||
- Taking items from property (packages, objects from porches/driveways)
|
||||
- Accessing areas at unusual hours without visible legitimate indicators (items, tools, purpose)
|
||||
- Climbing or jumping fences/barriers
|
||||
- Attempting to conceal actions or items
|
||||
- Person in semi-public areas (driveways, front yards) at unusual hours without clear purpose
|
||||
|
||||
### Critical Threat Indicators (Level 2)
|
||||
- Holding break-in tools (crowbars, pry bars, bolt cutters)
|
||||
- Weapons visible (guns, knives, bats used aggressively)
|
||||
- Forced entry in progress
|
||||
- Physical aggression or violence
|
||||
- Active property damage or theft
|
||||
|
||||
### Assessment Guidance
|
||||
These patterns are guidance, not absolute rules. Context matters: time of day, visible items/tools, and apparent purpose help distinguish normal from suspicious. Not all cameras show full entry/exit paths - focus on observable behavior in frame. Use judgment based on the complete picture.""",
|
||||
title="Custom activity context prompt defining normal and suspicious activity patterns for this property.",
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -69,7 +69,7 @@ class BirdClassificationConfig(FrigateBaseModel):
|
||||
|
||||
|
||||
class CustomClassificationStateCameraConfig(FrigateBaseModel):
|
||||
crop: list[int, int, int, int] = Field(
|
||||
crop: list[float, float, float, float] = Field(
|
||||
title="Crop of image frame on this camera to run classification on."
|
||||
)
|
||||
|
||||
@@ -197,7 +197,9 @@ class FaceRecognitionConfig(FrigateBaseModel):
|
||||
title="Min face recognitions for the sub label to be applied to the person object.",
|
||||
)
|
||||
save_attempts: int = Field(
|
||||
default=100, ge=0, title="Number of face attempts to save in the train tab."
|
||||
default=200,
|
||||
ge=0,
|
||||
title="Number of face attempts to save in the recent recognitions tab.",
|
||||
)
|
||||
blur_confidence_filter: bool = Field(
|
||||
default=True, title="Apply blur quality filter to face confidence."
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import copy
|
||||
import datetime
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
@@ -10,22 +11,27 @@ from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import cv2
|
||||
from peewee import DoesNotExist
|
||||
|
||||
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.config.camera.review import GenAIReviewConfig
|
||||
from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum
|
||||
from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION
|
||||
from frigate.data_processing.types import PostProcessDataEnum
|
||||
from frigate.genai import GenAIClient
|
||||
from frigate.models import ReviewSegment
|
||||
from frigate.models import Recordings, ReviewSegment
|
||||
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
|
||||
from frigate.util.image import get_image_from_recording
|
||||
|
||||
from ..post.api import PostProcessorApi
|
||||
from ..types import DataProcessorMetrics
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
RECORDING_BUFFER_START_SECONDS = 5
|
||||
RECORDING_BUFFER_END_SECONDS = 10
|
||||
|
||||
|
||||
class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
def __init__(
|
||||
@@ -43,20 +49,35 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
self.review_descs_dps = EventsPerSecond()
|
||||
self.review_descs_dps.start()
|
||||
|
||||
def calculate_frame_count(self) -> int:
|
||||
"""Calculate optimal number of frames based on context size."""
|
||||
# With our preview images (height of 180px) each image should be ~100 tokens per image
|
||||
# We want to be conservative to not have too long of query times with too many images
|
||||
def calculate_frame_count(
|
||||
self, image_source: ImageSourceEnum = ImageSourceEnum.preview
|
||||
) -> int:
|
||||
"""Calculate optimal number of frames based on context size and image source."""
|
||||
context_size = self.genai_client.get_context_size()
|
||||
|
||||
if context_size > 10000:
|
||||
return 20
|
||||
elif context_size > 6000:
|
||||
return 16
|
||||
elif context_size > 4000:
|
||||
return 12
|
||||
if image_source == ImageSourceEnum.recordings:
|
||||
# With recordings at 480p resolution (480px height), each image uses ~200-300 tokens
|
||||
# This is ~2-3x more than preview images, so we reduce frame count accordingly
|
||||
# to avoid exceeding context limits and maintain reasonable inference times
|
||||
if context_size > 10000:
|
||||
return 12
|
||||
elif context_size > 6000:
|
||||
return 10
|
||||
elif context_size > 4000:
|
||||
return 8
|
||||
else:
|
||||
return 6
|
||||
else:
|
||||
return 8
|
||||
# With preview images (180px height), each image uses ~100 tokens
|
||||
# We can send more frames since they're lower resolution
|
||||
if context_size > 10000:
|
||||
return 20
|
||||
elif context_size > 6000:
|
||||
return 16
|
||||
elif context_size > 4000:
|
||||
return 12
|
||||
else:
|
||||
return 8
|
||||
|
||||
def process_data(self, data, data_type):
|
||||
self.metrics.review_desc_dps.value = self.review_descs_dps.eps()
|
||||
@@ -88,36 +109,50 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
):
|
||||
return
|
||||
|
||||
frames = self.get_cache_frames(
|
||||
camera, final_data["start_time"], final_data["end_time"]
|
||||
)
|
||||
image_source = camera_config.review.genai.image_source
|
||||
|
||||
if not frames:
|
||||
frames = [final_data["thumb_path"]]
|
||||
|
||||
thumbs = []
|
||||
|
||||
for idx, thumb_path in enumerate(frames):
|
||||
thumb_data = cv2.imread(thumb_path)
|
||||
ret, jpg = cv2.imencode(
|
||||
".jpg", thumb_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100]
|
||||
if image_source == ImageSourceEnum.recordings:
|
||||
thumbs = self.get_recording_frames(
|
||||
camera,
|
||||
final_data["start_time"] - RECORDING_BUFFER_START_SECONDS,
|
||||
final_data["end_time"] + RECORDING_BUFFER_END_SECONDS,
|
||||
height=480, # Use 480p for good balance between quality and token usage
|
||||
)
|
||||
|
||||
if ret:
|
||||
thumbs.append(jpg.tobytes())
|
||||
|
||||
if camera_config.review.genai.debug_save_thumbnails:
|
||||
id = data["after"]["id"]
|
||||
Path(os.path.join(CLIPS_DIR, "genai-requests", f"{id}")).mkdir(
|
||||
if not thumbs:
|
||||
# Fallback to preview frames if no recordings available
|
||||
logger.warning(
|
||||
f"No recording frames found for {camera}, falling back to preview frames"
|
||||
)
|
||||
thumbs = self.get_preview_frames_as_bytes(
|
||||
camera,
|
||||
final_data["start_time"],
|
||||
final_data["end_time"],
|
||||
final_data["thumb_path"],
|
||||
id,
|
||||
camera_config.review.genai.debug_save_thumbnails,
|
||||
)
|
||||
elif camera_config.review.genai.debug_save_thumbnails:
|
||||
# Save debug thumbnails for recordings
|
||||
Path(os.path.join(CLIPS_DIR, "genai-requests", id)).mkdir(
|
||||
parents=True, exist_ok=True
|
||||
)
|
||||
shutil.copy(
|
||||
thumb_path,
|
||||
os.path.join(
|
||||
CLIPS_DIR,
|
||||
f"genai-requests/{id}/{idx}.webp",
|
||||
),
|
||||
)
|
||||
for idx, frame_bytes in enumerate(thumbs):
|
||||
with open(
|
||||
os.path.join(CLIPS_DIR, f"genai-requests/{id}/{idx}.jpg"),
|
||||
"wb",
|
||||
) as f:
|
||||
f.write(frame_bytes)
|
||||
else:
|
||||
# Use preview frames
|
||||
thumbs = self.get_preview_frames_as_bytes(
|
||||
camera,
|
||||
final_data["start_time"],
|
||||
final_data["end_time"],
|
||||
final_data["thumb_path"],
|
||||
id,
|
||||
camera_config.review.genai.debug_save_thumbnails,
|
||||
)
|
||||
|
||||
# kickoff analysis
|
||||
self.review_descs_dps.update()
|
||||
@@ -231,6 +266,122 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
|
||||
return selected_frames
|
||||
|
||||
def get_recording_frames(
|
||||
self,
|
||||
camera: str,
|
||||
start_time: float,
|
||||
end_time: float,
|
||||
height: int = 480,
|
||||
) -> list[bytes]:
|
||||
"""Get frames from recordings at specified timestamps."""
|
||||
duration = end_time - start_time
|
||||
desired_frame_count = self.calculate_frame_count(ImageSourceEnum.recordings)
|
||||
|
||||
# Calculate evenly spaced timestamps throughout the duration
|
||||
if desired_frame_count == 1:
|
||||
timestamps = [start_time + duration / 2]
|
||||
else:
|
||||
step = duration / (desired_frame_count - 1)
|
||||
timestamps = [start_time + (i * step) for i in range(desired_frame_count)]
|
||||
|
||||
def extract_frame_from_recording(ts: float) -> bytes | None:
|
||||
"""Extract a single frame from recording at given timestamp."""
|
||||
try:
|
||||
recording = (
|
||||
Recordings.select(
|
||||
Recordings.path,
|
||||
Recordings.start_time,
|
||||
)
|
||||
.where((ts >= Recordings.start_time) & (ts <= Recordings.end_time))
|
||||
.where(Recordings.camera == camera)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.limit(1)
|
||||
.get()
|
||||
)
|
||||
|
||||
time_in_segment = ts - recording.start_time
|
||||
return get_image_from_recording(
|
||||
self.config.ffmpeg,
|
||||
recording.path,
|
||||
time_in_segment,
|
||||
"mjpeg",
|
||||
height=height,
|
||||
)
|
||||
except DoesNotExist:
|
||||
return None
|
||||
|
||||
frames = []
|
||||
|
||||
for timestamp in timestamps:
|
||||
try:
|
||||
# Try to extract frame at exact timestamp
|
||||
image_data = extract_frame_from_recording(timestamp)
|
||||
|
||||
if not image_data:
|
||||
# Try with rounded timestamp as fallback
|
||||
rounded_timestamp = math.ceil(timestamp)
|
||||
image_data = extract_frame_from_recording(rounded_timestamp)
|
||||
|
||||
if image_data:
|
||||
frames.append(image_data)
|
||||
else:
|
||||
logger.warning(
|
||||
f"No recording found for {camera} at timestamp {timestamp}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error extracting frame from recording for {camera} at {timestamp}: {e}"
|
||||
)
|
||||
continue
|
||||
|
||||
return frames
|
||||
|
||||
def get_preview_frames_as_bytes(
|
||||
self,
|
||||
camera: str,
|
||||
start_time: float,
|
||||
end_time: float,
|
||||
thumb_path_fallback: str,
|
||||
review_id: str,
|
||||
save_debug: bool,
|
||||
) -> list[bytes]:
|
||||
"""Get preview frames and convert them to JPEG bytes.
|
||||
|
||||
Args:
|
||||
camera: Camera name
|
||||
start_time: Start timestamp
|
||||
end_time: End timestamp
|
||||
thumb_path_fallback: Fallback thumbnail path if no preview frames found
|
||||
review_id: Review item ID for debug saving
|
||||
save_debug: Whether to save debug thumbnails
|
||||
|
||||
Returns:
|
||||
List of JPEG image bytes
|
||||
"""
|
||||
frame_paths = self.get_cache_frames(camera, start_time, end_time)
|
||||
if not frame_paths:
|
||||
frame_paths = [thumb_path_fallback]
|
||||
|
||||
thumbs = []
|
||||
for idx, thumb_path in enumerate(frame_paths):
|
||||
thumb_data = cv2.imread(thumb_path)
|
||||
ret, jpg = cv2.imencode(
|
||||
".jpg", thumb_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100]
|
||||
)
|
||||
if ret:
|
||||
thumbs.append(jpg.tobytes())
|
||||
|
||||
if save_debug:
|
||||
Path(os.path.join(CLIPS_DIR, "genai-requests", review_id)).mkdir(
|
||||
parents=True, exist_ok=True
|
||||
)
|
||||
shutil.copy(
|
||||
thumb_path,
|
||||
os.path.join(CLIPS_DIR, f"genai-requests/{review_id}/{idx}.webp"),
|
||||
)
|
||||
|
||||
return thumbs
|
||||
|
||||
|
||||
@staticmethod
|
||||
def run_analysis(
|
||||
@@ -254,25 +405,25 @@ def run_analysis(
|
||||
"duration": round(final_data["end_time"] - final_data["start_time"]),
|
||||
}
|
||||
|
||||
objects = []
|
||||
named_objects = []
|
||||
unified_objects = []
|
||||
|
||||
objects_list = final_data["data"]["objects"]
|
||||
sub_labels_list = final_data["data"]["sub_labels"]
|
||||
|
||||
for i, verified_label in enumerate(final_data["data"]["verified_objects"]):
|
||||
object_type = verified_label.replace("-verified", "").replace("_", " ")
|
||||
name = sub_labels_list[i].replace("_", " ").title()
|
||||
unified_objects.append(f"{name} ({object_type})")
|
||||
|
||||
# Add non-verified objects as "Unknown (type)"
|
||||
for label in objects_list:
|
||||
if "-verified" in label:
|
||||
continue
|
||||
elif label in labelmap_objects:
|
||||
objects.append(label.replace("_", " ").title())
|
||||
object_type = label.replace("_", " ")
|
||||
unified_objects.append(f"Unknown ({object_type})")
|
||||
|
||||
for i, verified_label in enumerate(final_data["data"]["verified_objects"]):
|
||||
named_objects.append(
|
||||
f"{sub_labels_list[i].replace('_', ' ').title()} ({verified_label.replace('-verified', '')})"
|
||||
)
|
||||
|
||||
analytics_data["objects"] = objects
|
||||
analytics_data["recognized_objects"] = named_objects
|
||||
analytics_data["unified_objects"] = unified_objects
|
||||
|
||||
metadata = genai_client.generate_review_description(
|
||||
analytics_data,
|
||||
|
||||
@@ -34,6 +34,8 @@ except ModuleNotFoundError:
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MAX_OBJECT_CLASSIFICATIONS = 16
|
||||
|
||||
|
||||
class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
def __init__(
|
||||
@@ -53,9 +55,18 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
self.tensor_output_details: dict[str, Any] | None = None
|
||||
self.labelmap: dict[int, str] = {}
|
||||
self.classifications_per_second = EventsPerSecond()
|
||||
self.inference_speed = InferenceSpeed(
|
||||
self.metrics.classification_speeds[self.model_config.name]
|
||||
)
|
||||
self.state_history: dict[str, dict[str, Any]] = {}
|
||||
|
||||
if (
|
||||
self.metrics
|
||||
and self.model_config.name in self.metrics.classification_speeds
|
||||
):
|
||||
self.inference_speed = InferenceSpeed(
|
||||
self.metrics.classification_speeds[self.model_config.name]
|
||||
)
|
||||
else:
|
||||
self.inference_speed = None
|
||||
|
||||
self.last_run = datetime.datetime.now().timestamp()
|
||||
self.__build_detector()
|
||||
|
||||
@@ -83,12 +94,50 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
|
||||
def __update_metrics(self, duration: float) -> None:
|
||||
self.classifications_per_second.update()
|
||||
self.inference_speed.update(duration)
|
||||
if self.inference_speed:
|
||||
self.inference_speed.update(duration)
|
||||
|
||||
def verify_state_change(self, camera: str, detected_state: str) -> str | None:
|
||||
"""
|
||||
Verify state change requires 3 consecutive identical states before publishing.
|
||||
Returns state to publish or None if verification not complete.
|
||||
"""
|
||||
if camera not in self.state_history:
|
||||
self.state_history[camera] = {
|
||||
"current_state": None,
|
||||
"pending_state": None,
|
||||
"consecutive_count": 0,
|
||||
}
|
||||
|
||||
verification = self.state_history[camera]
|
||||
|
||||
if detected_state == verification["current_state"]:
|
||||
verification["pending_state"] = None
|
||||
verification["consecutive_count"] = 0
|
||||
return None
|
||||
|
||||
if detected_state == verification["pending_state"]:
|
||||
verification["consecutive_count"] += 1
|
||||
|
||||
if verification["consecutive_count"] >= 3:
|
||||
verification["current_state"] = detected_state
|
||||
verification["pending_state"] = None
|
||||
verification["consecutive_count"] = 0
|
||||
return detected_state
|
||||
else:
|
||||
verification["pending_state"] = detected_state
|
||||
verification["consecutive_count"] = 1
|
||||
logger.debug(
|
||||
f"New state '{detected_state}' detected for {camera}, need {3 - verification['consecutive_count']} more consecutive detections"
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray):
|
||||
self.metrics.classification_cps[
|
||||
self.model_config.name
|
||||
].value = self.classifications_per_second.eps()
|
||||
if self.metrics and self.model_config.name in self.metrics.classification_cps:
|
||||
self.metrics.classification_cps[
|
||||
self.model_config.name
|
||||
].value = self.classifications_per_second.eps()
|
||||
camera = frame_data.get("camera")
|
||||
|
||||
if camera not in self.model_config.state_config.cameras:
|
||||
@@ -96,10 +145,10 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
|
||||
camera_config = self.model_config.state_config.cameras[camera]
|
||||
crop = [
|
||||
camera_config.crop[0],
|
||||
camera_config.crop[1],
|
||||
camera_config.crop[2],
|
||||
camera_config.crop[3],
|
||||
camera_config.crop[0] * self.config.cameras[camera].detect.width,
|
||||
camera_config.crop[1] * self.config.cameras[camera].detect.height,
|
||||
camera_config.crop[2] * self.config.cameras[camera].detect.width,
|
||||
camera_config.crop[3] * self.config.cameras[camera].detect.height,
|
||||
]
|
||||
should_run = False
|
||||
|
||||
@@ -121,6 +170,19 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
self.last_run = now
|
||||
should_run = True
|
||||
|
||||
# Shortcut: always run if we have a pending state verification to complete
|
||||
if (
|
||||
not should_run
|
||||
and camera in self.state_history
|
||||
and self.state_history[camera]["pending_state"] is not None
|
||||
and now > self.last_run + 0.5
|
||||
):
|
||||
self.last_run = now
|
||||
should_run = True
|
||||
logger.debug(
|
||||
f"Running verification check for pending state: {self.state_history[camera]['pending_state']} ({self.state_history[camera]['consecutive_count']}/3)"
|
||||
)
|
||||
|
||||
if not should_run:
|
||||
return
|
||||
|
||||
@@ -178,10 +240,19 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||
score,
|
||||
)
|
||||
|
||||
if score >= self.model_config.threshold:
|
||||
if score < self.model_config.threshold:
|
||||
logger.debug(
|
||||
f"Score {score} below threshold {self.model_config.threshold}, skipping verification"
|
||||
)
|
||||
return
|
||||
|
||||
detected_state = self.labelmap[best_id]
|
||||
verified_state = self.verify_state_change(camera, detected_state)
|
||||
|
||||
if verified_state is not None:
|
||||
self.requestor.send_data(
|
||||
f"{camera}/classification/{self.model_config.name}",
|
||||
self.labelmap[best_id],
|
||||
verified_state,
|
||||
)
|
||||
|
||||
def handle_request(self, topic, request_data):
|
||||
@@ -220,12 +291,20 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
self.sub_label_publisher = sub_label_publisher
|
||||
self.tensor_input_details: dict[str, Any] | None = None
|
||||
self.tensor_output_details: dict[str, Any] | None = None
|
||||
self.detected_objects: dict[str, float] = {}
|
||||
self.classification_history: dict[str, list[tuple[str, float, float]]] = {}
|
||||
self.labelmap: dict[int, str] = {}
|
||||
self.classifications_per_second = EventsPerSecond()
|
||||
self.inference_speed = InferenceSpeed(
|
||||
self.metrics.classification_speeds[self.model_config.name]
|
||||
)
|
||||
|
||||
if (
|
||||
self.metrics
|
||||
and self.model_config.name in self.metrics.classification_speeds
|
||||
):
|
||||
self.inference_speed = InferenceSpeed(
|
||||
self.metrics.classification_speeds[self.model_config.name]
|
||||
)
|
||||
else:
|
||||
self.inference_speed = None
|
||||
|
||||
self.__build_detector()
|
||||
|
||||
@redirect_output_to_logger(logger, logging.DEBUG)
|
||||
@@ -251,12 +330,64 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
|
||||
def __update_metrics(self, duration: float) -> None:
|
||||
self.classifications_per_second.update()
|
||||
self.inference_speed.update(duration)
|
||||
if self.inference_speed:
|
||||
self.inference_speed.update(duration)
|
||||
|
||||
def get_weighted_score(
|
||||
self,
|
||||
object_id: str,
|
||||
current_label: str,
|
||||
current_score: float,
|
||||
current_time: float,
|
||||
) -> tuple[str | None, float]:
|
||||
"""
|
||||
Determine weighted score based on history to prevent false positives/negatives.
|
||||
Requires 60% of attempts to agree on a label before publishing.
|
||||
Returns (weighted_label, weighted_score) or (None, 0.0) if no weighted score.
|
||||
"""
|
||||
if object_id not in self.classification_history:
|
||||
self.classification_history[object_id] = []
|
||||
|
||||
self.classification_history[object_id].append(
|
||||
(current_label, current_score, current_time)
|
||||
)
|
||||
|
||||
history = self.classification_history[object_id]
|
||||
|
||||
if len(history) < 3:
|
||||
return None, 0.0
|
||||
|
||||
label_counts = {}
|
||||
label_scores = {}
|
||||
total_attempts = len(history)
|
||||
|
||||
for label, score, timestamp in history:
|
||||
if label not in label_counts:
|
||||
label_counts[label] = 0
|
||||
label_scores[label] = []
|
||||
|
||||
label_counts[label] += 1
|
||||
label_scores[label].append(score)
|
||||
|
||||
best_label = max(label_counts, key=label_counts.get)
|
||||
best_count = label_counts[best_label]
|
||||
|
||||
consensus_threshold = total_attempts * 0.6
|
||||
if best_count < consensus_threshold:
|
||||
return None, 0.0
|
||||
|
||||
avg_score = sum(label_scores[best_label]) / len(label_scores[best_label])
|
||||
|
||||
if best_label == "none":
|
||||
return None, 0.0
|
||||
|
||||
return best_label, avg_score
|
||||
|
||||
def process_frame(self, obj_data, frame):
|
||||
self.metrics.classification_cps[
|
||||
self.model_config.name
|
||||
].value = self.classifications_per_second.eps()
|
||||
if self.metrics and self.model_config.name in self.metrics.classification_cps:
|
||||
self.metrics.classification_cps[
|
||||
self.model_config.name
|
||||
].value = self.classifications_per_second.eps()
|
||||
|
||||
if obj_data["false_positive"]:
|
||||
return
|
||||
@@ -264,6 +395,21 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
if obj_data["label"] not in self.model_config.object_config.objects:
|
||||
return
|
||||
|
||||
if obj_data.get("end_time") is not None:
|
||||
return
|
||||
|
||||
if obj_data.get("stationary"):
|
||||
return
|
||||
|
||||
object_id = obj_data["id"]
|
||||
|
||||
if (
|
||||
object_id in self.classification_history
|
||||
and len(self.classification_history[object_id])
|
||||
>= MAX_OBJECT_CLASSIFICATIONS
|
||||
):
|
||||
return
|
||||
|
||||
now = datetime.datetime.now().timestamp()
|
||||
x, y, x2, y2 = calculate_region(
|
||||
frame.shape,
|
||||
@@ -295,7 +441,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
write_classification_attempt(
|
||||
self.train_dir,
|
||||
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
|
||||
obj_data["id"],
|
||||
object_id,
|
||||
now,
|
||||
"unknown",
|
||||
0.0,
|
||||
@@ -311,13 +457,12 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
probs = res / res.sum(axis=0)
|
||||
best_id = np.argmax(probs)
|
||||
score = round(probs[best_id], 2)
|
||||
previous_score = self.detected_objects.get(obj_data["id"], 0.0)
|
||||
self.__update_metrics(datetime.datetime.now().timestamp() - now)
|
||||
|
||||
write_classification_attempt(
|
||||
self.train_dir,
|
||||
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
|
||||
obj_data["id"],
|
||||
object_id,
|
||||
now,
|
||||
self.labelmap[best_id],
|
||||
score,
|
||||
@@ -327,30 +472,34 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
logger.debug(f"Score {score} is less than threshold.")
|
||||
return
|
||||
|
||||
if score <= previous_score:
|
||||
logger.debug(f"Score {score} is worse than previous score {previous_score}")
|
||||
return
|
||||
|
||||
sub_label = self.labelmap[best_id]
|
||||
self.detected_objects[obj_data["id"]] = score
|
||||
|
||||
if (
|
||||
self.model_config.object_config.classification_type
|
||||
== ObjectClassificationType.sub_label
|
||||
):
|
||||
if sub_label != "none":
|
||||
consensus_label, consensus_score = self.get_weighted_score(
|
||||
object_id, sub_label, score, now
|
||||
)
|
||||
|
||||
if consensus_label is not None:
|
||||
if (
|
||||
self.model_config.object_config.classification_type
|
||||
== ObjectClassificationType.sub_label
|
||||
):
|
||||
self.sub_label_publisher.publish(
|
||||
(obj_data["id"], sub_label, score),
|
||||
(object_id, consensus_label, consensus_score),
|
||||
EventMetadataTypeEnum.sub_label,
|
||||
)
|
||||
elif (
|
||||
self.model_config.object_config.classification_type
|
||||
== ObjectClassificationType.attribute
|
||||
):
|
||||
self.sub_label_publisher.publish(
|
||||
(obj_data["id"], self.model_config.name, sub_label, score),
|
||||
EventMetadataTypeEnum.attribute.value,
|
||||
)
|
||||
elif (
|
||||
self.model_config.object_config.classification_type
|
||||
== ObjectClassificationType.attribute
|
||||
):
|
||||
self.sub_label_publisher.publish(
|
||||
(
|
||||
object_id,
|
||||
self.model_config.name,
|
||||
consensus_label,
|
||||
consensus_score,
|
||||
),
|
||||
EventMetadataTypeEnum.attribute.value,
|
||||
)
|
||||
|
||||
def handle_request(self, topic, request_data):
|
||||
if topic == EmbeddingsRequestEnum.reload_classification_model.value:
|
||||
@@ -368,8 +517,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
return None
|
||||
|
||||
def expire_object(self, object_id, camera):
|
||||
if object_id in self.detected_objects:
|
||||
self.detected_objects.pop(object_id)
|
||||
if object_id in self.classification_history:
|
||||
self.classification_history.pop(object_id)
|
||||
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -9,6 +9,7 @@ from typing import Any
|
||||
|
||||
from peewee import DoesNotExist
|
||||
|
||||
from frigate.comms.config_updater import ConfigSubscriber
|
||||
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
|
||||
from frigate.comms.embeddings_updater import (
|
||||
EmbeddingsRequestEnum,
|
||||
@@ -95,6 +96,9 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
CameraConfigUpdateEnum.semantic_search,
|
||||
],
|
||||
)
|
||||
self.classification_config_subscriber = ConfigSubscriber(
|
||||
"config/classification/custom/"
|
||||
)
|
||||
|
||||
# Configure Frigate DB
|
||||
db = SqliteVecQueueDatabase(
|
||||
@@ -255,6 +259,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
"""Maintain a SQLite-vec database for semantic search."""
|
||||
while not self.stop_event.is_set():
|
||||
self.config_updater.check_for_updates()
|
||||
self._check_classification_config_updates()
|
||||
self._process_requests()
|
||||
self._process_updates()
|
||||
self._process_recordings_updates()
|
||||
@@ -265,6 +270,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
self._process_event_metadata()
|
||||
|
||||
self.config_updater.stop()
|
||||
self.classification_config_subscriber.stop()
|
||||
self.event_subscriber.stop()
|
||||
self.event_end_subscriber.stop()
|
||||
self.recordings_subscriber.stop()
|
||||
@@ -275,6 +281,46 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
self.requestor.stop()
|
||||
logger.info("Exiting embeddings maintenance...")
|
||||
|
||||
def _check_classification_config_updates(self) -> None:
|
||||
"""Check for classification config updates and add new processors."""
|
||||
topic, model_config = self.classification_config_subscriber.check_for_update()
|
||||
|
||||
if topic and model_config:
|
||||
model_name = topic.split("/")[-1]
|
||||
self.config.classification.custom[model_name] = model_config
|
||||
|
||||
# Check if processor already exists
|
||||
for processor in self.realtime_processors:
|
||||
if isinstance(
|
||||
processor,
|
||||
(
|
||||
CustomStateClassificationProcessor,
|
||||
CustomObjectClassificationProcessor,
|
||||
),
|
||||
):
|
||||
if processor.model_config.name == model_name:
|
||||
logger.debug(
|
||||
f"Classification processor for model {model_name} already exists, skipping"
|
||||
)
|
||||
return
|
||||
|
||||
if model_config.state_config is not None:
|
||||
processor = CustomStateClassificationProcessor(
|
||||
self.config, model_config, self.requestor, self.metrics
|
||||
)
|
||||
else:
|
||||
processor = CustomObjectClassificationProcessor(
|
||||
self.config,
|
||||
model_config,
|
||||
self.event_metadata_publisher,
|
||||
self.metrics,
|
||||
)
|
||||
|
||||
self.realtime_processors.append(processor)
|
||||
logger.info(
|
||||
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
|
||||
)
|
||||
|
||||
def _process_requests(self) -> None:
|
||||
"""Process embeddings requests"""
|
||||
|
||||
|
||||
@@ -150,10 +150,10 @@ PRESETS_HW_ACCEL_SCALE["preset-rk-h265"] = PRESETS_HW_ACCEL_SCALE[FFMPEG_HWACCEL
|
||||
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
|
||||
"preset-rpi-64-h264": "{0} -hide_banner {1} -c:v h264_v4l2m2m {2}",
|
||||
"preset-rpi-64-h265": "{0} -hide_banner {1} -c:v hevc_v4l2m2m {2}",
|
||||
FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
|
||||
FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
|
||||
"preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
|
||||
"preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v main -level:v 4.1 -async_depth:v 1 {2}",
|
||||
FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} {3} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
|
||||
FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -hwaccel device {3} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
|
||||
"preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
|
||||
"preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}",
|
||||
FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
|
||||
@@ -246,7 +246,7 @@ def parse_preset_hardware_acceleration_scale(
|
||||
",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5" in scale
|
||||
and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None
|
||||
):
|
||||
scale.replace(
|
||||
scale = scale.replace(
|
||||
",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
|
||||
":format=nv12,hwdownload,format=nv12,format=yuv420p",
|
||||
)
|
||||
|
||||
@@ -63,58 +63,69 @@ class GenAIClient:
|
||||
else:
|
||||
return ""
|
||||
|
||||
def get_verified_objects() -> str:
|
||||
if review_data["recognized_objects"]:
|
||||
return " - " + "\n - ".join(review_data["recognized_objects"])
|
||||
def get_objects_list() -> str:
|
||||
if review_data["unified_objects"]:
|
||||
return "\n- " + "\n- ".join(review_data["unified_objects"])
|
||||
else:
|
||||
return " None"
|
||||
return "\n- (No objects detected)"
|
||||
|
||||
context_prompt = f"""
|
||||
Please analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
|
||||
Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
|
||||
|
||||
## Normal Activity Patterns for This Property
|
||||
|
||||
**Normal activity patterns for this property:**
|
||||
{activity_context_prompt}
|
||||
|
||||
## Task Instructions
|
||||
|
||||
Your task is to provide a clear, accurate description of the scene that:
|
||||
1. States exactly what is happening based on observable actions and movements.
|
||||
2. Evaluates whether the observable evidence suggests normal activity for this property or genuine security concerns.
|
||||
2. Evaluates the activity against the Normal and Suspicious Activity Indicators above.
|
||||
3. Assigns a potential_threat_level based on the definitions below, applying them consistently.
|
||||
|
||||
**IMPORTANT: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider higher threat levels if the activity clearly deviates from normal patterns or shows genuine security concerns.**
|
||||
**Use the activity patterns above as guidance to calibrate your assessment. Match the activity against both normal and suspicious indicators, then use your judgment based on the complete context.**
|
||||
|
||||
## Analysis Guidelines
|
||||
|
||||
When forming your description:
|
||||
- **CRITICAL: Only describe objects explicitly listed in "Detected objects" below.** Do not infer or mention additional people, vehicles, or objects not present in the detected objects list, even if visual patterns suggest them. If only a car is detected, do not describe a person interacting with it unless "person" is also in the detected objects list.
|
||||
- **CRITICAL: Only describe objects explicitly listed in "Objects in Scene" below.** Do not infer or mention additional people, vehicles, or objects not present in this list, even if visual patterns suggest them. If only a car is listed, do not describe a person interacting with it unless "person" is also in the objects list.
|
||||
- **Only describe actions actually visible in the frames.** Do not assume or infer actions that you don't observe happening. If someone walks toward furniture but you never see them sit, do not say they sat. Stick to what you can see across the sequence.
|
||||
- Describe what you observe: actions, movements, interactions with objects and the environment. Include any observable environmental changes (e.g., lighting changes triggered by activity).
|
||||
- Note visible details such as clothing, items being carried or placed, tools or equipment present, and how they interact with the property or objects.
|
||||
- Consider the full sequence chronologically: what happens from start to finish, how duration and actions relate to the location and objects involved.
|
||||
- **Use the actual timestamp provided in "Activity started at"** below for time of day context—do not infer time from image brightness or darkness. Unusual hours (late night/early morning) should increase suspicion when the observable behavior itself appears questionable. However, recognize that some legitimate activities can occur at any hour.
|
||||
- Identify patterns that suggest genuine security concerns: testing doors/windows on vehicles or buildings, accessing unauthorized areas, attempting to conceal actions, extended loitering without apparent purpose, taking items, behavior that clearly doesn't align with the zone context and detected objects.
|
||||
- **Weigh all evidence holistically**: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider Level 1 if the activity clearly deviates from normal patterns or shows genuine security concerns that warrant attention.
|
||||
- **Weigh all evidence holistically**: Match the activity against both the normal and suspicious patterns above, then evaluate based on the complete context (zone, objects, time, actions). Activities matching normal patterns should be Level 0. Activities matching suspicious indicators should be Level 1. Use your judgment for edge cases.
|
||||
|
||||
## Response Format
|
||||
|
||||
Your response MUST be a flat JSON object with:
|
||||
- `title` (string): A concise, one-sentence title that captures the main activity. Include any verified recognized objects (from the "Verified recognized objects" list below) and key detected objects. Examples: "Joe walking dog in backyard", "Unknown person testing car doors at night".
|
||||
- `title` (string): A concise, one-sentence title that captures the main activity. Use the exact names from "Objects in Scene" below (e.g., if the list shows "Joe (person)" and "Unknown (person)", say "Joe and unknown person"). Examples: "Joe walking dog in backyard", "Unknown person testing car doors at night", "Joe and unknown person in driveway".
|
||||
- `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
|
||||
- `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
|
||||
- `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
|
||||
{get_concern_prompt()}
|
||||
|
||||
Threat-level definitions:
|
||||
- 0 — **Normal activity (DEFAULT)**: What you observe matches the normal activity patterns above or is consistent with expected activity for this property type. The observable evidence—considering zone context, detected objects, and timing together—supports a benign explanation. **Use this level for routine activities even if minor ambiguous elements exist.**
|
||||
- 1 — **Potentially suspicious**: Observable behavior raises genuine security concerns that warrant human review. The evidence doesn't support a routine explanation and clearly deviates from the normal patterns above. Examples: testing doors/windows on vehicles or structures, accessing areas that don't align with the activity, taking items that likely don't belong to them, behavior clearly inconsistent with the zone and context, or activity that lacks any visible legitimate indicators. **Only use this level when the activity clearly doesn't match normal patterns.**
|
||||
- 2 — **Immediate threat**: Clear evidence of forced entry, break-in, vandalism, aggression, weapons, theft in progress, or active property damage.
|
||||
## Threat Level Definitions
|
||||
|
||||
- 0 — **Normal activity**: The observable activity aligns with the Normal Activity Patterns above. The evidence—considering zone, objects, time, and actions together—supports a benign explanation. **Use this level for routine activities even if minor ambiguous elements exist.**
|
||||
- 1 — **Potentially suspicious**: The observable activity aligns with the Suspicious Activity Indicators above, or shows behavior that raises genuine security concerns. The activity warrants human review. **Use this level when the evidence suggests concerning behavior, even if not an immediate threat.**
|
||||
- 2 — **Immediate threat**: Clear evidence of active criminal activity, forced entry, break-in, vandalism, aggression, weapons, theft in progress, or property damage.
|
||||
|
||||
## Sequence Details
|
||||
|
||||
Sequence details:
|
||||
- Frame 1 = earliest, Frame {len(thumbnails)} = latest
|
||||
- Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
|
||||
- Detected objects: {", ".join(review_data["objects"])}
|
||||
- Verified recognized objects (use these names when describing these objects):
|
||||
{get_verified_objects()}
|
||||
- Zones involved: {", ".join(z.replace("_", " ").title() for z in review_data["zones"]) or "None"}
|
||||
|
||||
**IMPORTANT:**
|
||||
## Objects in Scene
|
||||
|
||||
Each line represents one object in the scene. Named objects are verified identities; "Unknown" indicates unverified objects of that type:
|
||||
{get_objects_list()}
|
||||
|
||||
## Important Notes
|
||||
- Values must be plain strings, floats, or integers — no nested objects, no extra commentary.
|
||||
- Only describe objects from the "Detected objects" list above. Do not hallucinate additional objects.
|
||||
- Only describe objects from the "Objects in Scene" list above. Do not hallucinate additional objects.
|
||||
- When describing people or vehicles, use the exact names provided.
|
||||
{get_language_prompt()}
|
||||
"""
|
||||
logger.debug(
|
||||
@@ -149,7 +160,10 @@ Sequence details:
|
||||
try:
|
||||
metadata = ReviewMetadata.model_validate_json(clean_json)
|
||||
|
||||
if review_data["recognized_objects"]:
|
||||
if any(
|
||||
not obj.startswith("Unknown")
|
||||
for obj in review_data["unified_objects"]
|
||||
):
|
||||
metadata.potential_threat_level = 0
|
||||
|
||||
metadata.time = review_data["start"]
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
"""Ollama Provider for Frigate AI."""
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
from typing import Any, Optional
|
||||
|
||||
from httpx import TimeoutException
|
||||
from ollama import Client as ApiClient
|
||||
@@ -17,10 +17,24 @@ logger = logging.getLogger(__name__)
|
||||
class OllamaClient(GenAIClient):
|
||||
"""Generative AI client for Frigate using Ollama."""
|
||||
|
||||
LOCAL_OPTIMIZED_OPTIONS = {
|
||||
"options": {
|
||||
"temperature": 0.5,
|
||||
"repeat_penalty": 1.05,
|
||||
"presence_penalty": 0.3,
|
||||
},
|
||||
}
|
||||
|
||||
provider: ApiClient
|
||||
provider_options: dict[str, Any]
|
||||
|
||||
def _init_provider(self):
|
||||
"""Initialize the client."""
|
||||
self.provider_options = {
|
||||
**self.LOCAL_OPTIMIZED_OPTIONS,
|
||||
**self.genai_config.provider_options,
|
||||
}
|
||||
|
||||
try:
|
||||
client = ApiClient(host=self.genai_config.base_url, timeout=self.timeout)
|
||||
# ensure the model is available locally
|
||||
@@ -48,7 +62,7 @@ class OllamaClient(GenAIClient):
|
||||
self.genai_config.model,
|
||||
prompt,
|
||||
images=images if images else None,
|
||||
**self.genai_config.provider_options,
|
||||
**self.provider_options,
|
||||
)
|
||||
return result["response"].strip()
|
||||
except (TimeoutException, ResponseError) as e:
|
||||
|
||||
@@ -2,12 +2,15 @@
|
||||
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
from collections import defaultdict
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import FfmpegConfig
|
||||
from frigate.const import (
|
||||
CLIPS_DIR,
|
||||
MODEL_CACHE_DIR,
|
||||
@@ -15,7 +18,10 @@ from frigate.const import (
|
||||
UPDATE_MODEL_STATE,
|
||||
)
|
||||
from frigate.log import redirect_output_to_logger
|
||||
from frigate.models import Event, Recordings, ReviewSegment
|
||||
from frigate.types import ModelStatusTypesEnum
|
||||
from frigate.util.image import get_image_from_recording
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.process import FrigateProcess
|
||||
|
||||
BATCH_SIZE = 16
|
||||
@@ -69,6 +75,7 @@ class ClassificationTrainingProcess(FrigateProcess):
|
||||
logger.info(f"Kicking off classification training for {self.model_name}.")
|
||||
dataset_dir = os.path.join(CLIPS_DIR, self.model_name, "dataset")
|
||||
model_dir = os.path.join(MODEL_CACHE_DIR, self.model_name)
|
||||
os.makedirs(model_dir, exist_ok=True)
|
||||
num_classes = len(
|
||||
[
|
||||
d
|
||||
@@ -139,7 +146,6 @@ class ClassificationTrainingProcess(FrigateProcess):
|
||||
f.write(tflite_model)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def kickoff_model_training(
|
||||
embeddingRequestor: EmbeddingsRequestor, model_name: str
|
||||
) -> None:
|
||||
@@ -172,3 +178,520 @@ def kickoff_model_training(
|
||||
},
|
||||
)
|
||||
requestor.stop()
|
||||
|
||||
|
||||
@staticmethod
|
||||
def collect_state_classification_examples(
|
||||
model_name: str, cameras: dict[str, tuple[float, float, float, float]]
|
||||
) -> None:
|
||||
"""
|
||||
Collect representative state classification examples from review items.
|
||||
|
||||
This function:
|
||||
1. Queries review items from specified cameras
|
||||
2. Selects 100 balanced timestamps across the data
|
||||
3. Extracts keyframes from recordings (cropped to specified regions)
|
||||
4. Selects 20 most visually distinct images
|
||||
5. Saves them to the dataset directory
|
||||
|
||||
Args:
|
||||
model_name: Name of the classification model
|
||||
cameras: Dict mapping camera names to normalized crop coordinates [x1, y1, x2, y2] (0-1)
|
||||
"""
|
||||
dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
|
||||
temp_dir = os.path.join(dataset_dir, "temp")
|
||||
os.makedirs(temp_dir, exist_ok=True)
|
||||
|
||||
# Step 1: Get review items for the cameras
|
||||
camera_names = list(cameras.keys())
|
||||
review_items = list(
|
||||
ReviewSegment.select()
|
||||
.where(ReviewSegment.camera.in_(camera_names))
|
||||
.where(ReviewSegment.end_time.is_null(False))
|
||||
.order_by(ReviewSegment.start_time.asc())
|
||||
)
|
||||
|
||||
if not review_items:
|
||||
logger.warning(f"No review items found for cameras: {camera_names}")
|
||||
return
|
||||
|
||||
# Step 2: Create balanced timestamp selection (100 samples)
|
||||
timestamps = _select_balanced_timestamps(review_items, target_count=100)
|
||||
|
||||
# Step 3: Extract keyframes from recordings with crops applied
|
||||
keyframes = _extract_keyframes(
|
||||
"/usr/lib/ffmpeg/7.0/bin/ffmpeg", timestamps, temp_dir, cameras
|
||||
)
|
||||
|
||||
# Step 4: Select 24 most visually distinct images (they're already cropped)
|
||||
distinct_images = _select_distinct_images(keyframes, target_count=24)
|
||||
|
||||
# Step 5: Save to train directory for later classification
|
||||
train_dir = os.path.join(CLIPS_DIR, model_name, "train")
|
||||
os.makedirs(train_dir, exist_ok=True)
|
||||
|
||||
saved_count = 0
|
||||
for idx, image_path in enumerate(distinct_images):
|
||||
dest_path = os.path.join(train_dir, f"example_{idx:03d}.jpg")
|
||||
try:
|
||||
img = cv2.imread(image_path)
|
||||
|
||||
if img is not None:
|
||||
cv2.imwrite(dest_path, img)
|
||||
saved_count += 1
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save image {image_path}: {e}")
|
||||
|
||||
import shutil
|
||||
|
||||
try:
|
||||
shutil.rmtree(temp_dir)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to clean up temp directory: {e}")
|
||||
|
||||
|
||||
def _select_balanced_timestamps(
|
||||
review_items: list[ReviewSegment], target_count: int = 100
|
||||
) -> list[dict]:
|
||||
"""
|
||||
Select balanced timestamps from review items.
|
||||
|
||||
Strategy:
|
||||
- Group review items by camera and time of day
|
||||
- Sample evenly across groups to ensure diversity
|
||||
- For each selected review item, pick a random timestamp within its duration
|
||||
|
||||
Returns:
|
||||
List of dicts with keys: camera, timestamp, review_item
|
||||
"""
|
||||
# Group by camera and hour of day for temporal diversity
|
||||
grouped = defaultdict(list)
|
||||
|
||||
for item in review_items:
|
||||
camera = item.camera
|
||||
# Group by 6-hour blocks for temporal diversity
|
||||
hour_block = int(item.start_time // (6 * 3600))
|
||||
key = f"{camera}_{hour_block}"
|
||||
grouped[key].append(item)
|
||||
|
||||
# Calculate how many samples per group
|
||||
num_groups = len(grouped)
|
||||
if num_groups == 0:
|
||||
return []
|
||||
|
||||
samples_per_group = max(1, target_count // num_groups)
|
||||
timestamps = []
|
||||
|
||||
# Sample from each group
|
||||
for group_items in grouped.values():
|
||||
# Take samples_per_group items from this group
|
||||
sample_size = min(samples_per_group, len(group_items))
|
||||
sampled_items = random.sample(group_items, sample_size)
|
||||
|
||||
for item in sampled_items:
|
||||
# Pick a random timestamp within the review item's duration
|
||||
duration = item.end_time - item.start_time
|
||||
if duration <= 0:
|
||||
continue
|
||||
|
||||
# Sample from middle 80% to avoid edge artifacts
|
||||
offset = random.uniform(duration * 0.1, duration * 0.9)
|
||||
timestamp = item.start_time + offset
|
||||
|
||||
timestamps.append(
|
||||
{
|
||||
"camera": item.camera,
|
||||
"timestamp": timestamp,
|
||||
"review_item": item,
|
||||
}
|
||||
)
|
||||
|
||||
# If we don't have enough, sample more from larger groups
|
||||
while len(timestamps) < target_count and len(timestamps) < len(review_items):
|
||||
for group_items in grouped.values():
|
||||
if len(timestamps) >= target_count:
|
||||
break
|
||||
|
||||
# Pick a random item not already sampled
|
||||
item = random.choice(group_items)
|
||||
duration = item.end_time - item.start_time
|
||||
if duration <= 0:
|
||||
continue
|
||||
|
||||
offset = random.uniform(duration * 0.1, duration * 0.9)
|
||||
timestamp = item.start_time + offset
|
||||
|
||||
# Check if we already have a timestamp near this one
|
||||
if not any(abs(t["timestamp"] - timestamp) < 1.0 for t in timestamps):
|
||||
timestamps.append(
|
||||
{
|
||||
"camera": item.camera,
|
||||
"timestamp": timestamp,
|
||||
"review_item": item,
|
||||
}
|
||||
)
|
||||
|
||||
return timestamps[:target_count]
|
||||
|
||||
|
||||
def _extract_keyframes(
|
||||
ffmpeg_path: str,
|
||||
timestamps: list[dict],
|
||||
output_dir: str,
|
||||
camera_crops: dict[str, tuple[float, float, float, float]],
|
||||
) -> list[str]:
|
||||
"""
|
||||
Extract keyframes from recordings at specified timestamps and crop to specified regions.
|
||||
|
||||
Args:
|
||||
ffmpeg_path: Path to ffmpeg binary
|
||||
timestamps: List of timestamp dicts from _select_balanced_timestamps
|
||||
output_dir: Directory to save extracted frames
|
||||
camera_crops: Dict mapping camera names to normalized crop coordinates [x1, y1, x2, y2] (0-1)
|
||||
|
||||
Returns:
|
||||
List of paths to successfully extracted and cropped keyframe images
|
||||
"""
|
||||
keyframe_paths = []
|
||||
|
||||
for idx, ts_info in enumerate(timestamps):
|
||||
camera = ts_info["camera"]
|
||||
timestamp = ts_info["timestamp"]
|
||||
|
||||
if camera not in camera_crops:
|
||||
logger.warning(f"No crop coordinates for camera {camera}")
|
||||
continue
|
||||
|
||||
norm_x1, norm_y1, norm_x2, norm_y2 = camera_crops[camera]
|
||||
|
||||
try:
|
||||
recording = (
|
||||
Recordings.select()
|
||||
.where(
|
||||
(timestamp >= Recordings.start_time)
|
||||
& (timestamp <= Recordings.end_time)
|
||||
& (Recordings.camera == camera)
|
||||
)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.limit(1)
|
||||
.get()
|
||||
)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
relative_time = timestamp - recording.start_time
|
||||
|
||||
try:
|
||||
config = FfmpegConfig(path="/usr/lib/ffmpeg/7.0")
|
||||
image_data = get_image_from_recording(
|
||||
config,
|
||||
recording.path,
|
||||
relative_time,
|
||||
codec="mjpeg",
|
||||
height=None,
|
||||
)
|
||||
|
||||
if image_data:
|
||||
nparr = np.frombuffer(image_data, np.uint8)
|
||||
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
|
||||
|
||||
if img is not None:
|
||||
height, width = img.shape[:2]
|
||||
|
||||
x1 = int(norm_x1 * width)
|
||||
y1 = int(norm_y1 * height)
|
||||
x2 = int(norm_x2 * width)
|
||||
y2 = int(norm_y2 * height)
|
||||
|
||||
x1_clipped = max(0, min(x1, width))
|
||||
y1_clipped = max(0, min(y1, height))
|
||||
x2_clipped = max(0, min(x2, width))
|
||||
y2_clipped = max(0, min(y2, height))
|
||||
|
||||
if x2_clipped > x1_clipped and y2_clipped > y1_clipped:
|
||||
cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
|
||||
resized = cv2.resize(cropped, (224, 224))
|
||||
|
||||
output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
|
||||
cv2.imwrite(output_path, resized)
|
||||
keyframe_paths.append(output_path)
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
f"Failed to extract frame from {recording.path} at {relative_time}s: {e}"
|
||||
)
|
||||
continue
|
||||
|
||||
return keyframe_paths
|
||||
|
||||
|
||||
def _select_distinct_images(
|
||||
image_paths: list[str], target_count: int = 20
|
||||
) -> list[str]:
|
||||
"""
|
||||
Select the most visually distinct images from a set of keyframes.
|
||||
|
||||
Uses a greedy algorithm based on image histograms:
|
||||
1. Start with a random image
|
||||
2. Iteratively add the image that is most different from already selected images
|
||||
3. Difference is measured using histogram comparison
|
||||
|
||||
Args:
|
||||
image_paths: List of paths to candidate images
|
||||
target_count: Number of distinct images to select
|
||||
|
||||
Returns:
|
||||
List of paths to selected images
|
||||
"""
|
||||
if len(image_paths) <= target_count:
|
||||
return image_paths
|
||||
|
||||
histograms = {}
|
||||
valid_paths = []
|
||||
|
||||
for path in image_paths:
|
||||
try:
|
||||
img = cv2.imread(path)
|
||||
|
||||
if img is None:
|
||||
continue
|
||||
|
||||
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
|
||||
hist = cv2.calcHist(
|
||||
[hsv], [0, 1, 2], None, [8, 8, 8], [0, 180, 0, 256, 0, 256]
|
||||
)
|
||||
hist = cv2.normalize(hist, hist).flatten()
|
||||
histograms[path] = hist
|
||||
valid_paths.append(path)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to process image {path}: {e}")
|
||||
continue
|
||||
|
||||
if len(valid_paths) <= target_count:
|
||||
return valid_paths
|
||||
|
||||
selected = []
|
||||
first_image = random.choice(valid_paths)
|
||||
selected.append(first_image)
|
||||
remaining = [p for p in valid_paths if p != first_image]
|
||||
|
||||
while len(selected) < target_count and remaining:
|
||||
max_min_distance = -1
|
||||
best_candidate = None
|
||||
|
||||
for candidate in remaining:
|
||||
min_distance = float("inf")
|
||||
|
||||
for selected_img in selected:
|
||||
distance = cv2.compareHist(
|
||||
histograms[candidate],
|
||||
histograms[selected_img],
|
||||
cv2.HISTCMP_BHATTACHARYYA,
|
||||
)
|
||||
min_distance = min(min_distance, distance)
|
||||
|
||||
if min_distance > max_min_distance:
|
||||
max_min_distance = min_distance
|
||||
best_candidate = candidate
|
||||
|
||||
if best_candidate:
|
||||
selected.append(best_candidate)
|
||||
remaining.remove(best_candidate)
|
||||
else:
|
||||
break
|
||||
|
||||
return selected
|
||||
|
||||
|
||||
@staticmethod
def collect_object_classification_examples(
    model_name: str,
    label: str,
) -> None:
    """
    Collect representative object classification examples from event thumbnails.

    This function:
    1. Queries all events for the specified label
    2. Selects 100 events balanced across cameras and times of day
    3. Retrieves thumbnails for the selected events (center-cropped based on
       the tracked object's relative size — see _extract_event_thumbnails)
    4. Selects the 24 most visually distinct thumbnails
    5. Saves them to the model's train directory for later classification

    Args:
        model_name: Name of the classification model
        label: Object label to collect (e.g., "person", "car")
    """
    # Temp workspace under the model's dataset dir; removed at the end.
    dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
    temp_dir = os.path.join(dataset_dir, "temp")
    os.makedirs(temp_dir, exist_ok=True)

    # Step 1: Query events for the specified label
    events = list(
        Event.select().where((Event.label == label)).order_by(Event.start_time.asc())
    )

    if not events:
        logger.warning(f"No events found for label '{label}'")
        return

    logger.debug(f"Found {len(events)} events")

    # Step 2: Select balanced events (100 samples)
    selected_events = _select_balanced_events(events, target_count=100)
    logger.debug(f"Selected {len(selected_events)} events")

    # Step 3: Extract thumbnails from events into the temp workspace
    thumbnails = _extract_event_thumbnails(selected_events, temp_dir)
    logger.debug(f"Successfully extracted {len(thumbnails)} thumbnails")

    # Step 4: Select 24 most visually distinct thumbnails
    distinct_images = _select_distinct_images(thumbnails, target_count=24)
    logger.debug(f"Selected {len(distinct_images)} distinct images")

    # Step 5: Save to train directory for later classification
    train_dir = os.path.join(CLIPS_DIR, model_name, "train")
    os.makedirs(train_dir, exist_ok=True)

    saved_count = 0
    for idx, image_path in enumerate(distinct_images):
        dest_path = os.path.join(train_dir, f"example_{idx:03d}.jpg")
        try:
            # Re-encode via imread/imwrite; unreadable files are skipped.
            img = cv2.imread(image_path)

            if img is not None:
                cv2.imwrite(dest_path, img)
                saved_count += 1
        except Exception as e:
            logger.error(f"Failed to save image {image_path}: {e}")

    import shutil

    # Best-effort cleanup of the temp workspace.
    try:
        shutil.rmtree(temp_dir)
    except Exception as e:
        logger.warning(f"Failed to clean up temp directory: {e}")

    logger.debug(
        f"Successfully collected {saved_count} classification examples in {train_dir}"
    )
|
||||
|
||||
|
||||
def _select_balanced_events(
    events: list[Event], target_count: int = 100
) -> list[Event]:
    """
    Select a balanced subset of events.

    Events are bucketed by camera and 6-hour time block, then sampled evenly
    across buckets (highest-scored first) so the result is diverse in both
    space and time. Any shortfall is topped up with the best-scoring events
    overall.

    Returns:
        Up to target_count selected events.
    """

    def score_of(event):
        # Events without data default to a score of 0.
        return event.data.get("score", 0) if event.data else 0

    buckets = defaultdict(list)
    for event in events:
        block = int(event.start_time // (6 * 3600))
        buckets[f"{event.camera}_{block}"].append(event)

    if not buckets:
        return []

    per_bucket = max(1, target_count // len(buckets))

    chosen = []
    for bucket in buckets.values():
        ranked = sorted(bucket, key=score_of, reverse=True)
        chosen.extend(ranked[: min(per_bucket, len(ranked))])

    # Top up with the highest-scoring leftovers if the per-bucket pass
    # came up short.
    shortfall = target_count - len(chosen)
    if shortfall > 0:
        leftovers = sorted(
            (e for e in events if e not in chosen), key=score_of, reverse=True
        )
        chosen.extend(leftovers[:shortfall])

    return chosen[:target_count]
|
||||
|
||||
|
||||
def _extract_event_thumbnails(events: list[Event], output_dir: str) -> list[str]:
    """
    Write event thumbnails to disk, center-cropped and resized to 224x224.

    The crop size adapts to how much of the detection region the tracked
    object occupied: small objects get a tighter center crop, large objects
    keep most of the frame.

    Args:
        events: Event rows to pull thumbnails for.
        output_dir: Directory to save thumbnail JPEGs into.

    Returns:
        Paths of the thumbnails that were successfully written.
    """
    written = []

    # (upper box/region area ratio, crop fraction) — first match wins.
    crop_ladder = (
        (0.05, 0.4),
        (0.10, 0.5),
        (0.20, 0.65),
        (0.35, 0.80),
    )

    for idx, event in enumerate(events):
        try:
            raw = get_event_thumbnail_bytes(event)
            if not raw:
                continue

            img = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)
            if img is None:
                continue

            height, width = img.shape[:2]

            # Default: keep the whole thumbnail.
            crop_fraction = 1.0
            data = event.data
            if data and "box" in data and "region" in data:
                box = data["box"]
                region = data["region"]

                if len(box) == 4 and len(region) == 4:
                    # Relative area of the object box within its region.
                    area_ratio = (box[2] * box[3]) / (region[2] * region[3])
                    for ceiling, fraction in crop_ladder:
                        if area_ratio < ceiling:
                            crop_fraction = fraction
                            break
                    else:
                        crop_fraction = 0.95

            crop_w = int(width * crop_fraction)
            crop_h = int(height * crop_fraction)
            left = (width - crop_w) // 2
            top = (height - crop_h) // 2

            resized = cv2.resize(
                img[top : top + crop_h, left : left + crop_w], (224, 224)
            )
            out_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
            cv2.imwrite(out_path, resized)
            written.append(out_path)

        except Exception as e:
            logger.debug(f"Failed to extract thumbnail for event {event.id}: {e}")
            continue

    return written
|
||||
|
||||
@@ -577,7 +577,7 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro
|
||||
if detailed and format_entries:
|
||||
ffprobe_cmd.extend(["-show_entries", f"format={format_entries}"])
|
||||
|
||||
ffprobe_cmd.extend(["-loglevel", "quiet", clean_path])
|
||||
ffprobe_cmd.extend(["-loglevel", "error", clean_path])
|
||||
|
||||
return sp.run(ffprobe_cmd, capture_output=True)
|
||||
|
||||
|
||||
@@ -207,6 +207,14 @@
|
||||
"length": {
|
||||
"feet": "peus",
|
||||
"meters": "metres"
|
||||
},
|
||||
"data": {
|
||||
"kbps": "Kb/s",
|
||||
"mbps": "Mb/s",
|
||||
"gbps": "Gb/s",
|
||||
"kbph": "kB/hora",
|
||||
"mbph": "MB/hora",
|
||||
"gbph": "GB/hora"
|
||||
}
|
||||
},
|
||||
"label": {
|
||||
@@ -270,5 +278,8 @@
|
||||
"desc": "Pàgina no trobada"
|
||||
},
|
||||
"selectItem": "Selecciona {{item}}",
|
||||
"readTheDocumentation": "Llegir la documentació"
|
||||
"readTheDocumentation": "Llegir la documentació",
|
||||
"information": {
|
||||
"pixels": "{{area}}px"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,7 +98,8 @@
|
||||
"button": {
|
||||
"deleteNow": "Suprimir ara",
|
||||
"export": "Exportar",
|
||||
"markAsReviewed": "Marcar com a revisat"
|
||||
"markAsReviewed": "Marcar com a revisat",
|
||||
"markAsUnreviewed": "Marcar com no revisat"
|
||||
},
|
||||
"confirmDelete": {
|
||||
"title": "Confirmar la supressió",
|
||||
|
||||
@@ -130,6 +130,9 @@
|
||||
"playInBackground": {
|
||||
"label": "Reproduir en segon pla",
|
||||
"tips": "Habilita aquesta opció per a continuar la transmissió tot i que el reproductor estigui ocult."
|
||||
},
|
||||
"debug": {
|
||||
"picker": "Selecció de stream no disponible en mode debug. La vista debug sempre fa servir el stream assignat pel rol de detecció."
|
||||
}
|
||||
},
|
||||
"streamingSettings": "Paràmetres de transmissió",
|
||||
@@ -167,5 +170,14 @@
|
||||
"transcription": {
|
||||
"enable": "Habilita la transcripció d'àudio en temps real",
|
||||
"disable": "Deshabilita la transcripció d'àudio en temps real"
|
||||
},
|
||||
"snapshot": {
|
||||
"takeSnapshot": "Descarregar una instantània",
|
||||
"noVideoSource": "No hi ha cap font de vídeo per fer una instantània.",
|
||||
"captureFailed": "Error capturant una instantània.",
|
||||
"downloadStarted": "Inici de baixada d'instantània."
|
||||
},
|
||||
"noCameras": {
|
||||
"title": "Sense càmeres per configurar"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,9 @@
|
||||
"masksAndZones": "Editor de màscares i zones - Frigate",
|
||||
"general": "Paràmetres Generals - Frigate",
|
||||
"frigatePlus": "Paràmetres de Frigate+ - Frigate",
|
||||
"notifications": "Paràmetres de notificació - Frigate"
|
||||
"notifications": "Paràmetres de notificació - Frigate",
|
||||
"cameraManagement": "Gestionar càmeres - Frigate",
|
||||
"cameraReview": "Configuració Revisió de Càmeres - Frigate"
|
||||
},
|
||||
"menu": {
|
||||
"ui": "Interfície d'usuari",
|
||||
@@ -21,7 +23,10 @@
|
||||
"debug": "Depuració",
|
||||
"frigateplus": "Frigate+",
|
||||
"enrichments": "Enriquiments",
|
||||
"triggers": "Disparadors"
|
||||
"triggers": "Disparadors",
|
||||
"cameraManagement": "Gestió",
|
||||
"cameraReview": "Revisió",
|
||||
"roles": "Rols"
|
||||
},
|
||||
"dialog": {
|
||||
"unsavedChanges": {
|
||||
@@ -825,5 +830,16 @@
|
||||
"userUpdateFailed": "Error a l'actualitzar els rols d'usuari: {{errorMessage}}"
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraWizard": {
|
||||
"title": "Afegir Càmera",
|
||||
"description": "Seguiu els passos de sota per afegir una nova càmera a la instal·lació.",
|
||||
"steps": {
|
||||
"nameAndConnection": "Nom i connexió",
|
||||
"streamConfiguration": "Configuració de stream"
|
||||
},
|
||||
"step1": {
|
||||
"cameraBrand": "Marca de la càmera"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
"object": "Ladění - Frigate",
|
||||
"general": "Obecné nastavení - Frigate",
|
||||
"frigatePlus": "Frigate+ nastavení - Frigate",
|
||||
"enrichments": "Nastavení obohacení - Frigate"
|
||||
"enrichments": "Nastavení obohacení - Frigate",
|
||||
"cameraManagement": "Správa kamer - Frigate"
|
||||
},
|
||||
"frigatePlus": {
|
||||
"toast": {
|
||||
|
||||
@@ -232,6 +232,14 @@
|
||||
"length": {
|
||||
"feet": "Fuß",
|
||||
"meters": "Meter"
|
||||
},
|
||||
"data": {
|
||||
"kbps": "kB/s",
|
||||
"mbps": "MB/s",
|
||||
"gbps": "GB/s",
|
||||
"kbph": "kB/Stunde",
|
||||
"mbph": "MB/Stunde",
|
||||
"gbph": "GB/Stunde"
|
||||
}
|
||||
},
|
||||
"toast": {
|
||||
@@ -273,5 +281,8 @@
|
||||
"desc": "Du hast keine Berechtigung diese Seite anzuzeigen.",
|
||||
"documentTitle": "Zugang verweigert - Frigate",
|
||||
"title": "Zugang verweigert"
|
||||
},
|
||||
"information": {
|
||||
"pixels": "{{area}}px"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -117,7 +117,8 @@
|
||||
"button": {
|
||||
"export": "Exportieren",
|
||||
"markAsReviewed": "Als geprüft markieren",
|
||||
"deleteNow": "Jetzt löschen"
|
||||
"deleteNow": "Jetzt löschen",
|
||||
"markAsUnreviewed": "Als ungeprüft markieren"
|
||||
}
|
||||
},
|
||||
"imagePicker": {
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"donut": "Donut",
|
||||
"cake": "Kuchen",
|
||||
"chair": "Stuhl",
|
||||
"couch": "Couch",
|
||||
"couch": "Sofa",
|
||||
"bed": "Bett",
|
||||
"dining_table": "Esstisch",
|
||||
"toilet": "Toilette",
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
"selectFace": "Wähle Gesicht",
|
||||
"imageEntry": {
|
||||
"dropActive": "Ziehe das Bild hierher…",
|
||||
"dropInstructions": "Ziehe ein Bild hier her oder klicke um eines auszuwählen",
|
||||
"dropInstructions": "Ziehe ein Bild hier her, füge es ein oder klicke um eines auszuwählen",
|
||||
"maxSize": "Maximale Größe: {{size}} MB",
|
||||
"validation": {
|
||||
"selectImage": "Bitte wähle ein Bild aus."
|
||||
|
||||
@@ -30,16 +30,16 @@
|
||||
},
|
||||
"zoom": {
|
||||
"in": {
|
||||
"label": "PTZ-Kamera vergrößern"
|
||||
"label": "PTZ-Kamera rein zoomen"
|
||||
},
|
||||
"out": {
|
||||
"label": "PTZ-Kamera herauszoomen"
|
||||
"label": "PTZ-Kamera heraus zoomen"
|
||||
}
|
||||
},
|
||||
"presets": "PTZ-Kameravoreinstellungen",
|
||||
"presets": "PTZ-Kamera Voreinstellungen",
|
||||
"frame": {
|
||||
"center": {
|
||||
"label": "Klicken Sie in den Rahmen, um die PTZ-Kamera zu zentrieren"
|
||||
"label": "Klicke in den Rahmen, um die PTZ-Kamera zu zentrieren"
|
||||
}
|
||||
},
|
||||
"focus": {
|
||||
@@ -62,8 +62,8 @@
|
||||
"enable": "Aufzeichnung aktivieren"
|
||||
},
|
||||
"snapshots": {
|
||||
"enable": "Snapshots aktivieren",
|
||||
"disable": "Snapshots deaktivieren"
|
||||
"enable": "Schnappschüsse aktivieren",
|
||||
"disable": "Schnappschüsse deaktivieren"
|
||||
},
|
||||
"autotracking": {
|
||||
"disable": "Autotracking deaktivieren",
|
||||
@@ -74,7 +74,7 @@
|
||||
"disable": "Stream-Statistiken ausblenden"
|
||||
},
|
||||
"manualRecording": {
|
||||
"title": "On-Demand Aufzeichnung",
|
||||
"title": "On-Demand",
|
||||
"showStats": {
|
||||
"label": "Statistiken anzeigen",
|
||||
"desc": "Aktivieren Sie diese Option, um Stream-Statistiken als Overlay über dem Kamera-Feed anzuzeigen."
|
||||
@@ -88,7 +88,7 @@
|
||||
"desc": "Aktivieren Sie diese Option, um das Streaming fortzusetzen, wenn der Player ausgeblendet ist.",
|
||||
"label": "Im Hintergrund abspielen"
|
||||
},
|
||||
"tips": "Starten Sie ein manuelles Ereignis basierend auf den Aufzeichnung Aufbewahrungseinstellungen dieser Kamera.",
|
||||
"tips": "Lade einen Sofort-Schnappschuss herunter oder starte ein manuelles Ereignis basierend auf den Aufbewahrungseinstellungen für Aufzeichnungen dieser Kamera.",
|
||||
"debugView": "Debug-Ansicht",
|
||||
"start": "On-Demand Aufzeichnung starten",
|
||||
"failedToEnd": "Die manuelle On-Demand Aufzeichnung konnte nicht beendet werden."
|
||||
@@ -118,6 +118,9 @@
|
||||
"playInBackground": {
|
||||
"tips": "Aktivieren Sie diese Option, um das Streaming fortzusetzen, wenn der Player ausgeblendet ist.",
|
||||
"label": "Im Hintergrund abspielen"
|
||||
},
|
||||
"debug": {
|
||||
"picker": "Stream Auswahl nicht verfügbar im Debug Modus. Die Debug Ansicht nutzt immer den Stream, welcher der Rolle zugewiesen ist."
|
||||
}
|
||||
},
|
||||
"effectiveRetainMode": {
|
||||
@@ -167,5 +170,16 @@
|
||||
"transcription": {
|
||||
"enable": "Live Audio Transkription einschalten",
|
||||
"disable": "Live Audio Transkription ausschalten"
|
||||
},
|
||||
"noCameras": {
|
||||
"title": "Keine Kameras eingerichtet",
|
||||
"description": "Beginne indem du eine Kamera anschließt.",
|
||||
"buttonText": "Kamera hinzufügen"
|
||||
},
|
||||
"snapshot": {
|
||||
"takeSnapshot": "Sofort-Schnappschuss herunterladen",
|
||||
"noVideoSource": "Keine Video-Quelle für Schnappschuss verfügbar.",
|
||||
"captureFailed": "Die Aufnahme des Schnappschusses ist fehlgeschlagen.",
|
||||
"downloadStarted": "Schnappschuss Download gestartet."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,9 @@
|
||||
"classification": "Klassifizierungseinstellungen – Frigate",
|
||||
"motionTuner": "Bewegungserkennungs-Optimierer – Frigate",
|
||||
"notifications": "Benachrichtigungs-Einstellungen",
|
||||
"enrichments": "Erweiterte Statistiken - Frigate"
|
||||
"enrichments": "Erweiterte Statistiken - Frigate",
|
||||
"cameraManagement": "Kameras verwalten - Frigate",
|
||||
"cameraReview": "Kamera Einstellungen prüfen - Frigate"
|
||||
},
|
||||
"menu": {
|
||||
"ui": "Benutzeroberfläche",
|
||||
@@ -23,7 +25,10 @@
|
||||
"users": "Benutzer",
|
||||
"notifications": "Benachrichtigungen",
|
||||
"enrichments": "Erkennungsfunktionen",
|
||||
"triggers": "Auslöser"
|
||||
"triggers": "Auslöser",
|
||||
"roles": "Rollen",
|
||||
"cameraManagement": "Verwaltung",
|
||||
"cameraReview": "Überprüfung"
|
||||
},
|
||||
"dialog": {
|
||||
"unsavedChanges": {
|
||||
@@ -69,7 +74,7 @@
|
||||
"title": "Kalender",
|
||||
"firstWeekday": {
|
||||
"label": "Erster Wochentag",
|
||||
"desc": "Der Tag, an dem die Wochen des Review Kalenders beginnen.",
|
||||
"desc": "Der Tag, an dem die Wochen des Überprüfungs-Kalenders beginnen.",
|
||||
"sunday": "Sonntag",
|
||||
"monday": "Montag"
|
||||
}
|
||||
@@ -812,6 +817,11 @@
|
||||
"error": {
|
||||
"min": "Mindestens eine Aktion muss ausgewählt sein."
|
||||
}
|
||||
},
|
||||
"friendly_name": {
|
||||
"title": "Nutzerfreundlicher Name",
|
||||
"placeholder": "Benenne oder beschreibe diesen Auslöser",
|
||||
"description": "Ein optionaler nutzerfreundlicher Name oder eine Beschreibung für diesen Auslöser."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -826,6 +836,10 @@
|
||||
"updateTriggerFailed": "Auslöser könnte nicht aktualisiert werden: {{errorMessage}}",
|
||||
"deleteTriggerFailed": "Auslöser konnte nicht gelöscht werden: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"semanticSearch": {
|
||||
"title": "Semantische Suche ist deaktiviert",
|
||||
"desc": "Semantische Suche muss aktiviert sein um Auslöser nutzen zu können."
|
||||
}
|
||||
},
|
||||
"roles": {
|
||||
@@ -887,5 +901,222 @@
|
||||
"userUpdateFailed": "Aktualisierung der Benutzerrollen fehlgeschlagen: {{errorMessage}}"
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraWizard": {
|
||||
"title": "Kamera hinzufügen",
|
||||
"description": "Folge den Anweisungen unten, um eine neue Kamera zu deiner Frigate-Installation hinzuzufügen.",
|
||||
"steps": {
|
||||
"nameAndConnection": "Name & Verbindung",
|
||||
"streamConfiguration": "Stream Konfiguration",
|
||||
"validationAndTesting": "Überprüfung & Testen"
|
||||
},
|
||||
"save": {
|
||||
"success": "Neue Kamera {{cameraName}} erfolgreich hinzugefügt.",
|
||||
"failure": "Fehler beim Speichern von {{cameraName}}."
|
||||
},
|
||||
"testResultLabels": {
|
||||
"resolution": "Auflösung",
|
||||
"video": "Video",
|
||||
"audio": "Audio",
|
||||
"fps": "FPS"
|
||||
},
|
||||
"commonErrors": {
|
||||
"noUrl": "Bitte korrekte Stream-URL eingeben",
|
||||
"testFailed": "Stream Test fehlgeschlagen: {{error}}"
|
||||
},
|
||||
"step1": {
|
||||
"description": "Gib deine Kameradaten ein und teste die Verbindung.",
|
||||
"cameraName": "Kamera-Name",
|
||||
"cameraNamePlaceholder": "z.B. vordere_tür oder Hof Übersicht",
|
||||
"host": "Host/IP Adresse",
|
||||
"port": "Port",
|
||||
"username": "Nutzername",
|
||||
"usernamePlaceholder": "Optional",
|
||||
"password": "Passwort",
|
||||
"passwordPlaceholder": "Optional",
|
||||
"selectTransport": "Transport-Protokoll auswählen",
|
||||
"cameraBrand": "Kamera-Hersteller",
|
||||
"selectBrand": "Wähle die Kamera-Hersteller für die URL-Vorlage aus",
|
||||
"customUrl": "Benutzerdefinierte Stream-URL",
|
||||
"brandInformation": "Hersteller Information",
|
||||
"brandUrlFormat": "Für Kameras mit RTSP URL nutze folgendes Format: {{exampleUrl}}",
|
||||
"customUrlPlaceholder": "rtsp://nutzername:passwort@host:port/pfad",
|
||||
"testConnection": "Teste Verbindung",
|
||||
"testSuccess": "Verbindungstest erfolgreich!",
|
||||
"testFailed": "Verbindungstest fehlgeschlagen. Bitte prüfe deine Eingaben und versuche es erneut.",
|
||||
"streamDetails": "Stream Details",
|
||||
"warnings": {
|
||||
"noSnapshot": "Es kann kein Snapshot aus dem konfigurierten Stream abgerufen werden."
|
||||
},
|
||||
"errors": {
|
||||
"brandOrCustomUrlRequired": "Wählen Sie entweder einen Kamera-Hersteller mit Host/IP aus oder wählen Sie „Andere“ mit einer benutzerdefinierten URL",
|
||||
"nameRequired": "Kamera-Name benötigt",
|
||||
"nameLength": "Kamera-Name darf höchstens 64 Zeichen lang sein",
|
||||
"invalidCharacters": "Kamera-Name enthält ungültige Zeichen",
|
||||
"nameExists": "Kamera-Name existiert bereits",
|
||||
"brands": {
|
||||
"reolink-rtsp": "Reolink RTSP wird nicht empfohlen. Es wird empfohlen, http in den Kameraeinstellungen zu aktivieren und den Kamera-Assistenten neu zu starten."
|
||||
}
|
||||
},
|
||||
"docs": {
|
||||
"reolink": "https://docs.frigate.video/configuration/camera_specific.html#reolink-cameras"
|
||||
}
|
||||
},
|
||||
"step2": {
|
||||
"description": "Konfigurieren Sie Stream-Rollen und fügen Sie zusätzliche Streams für Ihre Kamera hinzu.",
|
||||
"streamsTitle": "Kamera Streams",
|
||||
"addStream": "Stream hinzufügen",
|
||||
"addAnotherStream": "Weiteren Stream hinzufügen",
|
||||
"streamTitle": "Stream {{number}}",
|
||||
"streamUrl": "Stream URL",
|
||||
"streamUrlPlaceholder": "rtsp://nutzername:passwort@host:port/pfad",
|
||||
"url": "URL",
|
||||
"resolution": "Auflösung",
|
||||
"selectResolution": "Auflösung auswählen",
|
||||
"quality": "Qualität",
|
||||
"selectQuality": "Qualität auswählen",
|
||||
"roles": "Rollen",
|
||||
"roleLabels": {
|
||||
"detect": "Objekt-Erkennung",
|
||||
"record": "Aufzeichnung",
|
||||
"audio": "Audio"
|
||||
},
|
||||
"testStream": "Verbindung testen",
|
||||
"testSuccess": "Stream erfolgreich getestet!",
|
||||
"testFailed": "Stream-Test fehlgeschlagen",
|
||||
"testFailedTitle": "Test fehlgeschlagen",
|
||||
"connected": "Verbunden",
|
||||
"notConnected": "Nicht verbunden",
|
||||
"featuresTitle": "Funktionen",
|
||||
"go2rtc": "Verbindungen zur Kamera reduzieren",
|
||||
"detectRoleWarning": "Mindestens ein Stream muss die Rolle „detect“ haben, um fortfahren zu können.",
|
||||
"rolesPopover": {
|
||||
"title": "Stream Rollen",
|
||||
"detect": "Haupt-Feed für Objekt-Erkennung.",
|
||||
"record": "Speichert Segmente des Video-Feeds basierend auf den Konfigurationseinstellungen.",
|
||||
"audio": "Feed für audiobasierte Erkennung."
|
||||
},
|
||||
"featuresPopover": {
|
||||
"title": "Stream Funktionen",
|
||||
"description": "Verwende go2rtc Restreaming, um die Verbindungen zu deiner Kamera zu reduzieren."
|
||||
}
|
||||
},
|
||||
"step3": {
|
||||
"description": "Endgültige Validierung und Analyse vor dem Speichern Ihrer neuen Kamera. Verbinde jeden Stream vor dem Speichern.",
|
||||
"validationTitle": "Stream Validierung",
|
||||
"connectAllStreams": "Verbinde alle Streams",
|
||||
"reconnectionSuccess": "Wiederverbindung erfolgreich.",
|
||||
"reconnectionPartial": "Einige Streams konnten nicht wieder verbunden werden.",
|
||||
"streamUnavailable": "Stream-Vorschau nicht verfügbar",
|
||||
"reload": "Neu laden",
|
||||
"connecting": "Verbinde...",
|
||||
"streamTitle": "Stream {{number}}",
|
||||
"valid": "Gültig",
|
||||
"failed": "Fehlgeschlagen",
|
||||
"notTested": "Nicht getestet",
|
||||
"connectStream": "Verbinden",
|
||||
"connectingStream": "Verbinde",
|
||||
"disconnectStream": "Trennen",
|
||||
"estimatedBandwidth": "Geschätzte Bandbreite",
|
||||
"roles": "Rollen",
|
||||
"none": "Keine",
|
||||
"error": "Fehler",
|
||||
"streamValidated": "Stream {{number}} wurde erfolgreich validiert",
|
||||
"streamValidationFailed": "Stream {{number}} Validierung fehlgeschlagen",
|
||||
"saveAndApply": "Neue Kamera speichern",
|
||||
"saveError": "Ungültige Konfiguration. Bitte prüfe die Einstellungen.",
|
||||
"issues": {
|
||||
"title": "Stream Validierung",
|
||||
"videoCodecGood": "Video-Codec ist {{codec}}.",
|
||||
"audioCodecGood": "Audio-Codec ist {{codec}}.",
|
||||
"noAudioWarning": "Für diesen Stream wurde kein Ton erkannt, die Aufzeichnungen enthalten keinen Ton.",
|
||||
"audioCodecRecordError": "Der AAC-Audio-Codec ist erforderlich, um Audio in Aufnahmen zu unterstützen.",
|
||||
"audioCodecRequired": "Ein Audiostream ist erforderlich, um Audioerkennung zu unterstützen.",
|
||||
"restreamingWarning": "Eine Reduzierung der Verbindungen zur Kamera für den Aufzeichnungsstream kann zu einer etwas höheren CPU-Auslastung führen.",
|
||||
"dahua": {
|
||||
"substreamWarning": "Substream 1 ist auf eine niedrige Auflösung festgelegt. Viele Kameras von Dahua / Amcrest / EmpireTech unterstützen zusätzliche Substreams, die in den Kameraeinstellungen aktiviert werden müssen. Es wird empfohlen, diese Streams zu nutzen, sofern sie verfügbar sind."
|
||||
},
|
||||
"hikvision": {
|
||||
"substreamWarning": "Substream 1 ist auf eine niedrige Auflösung festgelegt. Viele Hikvision-Kameras unterstützen zusätzliche Substreams, die in den Kameraeinstellungen aktiviert werden müssen. Es wird empfohlen, diese Streams zu nutzen, sofern sie verfügbar sind."
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraManagement": {
|
||||
"title": "Kameras verwalten",
|
||||
"addCamera": "Neue Kamera hinzufügen",
|
||||
"editCamera": "Kamera bearbeiten:",
|
||||
"selectCamera": "Wähle eine Kamera",
|
||||
"backToSettings": "Zurück zu Kamera-Einstellungen",
|
||||
"streams": {
|
||||
"title": "Kameras aktivieren / deaktivieren",
|
||||
"desc": "Deaktiviere eine Kamera vorübergehend, bis Frigate neu gestartet wird. Deaktivierung einer Kamera stoppt die Verarbeitung der Streams dieser Kamera durch Frigate vollständig. Erkennung, Aufzeichnung und Debugging sind dann nicht mehr verfügbar. <br /> <em>Hinweis: Dies deaktiviert nicht die go2rtc restreams.</em>"
|
||||
},
|
||||
"cameraConfig": {
|
||||
"add": "Kamera hinzufügen",
|
||||
"edit": "Kamera bearbeiten",
|
||||
"description": "Konfiguriere die Kameraeinstellungen, einschließlich Streams und Rollen.",
|
||||
"name": "Kamera-Name",
|
||||
"nameRequired": "Kamera-Name benötigt",
|
||||
"nameLength": "Kamera-Name darf maximal 64 Zeichen lang sein.",
|
||||
"namePlaceholder": "z.B. vordere_tür oder Hof Übersicht",
|
||||
"enabled": "Aktiviert",
|
||||
"ffmpeg": {
|
||||
"inputs": "Eingang Streams",
|
||||
"path": "Stream-Pfad",
|
||||
"pathRequired": "Stream-Pfad benötigt",
|
||||
"pathPlaceholder": "rtsp://...",
|
||||
"roles": "Rollen",
|
||||
"rolesRequired": "Mindestens eine Rolle wird benötigt",
|
||||
"rolesUnique": "Jede Rolle (audio, detect, record) kann nur einem Stream zugewiesen werden",
|
||||
"addInput": "Eingangs-Stream hinzufügen",
|
||||
"removeInput": "Eingangs-Stream entfernen",
|
||||
"inputsRequired": "Es wird mindestens ein Eingangs-Stream benötigt"
|
||||
},
|
||||
"go2rtcStreams": "go2rtc Streams",
|
||||
"streamUrls": "Stream URLs",
|
||||
"addUrl": "URL hinzufügen",
|
||||
"addGo2rtcStream": "go2rtc Stream hinzufügen",
|
||||
"toast": {
|
||||
"success": "Kamera {{cameraName}} erfolgreich gespeichert"
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraReview": {
|
||||
"title": "Kamera-Einstellungen überprüfen",
|
||||
"object_descriptions": {
|
||||
"title": "Generative KI Objektbeschreibungen",
|
||||
"desc": "Aktiviere/deaktiviere vorübergehend die Objektbeschreibungen durch Generative KI für diese Kamera. Wenn diese Option deaktiviert ist, werden keine KI-generierten Beschreibungen für verfolgte Objekte dieser Kamera erstellt."
|
||||
},
|
||||
"review_descriptions": {
|
||||
"title": "Generative KI Review Beschreibungen",
|
||||
"desc": "Generative KI Review Beschreibungen für diese Kamera vorübergehend aktivieren/deaktivieren. Wenn diese Option deaktiviert ist, werden für die Review Elemente dieser Kamera keine KI-generierten Beschreibungen angefordert."
|
||||
},
|
||||
"review": {
|
||||
"title": "Review",
|
||||
"desc": "Aktivieren/deaktivieren Sie vorübergehend Warnmeldungen und Erkennungen für diese Kamera, bis Frigate neu gestartet wird. Wenn diese Funktion deaktiviert ist, werden keine neuen Überprüfungselemente generiert. ",
|
||||
"alerts": "Warnungen ",
|
||||
"detections": "Erkennungen "
|
||||
},
|
||||
"reviewClassification": {
|
||||
"title": "Bewertungsklassifizierung",
|
||||
"desc": "Frigate kategorisiert zu überprüfende Elemente als Warnmeldungen und Erkennungen. Standardmäßig werden alle Objekte vom Typ <em>person</em> und <em>car</em> als Warnmeldungen betrachtet. Sie können die Kategorisierung der zu überprüfenden Elemente verfeinern, indem Sie die erforderlichen Zonen für sie konfigurieren.",
|
||||
"noDefinedZones": "Für diese Kamera sind keine Zonen definiert.",
|
||||
"objectAlertsTips": "Alle {{alertsLabels}}-Objekte auf {{cameraName}} werden als Warnmeldungen angezeigt.",
|
||||
"zoneObjectAlertsTips": "Alle {{alertsLabels}}-Objekte, die in {{zone}} auf {{cameraName}} erkannt wurden, werden als Warnmeldungen angezeigt.",
|
||||
"objectDetectionsTips": "Alle {{detectionsLabels}}-Objekte, die nicht unter {{cameraName}} kategorisiert sind, werden unabhängig davon, in welcher Zone sie sich befinden, als Erkennungen angezeigt.",
|
||||
"zoneObjectDetectionsTips": {
|
||||
"text": "Alle {{detectionsLabels}}-Objekte, die nicht in {{zone}} auf {{cameraName}} kategorisiert sind, werden als Erkennungen angezeigt.",
|
||||
"notSelectDetections": "Alle {{detectionsLabels}}-Objekte, die in {{zone}} auf {{cameraName}} erkannt und nicht als Warnmeldungen kategorisiert wurden, werden unabhängig davon, in welcher Zone sie sich befinden, als Erkennungen angezeigt.",
|
||||
"regardlessOfZoneObjectDetectionsTips": "Alle {{detectionsLabels}}-Objekte, die nicht unter {{cameraName}} kategorisiert sind, werden unabhängig davon, in welcher Zone sie sich befinden, als Erkennungen angezeigt."
|
||||
},
|
||||
"unsavedChanges": "Nicht gespeicherte Überprüfung der Klassifizierungseinstellungen für {{camera}}",
|
||||
"selectAlertsZones": "Zonen für Warnmeldungen auswählen",
|
||||
"selectDetectionsZones": "Zonen für Erkennungen auswählen",
|
||||
"limitDetections": "Erkennungen auf bestimmte Zonen beschränken",
|
||||
"toast": {
|
||||
"success": "Die Konfiguration der Bewertungsklassifizierung wurde gespeichert. Starten Sie Frigate neu, um die Änderungen zu übernehmen."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
{
|
||||
"time": {
|
||||
"untilForTime": "Ως{{time}}",
|
||||
"untilForTime": "Ως {{time}}",
|
||||
"untilForRestart": "Μέχρι να γίνει επανεκκίνηση του Frigate.",
|
||||
"untilRestart": "Μέχρι να γίνει επανεκκίνηση",
|
||||
"justNow": "Μόλις τώρα",
|
||||
"ago": "{{timeAgo}} Πριν",
|
||||
"ago": "Πριν {{timeAgo}}",
|
||||
"today": "Σήμερα",
|
||||
"yesterday": "Εχθές",
|
||||
"last7": "Τελευταίες 7 ημέρες",
|
||||
@@ -31,7 +31,44 @@
|
||||
"lastMonth": "Τελευταίος Μήνας",
|
||||
"5minutes": "5 λεπτά",
|
||||
"10minutes": "10 λεπτά",
|
||||
"30minutes": "30 λεπτά"
|
||||
"30minutes": "30 λεπτά",
|
||||
"1hour": "1 ώρα",
|
||||
"12hours": "12 ώρες",
|
||||
"24hours": "24 ώρες",
|
||||
"pm": "μ.μ.",
|
||||
"formattedTimestamp": {
|
||||
"12hour": "d MMM, h:mm:ss aaa",
|
||||
"24hour": "d MMM, HH:mm:ss"
|
||||
},
|
||||
"formattedTimestamp2": {
|
||||
"12hour": "MM/dd h:mm:ssa",
|
||||
"24hour": "d MMM HH:mm:ss"
|
||||
},
|
||||
"formattedTimestampHourMinute": {
|
||||
"12hour": "h:mm aaa",
|
||||
"24hour": "HH:mm"
|
||||
},
|
||||
"formattedTimestampHourMinuteSecond": {
|
||||
"12hour": "h:mm:ss aaa",
|
||||
"24hour": "HH:mm:ss"
|
||||
},
|
||||
"formattedTimestampMonthDayHourMinute": {
|
||||
"12hour": "d MMM, h:mm aaa",
|
||||
"24hour": "d MMM, HH:mm"
|
||||
},
|
||||
"formattedTimestampMonthDayYear": {
|
||||
"12hour": "d MMM yyyy",
|
||||
"24hour": "d MMM yyyy"
|
||||
},
|
||||
"formattedTimestampMonthDayYearHourMinute": {
|
||||
"12hour": "d MMM yyyy, h:mm aaa",
|
||||
"24hour": "d MMM yyyy, HH:mm"
|
||||
},
|
||||
"formattedTimestampMonthDay": "d MMM",
|
||||
"formattedTimestampFilename": {
|
||||
"12hour": "dd-MM-yy-h-mm-ss-a",
|
||||
"24hour": "dd-MM-yy-HH-mm-ss"
|
||||
}
|
||||
},
|
||||
"menu": {
|
||||
"live": {
|
||||
@@ -40,5 +77,49 @@
|
||||
"count_other": "{{count}} Κάμερες"
|
||||
}
|
||||
}
|
||||
},
|
||||
"button": {
|
||||
"save": "Αποθήκευση",
|
||||
"apply": "Εφαρμογή",
|
||||
"reset": "Επαναφορά",
|
||||
"done": "Τέλος",
|
||||
"enabled": "Ενεργοποιημένο",
|
||||
"enable": "Ενεργοποίηση",
|
||||
"disabled": "Απενεργοποιημένο",
|
||||
"disable": "Απενεργοποίηση",
|
||||
"saving": "Αποθήκευση…",
|
||||
"cancel": "Ακύρωση",
|
||||
"close": "Κλείσιμο",
|
||||
"copy": "Αντιγραφή",
|
||||
"back": "Πίσω",
|
||||
"pictureInPicture": "Εικόνα σε εικόνα",
|
||||
"cameraAudio": "Ήχος κάμερας",
|
||||
"edit": "Επεξεργασία",
|
||||
"copyCoordinates": "Αντιγραφή συντεταγμένων",
|
||||
"delete": "Διαγραφή",
|
||||
"yes": "Ναι",
|
||||
"no": "Όχι",
|
||||
"download": "Κατέβασμα",
|
||||
"info": "Πληροφορίες"
|
||||
},
|
||||
"unit": {
|
||||
"speed": {
|
||||
"mph": "mph",
|
||||
"kph": "χλμ/ώρα"
|
||||
},
|
||||
"length": {
|
||||
"meters": "μέτρα"
|
||||
},
|
||||
"data": {
|
||||
"kbps": "kB/s",
|
||||
"mbps": "MB/s",
|
||||
"gbps": "GB/s",
|
||||
"kbph": "kB/ώρα",
|
||||
"mbph": "MB/ώρα",
|
||||
"gbph": "GB/ώρα"
|
||||
}
|
||||
},
|
||||
"label": {
|
||||
"back": "Επιστροφή"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,5 +62,8 @@
|
||||
"audioDetect": {
|
||||
"enable": "Ενεργοποίηση Ανίχνευσης Ήχου",
|
||||
"disable": "Απενεργοποίηση Ανίχνευσης Ήχου"
|
||||
},
|
||||
"noCameras": {
|
||||
"buttonText": "Προσθήκη Κάμερας"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,5 +42,15 @@
|
||||
"cameraSetting": {
|
||||
"camera": "Κάμερα",
|
||||
"noCamera": "Δεν υπάρχει Κάμερα"
|
||||
},
|
||||
"triggers": {
|
||||
"dialog": {
|
||||
"form": {
|
||||
"friendly_name": {
|
||||
"placeholder": "Ονομάτισε ή περιέγραψε αυτό το έναυσμα",
|
||||
"description": "Ένα προαιρετικό φιλικό όνομα, ή ένα περιγραφικό κείμενο για αυτό το έναυσμα."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
"user": "Username",
|
||||
"password": "Password",
|
||||
"login": "Login",
|
||||
"firstTimeLogin": "Trying to log in for the first time? Credentials are printed in the Frigate logs.",
|
||||
"errors": {
|
||||
"usernameRequired": "Username is required",
|
||||
"passwordRequired": "Password is required",
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
"export": "Export",
|
||||
"selectOrExport": "Select or Export",
|
||||
"toast": {
|
||||
"success": "Successfully started export. View the file in the /exports folder.",
|
||||
"success": "Successfully started export. View the file in the exports page.",
|
||||
"error": {
|
||||
"failed": "Failed to start export: {{error}}",
|
||||
"endTimeMustAfterStartTime": "End time must be after start time",
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"label": "Min face recognitions for the sub label to be applied to the person object."
|
||||
},
|
||||
"save_attempts": {
|
||||
"label": "Number of face attempts to save in the train tab."
|
||||
"label": "Number of face attempts to save in the recent recognitions tab."
|
||||
},
|
||||
"blur_confidence_filter": {
|
||||
"label": "Apply blur quality filter to face confidence."
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
{
|
||||
"documentTitle": "Classification Models",
|
||||
"button": {
|
||||
"deleteClassificationAttempts": "Delete Classification Images",
|
||||
"renameCategory": "Rename Class",
|
||||
@@ -41,13 +42,94 @@
|
||||
"invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens."
|
||||
},
|
||||
"train": {
|
||||
"title": "Train",
|
||||
"aria": "Select Train"
|
||||
"title": "Recent Classifications",
|
||||
"aria": "Select Recent Classifications"
|
||||
},
|
||||
"categories": "Classes",
|
||||
"createCategory": {
|
||||
"new": "Create New Class"
|
||||
},
|
||||
"categorizeImageAs": "Classify Image As:",
|
||||
"categorizeImage": "Classify Image"
|
||||
"categorizeImage": "Classify Image",
|
||||
"noModels": {
|
||||
"object": {
|
||||
"title": "No Object Classification Models",
|
||||
"description": "Create a custom model to classify detected objects.",
|
||||
"buttonText": "Create Object Model"
|
||||
},
|
||||
"state": {
|
||||
"title": "No State Classification Models",
|
||||
"description": "Create a custom model to monitor and classify state changes in specific camera areas.",
|
||||
"buttonText": "Create State Model"
|
||||
}
|
||||
},
|
||||
"wizard": {
|
||||
"title": "Create New Classification",
|
||||
"steps": {
|
||||
"nameAndDefine": "Name & Define",
|
||||
"stateArea": "State Area",
|
||||
"chooseExamples": "Choose Examples"
|
||||
},
|
||||
"step1": {
|
||||
"description": "State models monitor fixed camera areas for changes (e.g., door open/closed). Object models add classifications to detected objects (e.g., known animals, delivery persons, etc.).",
|
||||
"name": "Name",
|
||||
"namePlaceholder": "Enter model name...",
|
||||
"type": "Type",
|
||||
"typeState": "State",
|
||||
"typeObject": "Object",
|
||||
"objectLabel": "Object Label",
|
||||
"objectLabelPlaceholder": "Select object type...",
|
||||
"classificationType": "Classification Type",
|
||||
"classificationTypeTip": "Learn about classification types",
|
||||
"classificationTypeDesc": "Sub Labels add additional text to the object label (e.g., 'Person: UPS'). Attributes are searchable metadata stored separately in the object metadata.",
|
||||
"classificationSubLabel": "Sub Label",
|
||||
"classificationAttribute": "Attribute",
|
||||
"classes": "Classes",
|
||||
"classesTip": "Learn about classes",
|
||||
"classesStateDesc": "Define the different states your camera area can be in. For example: 'open' and 'closed' for a garage door.",
|
||||
"classesObjectDesc": "Define the different categories to classify detected objects into. For example: 'delivery_person', 'resident', 'stranger' for person classification.",
|
||||
"classPlaceholder": "Enter class name...",
|
||||
"errors": {
|
||||
"nameRequired": "Model name is required",
|
||||
"nameLength": "Model name must be 64 characters or less",
|
||||
"nameOnlyNumbers": "Model name cannot contain only numbers",
|
||||
"classRequired": "At least 1 class is required",
|
||||
"classesUnique": "Class names must be unique",
|
||||
"stateRequiresTwoClasses": "State models require at least 2 classes",
|
||||
"objectLabelRequired": "Please select an object label",
|
||||
"objectTypeRequired": "Please select a classification type"
|
||||
}
|
||||
},
|
||||
"step2": {
|
||||
"description": "Select cameras and define the area to monitor for each camera. The model will classify the state of these areas.",
|
||||
"cameras": "Cameras",
|
||||
"selectCamera": "Select Camera",
|
||||
"noCameras": "Click + to add cameras",
|
||||
"selectCameraPrompt": "Select a camera from the list to define its monitoring area"
|
||||
},
|
||||
"step3": {
|
||||
"selectImagesPrompt": "Select all images with: {{className}}",
|
||||
"selectImagesDescription": "Click on images to select them. Click Continue when you're done with this class.",
|
||||
"generating": {
|
||||
"title": "Generating Sample Images",
|
||||
"description": "Frigate is pulling representative images from your recordings. This may take a moment..."
|
||||
},
|
||||
"training": {
|
||||
"title": "Training Model",
|
||||
"description": "Your model is being trained in the background. Close this dialog, and your model will start running as soon as training is complete."
|
||||
},
|
||||
"retryGenerate": "Retry Generation",
|
||||
"noImages": "No sample images generated",
|
||||
"classifying": "Classifying & Training...",
|
||||
"trainingStarted": "Training started successfully",
|
||||
"errors": {
|
||||
"noCameras": "No cameras configured",
|
||||
"noObjectLabel": "No object label selected",
|
||||
"generateFailed": "Failed to generate examples: {{error}}",
|
||||
"generationFailed": "Generation failed. Please try again.",
|
||||
"classifyFailed": "Failed to classify images: {{error}}"
|
||||
},
|
||||
"generateSuccess": "Successfully generated sample images"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,10 +19,11 @@
|
||||
"noFoundForTimePeriod": "No events found for this time period."
|
||||
},
|
||||
"detail": {
|
||||
"label": "Detail",
|
||||
"noDataFound": "No detail data to review",
|
||||
"aria": "Toggle detail view",
|
||||
"trackedObject_one": "tracked object",
|
||||
"trackedObject_other": "tracked objects",
|
||||
"trackedObject_one": "object",
|
||||
"trackedObject_other": "objects",
|
||||
"noObjectDetailData": "No object detail data available."
|
||||
},
|
||||
"objectTrack": {
|
||||
|
||||
@@ -36,8 +36,8 @@
|
||||
"video": "video",
|
||||
"object_lifecycle": "object lifecycle"
|
||||
},
|
||||
"objectLifecycle": {
|
||||
"title": "Object Lifecycle",
|
||||
"trackingDetails": {
|
||||
"title": "Tracking Details",
|
||||
"noImageFound": "No image found for this timestamp.",
|
||||
"createObjectMask": "Create Object Mask",
|
||||
"adjustAnnotationSettings": "Adjust annotation settings",
|
||||
@@ -168,9 +168,9 @@
|
||||
"label": "Download snapshot",
|
||||
"aria": "Download snapshot"
|
||||
},
|
||||
"viewObjectLifecycle": {
|
||||
"label": "View object lifecycle",
|
||||
"aria": "Show the object lifecycle"
|
||||
"viewTrackingDetails": {
|
||||
"label": "View tracking details",
|
||||
"aria": "Show the tracking details"
|
||||
},
|
||||
"findSimilar": {
|
||||
"label": "Find similar",
|
||||
@@ -194,12 +194,18 @@
|
||||
},
|
||||
"deleteTrackedObject": {
|
||||
"label": "Delete this tracked object"
|
||||
},
|
||||
"showObjectDetails": {
|
||||
"label": "Show object path"
|
||||
},
|
||||
"hideObjectDetails": {
|
||||
"label": "Hide object path"
|
||||
}
|
||||
},
|
||||
"dialog": {
|
||||
"confirmDelete": {
|
||||
"title": "Confirm Delete",
|
||||
"desc": "Deleting this tracked object removes the snapshot, any saved embeddings, and any associated object lifecycle entries. Recorded footage of this tracked object in History view will <em>NOT</em> be deleted.<br /><br />Are you sure you want to proceed?"
|
||||
"desc": "Deleting this tracked object removes the snapshot, any saved embeddings, and any associated tracking details entries. Recorded footage of this tracked object in History view will <em>NOT</em> be deleted.<br /><br />Are you sure you want to proceed?"
|
||||
}
|
||||
},
|
||||
"noTrackedObjects": "No Tracked Objects Found",
|
||||
|
||||
@@ -9,6 +9,12 @@
|
||||
"desc": "Enter a new name for this export.",
|
||||
"saveExport": "Save Export"
|
||||
},
|
||||
"tooltip": {
|
||||
"shareExport": "Share export",
|
||||
"downloadVideo": "Download video",
|
||||
"editName": "Edit name",
|
||||
"deleteExport": "Delete export"
|
||||
},
|
||||
"toast": {
|
||||
"error": {
|
||||
"renameExportFailed": "Failed to rename export: {{errorMessage}}"
|
||||
|
||||
@@ -1,14 +1,10 @@
|
||||
{
|
||||
"description": {
|
||||
"addFace": "Walk through adding a new collection to the Face Library.",
|
||||
"addFace": "Add a new collection to the Face Library by uploading your first image.",
|
||||
"placeholder": "Enter a name for this collection",
|
||||
"invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens."
|
||||
},
|
||||
"details": {
|
||||
"subLabelScore": "Sub Label Score",
|
||||
"scoreInfo": "The sub label score is the weighted score for all of the recognized face confidences, so this may differ from the score shown on the snapshot.",
|
||||
"face": "Face Details",
|
||||
"faceDesc": "Details of the tracked object that generated this face",
|
||||
"timestamp": "Timestamp",
|
||||
"unknown": "Unknown"
|
||||
},
|
||||
@@ -19,10 +15,8 @@
|
||||
},
|
||||
"collections": "Collections",
|
||||
"createFaceLibrary": {
|
||||
"title": "Create Collection",
|
||||
"desc": "Create a new collection",
|
||||
"new": "Create New Face",
|
||||
"nextSteps": "To build a strong foundation:<li>Use the Train tab to select and train on images for each detected person.</li><li>Focus on straight-on images for best results; avoid training images that capture faces at an angle.</li></ul>"
|
||||
"nextSteps": "To build a strong foundation:<li>Use the Recent Recognitions tab to select and train on images for each detected person.</li><li>Focus on straight-on images for best results; avoid training images that capture faces at an angle.</li></ul>"
|
||||
},
|
||||
"steps": {
|
||||
"faceName": "Enter Face Name",
|
||||
@@ -33,12 +27,10 @@
|
||||
}
|
||||
},
|
||||
"train": {
|
||||
"title": "Train",
|
||||
"aria": "Select train",
|
||||
"title": "Recent Recognitions",
|
||||
"aria": "Select recent recognitions",
|
||||
"empty": "There are no recent face recognition attempts"
|
||||
},
|
||||
"selectItem": "Select {{item}}",
|
||||
"selectFace": "Select Face",
|
||||
"deleteFaceLibrary": {
|
||||
"title": "Delete Name",
|
||||
"desc": "Are you sure you want to delete the collection {{name}}? This will permanently delete all associated faces."
|
||||
@@ -69,7 +61,6 @@
|
||||
"maxSize": "Max size: {{size}}MB"
|
||||
},
|
||||
"nofaces": "No faces available",
|
||||
"pixels": "{{area}}px",
|
||||
"trainFaceAs": "Train Face as:",
|
||||
"trainFace": "Train Face",
|
||||
"toast": {
|
||||
|
||||
@@ -175,8 +175,8 @@
|
||||
"exitEdit": "Exit Editing"
|
||||
},
|
||||
"noCameras": {
|
||||
"title": "No Cameras Set Up",
|
||||
"description": "Get started by connecting a camera.",
|
||||
"title": "No Cameras Configured",
|
||||
"description": "Get started by connecting a camera to Frigate.",
|
||||
"buttonText": "Add Camera"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,6 +188,10 @@
|
||||
"testSuccess": "Connection test successful!",
|
||||
"testFailed": "Connection test failed. Please check your input and try again.",
|
||||
"streamDetails": "Stream Details",
|
||||
"testing": {
|
||||
"probingMetadata": "Probing camera metadata...",
|
||||
"fetchingSnapshot": "Fetching camera snapshot..."
|
||||
},
|
||||
"warnings": {
|
||||
"noSnapshot": "Unable to fetch a snapshot from the configured stream."
|
||||
},
|
||||
@@ -197,8 +201,9 @@
|
||||
"nameLength": "Camera name must be 64 characters or less",
|
||||
"invalidCharacters": "Camera name contains invalid characters",
|
||||
"nameExists": "Camera name already exists",
|
||||
"customUrlRtspRequired": "Custom URLs must begin with \"rtsp://\". Manual configuration is required for non-RTSP camera streams.",
|
||||
"brands": {
|
||||
"reolink-rtsp": "Reolink RTSP is not recommended. It is recommended to enable http in the camera settings and restart the camera wizard."
|
||||
"reolink-rtsp": "Reolink RTSP is not recommended. Enable HTTP in the camera's firmware settings and restart the wizard."
|
||||
}
|
||||
},
|
||||
"docs": {
|
||||
@@ -272,6 +277,8 @@
|
||||
"title": "Stream Validation",
|
||||
"videoCodecGood": "Video codec is {{codec}}.",
|
||||
"audioCodecGood": "Audio codec is {{codec}}.",
|
||||
"resolutionHigh": "A resolution of {{resolution}} may cause increased resource usage.",
|
||||
"resolutionLow": "A resolution of {{resolution}} may be too low for reliable detection of small objects.",
|
||||
"noAudioWarning": "No audio detected for this stream, recordings will not have audio.",
|
||||
"audioCodecRecordError": "The AAC audio codec is required to support audio in recordings.",
|
||||
"audioCodecRequired": "An audio stream is required to support audio detection.",
|
||||
|
||||
@@ -280,5 +280,8 @@
|
||||
"desc": "Página no encontrada"
|
||||
},
|
||||
"selectItem": "Seleccionar {{item}}",
|
||||
"readTheDocumentation": "Leer la documentación"
|
||||
"readTheDocumentation": "Leer la documentación",
|
||||
"information": {
|
||||
"pixels": "{{area}}px"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -120,7 +120,8 @@
|
||||
"button": {
|
||||
"export": "Exportar",
|
||||
"markAsReviewed": "Marcar como revisado",
|
||||
"deleteNow": "Eliminar ahora"
|
||||
"deleteNow": "Eliminar ahora",
|
||||
"markAsUnreviewed": "Marcar como no revisado"
|
||||
}
|
||||
},
|
||||
"imagePicker": {
|
||||
|
||||
@@ -49,7 +49,7 @@
|
||||
"selectImage": "Por favor, selecciona un archivo de imagen."
|
||||
},
|
||||
"dropActive": "Suelta la imagen aquí…",
|
||||
"dropInstructions": "Arrastra y suelta una imagen aquí, o haz clic para seleccionar",
|
||||
"dropInstructions": "Arrastra y suelta, o pega una imagen aquí, o haz clic para seleccionar",
|
||||
"maxSize": "Tamaño máximo: {{size}}MB"
|
||||
},
|
||||
"toast": {
|
||||
|
||||
@@ -147,7 +147,7 @@
|
||||
"snapshots": "Capturas de pantalla",
|
||||
"autotracking": "Seguimiento automático",
|
||||
"cameraEnabled": "Cámara habilitada",
|
||||
"transcription": "Transcripción de audio"
|
||||
"transcription": "Transcripción de Audio"
|
||||
},
|
||||
"history": {
|
||||
"label": "Mostrar grabaciones históricas"
|
||||
@@ -170,5 +170,10 @@
|
||||
"transcription": {
|
||||
"enable": "Habilitar transcripción de audio en tiempo real",
|
||||
"disable": "Deshabilitar transcripción de audio en tiempo real"
|
||||
},
|
||||
"noCameras": {
|
||||
"title": "No hay cámaras configuradas",
|
||||
"description": "Comienza conectando una cámara.",
|
||||
"buttonText": "Añade Cámara"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,8 @@
|
||||
"users": "Usuarios",
|
||||
"notifications": "Notificaciones",
|
||||
"enrichments": "Análisis avanzado",
|
||||
"triggers": "Disparadores"
|
||||
"triggers": "Disparadores",
|
||||
"roles": "Roles"
|
||||
},
|
||||
"dialog": {
|
||||
"unsavedChanges": {
|
||||
@@ -773,7 +774,71 @@
|
||||
"desc": "Editar configuración del disparador para la cámara {{camera}}"
|
||||
},
|
||||
"deleteTrigger": {
|
||||
"title": "Eliminar Disparador"
|
||||
"title": "Eliminar Disparador",
|
||||
"desc": "Está seguro de que desea eliminar el disparador <strong>{{triggerName}}</strong>? Esta acción no se puede deshacer."
|
||||
},
|
||||
"form": {
|
||||
"name": {
|
||||
"title": "Nombre",
|
||||
"placeholder": "Introduzca el nombre del disparador",
|
||||
"error": {
|
||||
"minLength": "El nombre debe tener al menos 2 caracteres.",
|
||||
"invalidCharacters": "El nombre sólo puede contener letras, números, guiones bajos, y guiones.",
|
||||
"alreadyExists": "Un disparador con este nombre ya existe para esta cámara."
|
||||
}
|
||||
},
|
||||
"enabled": {
|
||||
"description": "Activa o desactiva este disparador"
|
||||
},
|
||||
"type": {
|
||||
"title": "Tipo",
|
||||
"placeholder": "Seleccione tipo de disparador"
|
||||
},
|
||||
"friendly_name": {
|
||||
"title": "Nombre amigable",
|
||||
"placeholder": "Nombre o describa este disparador",
|
||||
"description": "Un nombre o texto descriptivo amigable (opcional) para este disparador."
|
||||
},
|
||||
"content": {
|
||||
"title": "Contenido",
|
||||
"imagePlaceholder": "Seleccione una imagen",
|
||||
"textPlaceholder": "Introduzca contenido de texto",
|
||||
"error": {
|
||||
"required": "El contenido es requerido."
|
||||
},
|
||||
"imageDesc": "Seleccione una imagen para iniciar esta acción cuando una imagen similar es detectada.",
|
||||
"textDesc": "Introduzca texto para iniciar esta acción cuando la descripción de un objeto seguido similar es detectada."
|
||||
},
|
||||
"threshold": {
|
||||
"title": "Umbral",
|
||||
"error": {
|
||||
"min": "El umbral debe ser al menos 0",
|
||||
"max": "El umbral debe ser como máximo 1"
|
||||
}
|
||||
},
|
||||
"actions": {
|
||||
"title": "Acciones",
|
||||
"error": {
|
||||
"min": "Al menos una acción debe ser seleccionada."
|
||||
},
|
||||
"desc": "Por defecto, Frigate manda un mensaje MQTT por todos los disparadores. Seleccione una acción adicional que se realizará cuando este disparador se accione."
|
||||
}
|
||||
}
|
||||
},
|
||||
"semanticSearch": {
|
||||
"title": "Búsqueda semántica desactivada",
|
||||
"desc": "Búsqueda semántica debe estar activada para usar Disparadores."
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"createTrigger": "Disparador {{name}} creado exitosamente.",
|
||||
"updateTrigger": "Disparador {{name}} actualizado exitosamente.",
|
||||
"deleteTrigger": "Disparador {{name}} eliminado exitosamente."
|
||||
},
|
||||
"error": {
|
||||
"createTriggerFailed": "Fallo al crear el disparador: {{errorMessage}}",
|
||||
"updateTriggerFailed": "Fallo al actualizar el disparador: {{errorMessage}}",
|
||||
"deleteTriggerFailed": "Fallo al eliminar el disparador: {{errorMessage}}"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -105,7 +105,7 @@
|
||||
"unusedStorageInformation": "Información de Almacenamiento No Utilizado"
|
||||
},
|
||||
"shm": {
|
||||
"title": "Asignación SHM (memoria compartida)",
|
||||
"title": "Asignación de SHM (memoria compartida)",
|
||||
"warning": "El tamaño actual de SHM de {{total}}MB es muy pequeño. Aumente al menos a {{min_shm}}MB."
|
||||
}
|
||||
},
|
||||
|
||||
@@ -425,5 +425,67 @@
|
||||
"fowl": "Volaille",
|
||||
"cluck": "Gloussement",
|
||||
"cock_a_doodle_doo": "Cocorico",
|
||||
"gobble": "Glouglou"
|
||||
"gobble": "Glouglou",
|
||||
"chird": "Accord",
|
||||
"change_ringing": "Changer la sonnerie",
|
||||
"sodeling": "Sodèle",
|
||||
"shofar": "Choffar",
|
||||
"liquid": "Liquide",
|
||||
"splash": "Éclabousser",
|
||||
"slosh": "Patauger",
|
||||
"squish": "Gargouillis",
|
||||
"drip": "Goutte",
|
||||
"trickle": "Filet",
|
||||
"gush": "Jet",
|
||||
"fill": "Remplir",
|
||||
"spray": "Pulvérisation",
|
||||
"pump": "Pompe",
|
||||
"stir": "Remuer",
|
||||
"boiling": "Ébullition",
|
||||
"arrow": "Flèche",
|
||||
"pour": "Verser",
|
||||
"sonar": "Sonar",
|
||||
"whoosh": "Whoosh",
|
||||
"thump": "Cogner",
|
||||
"thunk": "Bruit sourd",
|
||||
"electronic_tuner": "Accordeur électronique",
|
||||
"effects_unit": "Unité d'effets",
|
||||
"chorus_effect": "Effet de chœur",
|
||||
"basketball_bounce": "Rebond de basket-ball",
|
||||
"bang": "Claquer",
|
||||
"slap": "Gifler",
|
||||
"whack": "Battre",
|
||||
"smash": "Fracasser",
|
||||
"breaking": "Rupture",
|
||||
"bouncing": "Rebondir",
|
||||
"whip": "Fouet",
|
||||
"flap": "Rabat",
|
||||
"scratch": "Gratter",
|
||||
"scrape": "Gratter",
|
||||
"rub": "Frotter",
|
||||
"roll": "Rouler",
|
||||
"crushing": "Écrasement",
|
||||
"crumpling": "Froissement",
|
||||
"tearing": "Déchirure",
|
||||
"beep": "Bip",
|
||||
"ping": "Ping",
|
||||
"ding": "Ding",
|
||||
"clang": "Bruit",
|
||||
"squeal": "Hurler",
|
||||
"creak": "Craquer",
|
||||
"rustle": "Bruissement",
|
||||
"whir": "Vrombissement",
|
||||
"clatter": "Bruit",
|
||||
"sizzle": "Grésiller",
|
||||
"clicking": "En cliquant",
|
||||
"clickety_clack": "Clic-clac",
|
||||
"rumble": "Gronder",
|
||||
"plop": "Ploc",
|
||||
"hum": "Hum",
|
||||
"harmonic": "Harmonique",
|
||||
"outside": "Extérieur",
|
||||
"reverberation": "Réverbération",
|
||||
"echo": "Écho",
|
||||
"distortion": "Distorsion",
|
||||
"vibration": "Vibration"
|
||||
}
|
||||
|
||||
@@ -279,6 +279,17 @@
|
||||
"length": {
|
||||
"feet": "pieds",
|
||||
"meters": "mètres"
|
||||
},
|
||||
"data": {
|
||||
"kbps": "ko/s",
|
||||
"mbps": "Mo/s",
|
||||
"gbps": "Go/s",
|
||||
"kbph": "ko/heure",
|
||||
"mbph": "Mo/heure",
|
||||
"gbph": "Go/heure"
|
||||
}
|
||||
},
|
||||
"information": {
|
||||
"pixels": "{{area}}px"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -120,7 +120,8 @@
|
||||
"button": {
|
||||
"export": "Exporter",
|
||||
"markAsReviewed": "Marquer comme passé en revue",
|
||||
"deleteNow": "Supprimer maintenant"
|
||||
"deleteNow": "Supprimer maintenant",
|
||||
"markAsUnreviewed": "Marquer comme non passé en revue"
|
||||
}
|
||||
},
|
||||
"imagePicker": {
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
},
|
||||
"imageEntry": {
|
||||
"dropActive": "Déposez l'image ici…",
|
||||
"dropInstructions": "Glissez et déposez une image ici, ou cliquez pour sélectionner",
|
||||
"dropInstructions": "Glissez-déposez ou coller une image ici, ou cliquez pour la sélectionner",
|
||||
"maxSize": "Taille max : {{size}}Mo",
|
||||
"validation": {
|
||||
"selectImage": "Veuillez sélectionner un fichier image."
|
||||
|
||||
@@ -94,8 +94,8 @@
|
||||
"failedToEnd": "Impossible de terminer l'enregistrement manuel à la demande.",
|
||||
"started": "Enregistrement à la demande démarré.",
|
||||
"recordDisabledTips": "Puisque l'enregistrement est désactivé ou restreint dans la configuration de cette caméra, seul un instantané sera enregistré.",
|
||||
"title": "Enregistrement à la demande",
|
||||
"tips": "Démarrez un événement manuel en fonction des paramètres de conservation d'enregistrement de cette caméra."
|
||||
"title": "À la demande",
|
||||
"tips": "Téléchargez un instantané immédiat ou démarrez un événement manuel en fonction des paramètres de conservation d'enregistrement de cette caméra."
|
||||
},
|
||||
"streamingSettings": "Paramètres de streaming",
|
||||
"notifications": "Notifications",
|
||||
@@ -170,5 +170,16 @@
|
||||
"transcription": {
|
||||
"enable": "Activer la transcription audio en direct",
|
||||
"disable": "Désactiver la transcription audio en direct"
|
||||
},
|
||||
"noCameras": {
|
||||
"title": "Aucune caméra configurée",
|
||||
"description": "Pour commencer, connectez une caméra.",
|
||||
"buttonText": "Ajouter une caméra"
|
||||
},
|
||||
"snapshot": {
|
||||
"takeSnapshot": "Télécharger un instantané immédiat",
|
||||
"noVideoSource": "Aucune source disponible pour un instantané.",
|
||||
"captureFailed": "Échec de la capture d'instantané.",
|
||||
"downloadStarted": "Démarrage du téléchargement de l'instantané."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,9 @@
|
||||
"object": "Débogage - Frigate",
|
||||
"frigatePlus": "Paramètres Frigate+ - Frigate",
|
||||
"notifications": "Paramètres de notification - Frigate",
|
||||
"enrichments": "Paramètres d'enrichissements - Frigate"
|
||||
"enrichments": "Paramètres d'enrichissements - Frigate",
|
||||
"cameraManagement": "Gestion des caméras - Frigate",
|
||||
"cameraReview": "Paramètres de revue des caméras - Frigate"
|
||||
},
|
||||
"menu": {
|
||||
"ui": "Interface utilisateur",
|
||||
@@ -23,7 +25,10 @@
|
||||
"notifications": "Notifications",
|
||||
"frigateplus": "Frigate+",
|
||||
"enrichments": "Enrichissements",
|
||||
"triggers": "Déclencheurs"
|
||||
"triggers": "Déclencheurs",
|
||||
"roles": "Rôles",
|
||||
"cameraManagement": "Gestion",
|
||||
"cameraReview": "Revue"
|
||||
},
|
||||
"dialog": {
|
||||
"unsavedChanges": {
|
||||
@@ -816,6 +821,11 @@
|
||||
"error": {
|
||||
"min": "Au moins une action doit être sélectionnée."
|
||||
}
|
||||
},
|
||||
"friendly_name": {
|
||||
"title": "Nom convivial",
|
||||
"placeholder": "Nommez ou décrivez ce déclencheur",
|
||||
"description": "Nom convivial ou texte descriptif facultatif pour ce déclencheur."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -830,6 +840,10 @@
|
||||
"updateTriggerFailed": "Échec de la mise à jour du déclencheur : {{errorMessage}}",
|
||||
"deleteTriggerFailed": "Échec de la suppression du déclencheur : {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"semanticSearch": {
|
||||
"title": "La recherche sémantique est désactivée",
|
||||
"desc": "La recherche sémantique doit être activée pour utiliser les déclencheurs."
|
||||
}
|
||||
},
|
||||
"roles": {
|
||||
@@ -891,5 +905,222 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraWizard": {
|
||||
"title": "Ajouter une caméra",
|
||||
"description": "Suivez les étapes ci-dessous pour ajouter une nouvelle caméra à votre installation Frigate.",
|
||||
"steps": {
|
||||
"nameAndConnection": "Nom et connexion",
|
||||
"streamConfiguration": "Configuration du flux",
|
||||
"validationAndTesting": "Validation et tests"
|
||||
},
|
||||
"save": {
|
||||
"success": "Réussite de l'enregistrement de la nouvelle caméra {{cameraName}}.",
|
||||
"failure": "Échec lors de l'enregistrement de {{cameraName}}."
|
||||
},
|
||||
"testResultLabels": {
|
||||
"resolution": "Résolution",
|
||||
"video": "Vidéo",
|
||||
"audio": "Audio",
|
||||
"fps": "FPS"
|
||||
},
|
||||
"commonErrors": {
|
||||
"noUrl": "Saisissez une URL de flux valide s'il vous plait",
|
||||
"testFailed": "Échec du test de flux : {{error}}"
|
||||
},
|
||||
"step1": {
|
||||
"description": "Saisissez les détails de votre caméra et testez la connexion.",
|
||||
"cameraName": "Nom de la caméra",
|
||||
"cameraNamePlaceholder": "par exemple, porte-entree ou Apercu Arriere_Cour",
|
||||
"host": "Hôte / Adresse IP",
|
||||
"port": "Port",
|
||||
"username": "Nom d'utilisateur",
|
||||
"usernamePlaceholder": "Facultatif",
|
||||
"password": "Mot de passe",
|
||||
"passwordPlaceholder": "Facultatif",
|
||||
"selectTransport": "Sélectionnez le protocole de transport",
|
||||
"cameraBrand": "Marque de la caméra",
|
||||
"selectBrand": "Sélectionnez la marque de la caméra pour le modèle de l'URL",
|
||||
"customUrl": "URL de flux personnalisé",
|
||||
"brandInformation": "Information sur la marque",
|
||||
"brandUrlFormat": "Pour les caméras avec un format d'URL RTSP comme : {{exampleUrl}}",
|
||||
"customUrlPlaceholder": "rtsp://nomutilisateur:motdepasse@hote:port/chemin",
|
||||
"testConnection": "Tester la connexion",
|
||||
"testSuccess": "Test de connexion réussi !",
|
||||
"testFailed": "Échec du test de connexion. Veuillez vérifier votre saisie et réessayer.",
|
||||
"streamDetails": "Détails du flux",
|
||||
"warnings": {
|
||||
"noSnapshot": "Impossible de récupérer un instantané à partir du flux configuré."
|
||||
},
|
||||
"errors": {
|
||||
"brandOrCustomUrlRequired": "Sélectionnez une marque de caméra avec hôte/IP ou choisissez « Autre » avec une URL personnalisée",
|
||||
"nameRequired": "Le nom de la caméra est requis",
|
||||
"nameLength": "Le nom de la caméra doit comporter 64 caractères ou moins",
|
||||
"invalidCharacters": "Le nom de la caméra contient des caractères non valides",
|
||||
"nameExists": "Le nom de la caméra existe déjà",
|
||||
"brands": {
|
||||
"reolink-rtsp": "Reolink RTSP n'est pas recommandé. Il est recommandé d'activer le protocole HTTP dans les paramètres de la caméra et de redémarrer l'assistant."
|
||||
}
|
||||
},
|
||||
"docs": {
|
||||
"reolink": "https://docs.frigate.video/configuration/camera_specific.html#reolink-cameras"
|
||||
}
|
||||
},
|
||||
"step2": {
|
||||
"description": "Configurez les rôles du flux et ajoutez des flux supplémentaires pour votre caméra.",
|
||||
"streamsTitle": "Flux de caméra",
|
||||
"addStream": "Ajouter un flux",
|
||||
"addAnotherStream": "Ajouter un autre flux",
|
||||
"streamTitle": "Flux {{number}}",
|
||||
"streamUrl": "URL du flux",
|
||||
"streamUrlPlaceholder": "rtsp://username:password@host:port/path",
|
||||
"url": "URL",
|
||||
"resolution": "Résolution",
|
||||
"selectResolution": "Sélectionnez la résolution",
|
||||
"quality": "Qualité",
|
||||
"selectQuality": "Sélectionnez la qualité",
|
||||
"roles": "Rôles",
|
||||
"roleLabels": {
|
||||
"record": "Enregistrement",
|
||||
"audio": "Audio",
|
||||
"detect": "Détection d'objet"
|
||||
},
|
||||
"testStream": "Tester la connexion",
|
||||
"testSuccess": "Test de diffusion réussi !",
|
||||
"testFailed": "Le test du flux a échoué",
|
||||
"testFailedTitle": "Échec du test",
|
||||
"connected": "Connecté",
|
||||
"notConnected": "Non connecté",
|
||||
"featuresTitle": "Caractéristiques",
|
||||
"go2rtc": "Réduire les connexions à la caméra",
|
||||
"detectRoleWarning": "Au moins un flux doit avoir le rôle « détecter » pour continuer.",
|
||||
"rolesPopover": {
|
||||
"title": "Rôles du flux",
|
||||
"detect": "Flux principal pour la détection d'objets.",
|
||||
"record": "Enregistre des segments du flux vidéo en fonction des paramètres de configuration.",
|
||||
"audio": "Flux pour la détection basée sur l'audio."
|
||||
},
|
||||
"featuresPopover": {
|
||||
"title": "Fonctionnalités du flux",
|
||||
"description": "Utilisez le flux go2rtc pour réduire le nombre de connexions à votre caméra."
|
||||
}
|
||||
},
|
||||
"step3": {
|
||||
"description": "Validation finale et analyse avant d'enregistrer votre nouvelle caméra. Connectez chaque flux avant d'enregistrer.",
|
||||
"validationTitle": "Validation du flux",
|
||||
"connectAllStreams": "Connecter tous les flux",
|
||||
"reconnectionSuccess": "Reconnexion réussie.",
|
||||
"reconnectionPartial": "Certains flux n'ont pas pu se reconnecter.",
|
||||
"streamUnavailable": "Aperçu du flux indisponible",
|
||||
"reload": "Recharger",
|
||||
"connecting": "Connexion...",
|
||||
"streamTitle": "Flux {{number}}",
|
||||
"failed": "Échoué",
|
||||
"notTested": "Non testé",
|
||||
"connectStream": "Connecter",
|
||||
"connectingStream": "Connexion",
|
||||
"disconnectStream": "Déconnecter",
|
||||
"estimatedBandwidth": "Bande passante estimée",
|
||||
"roles": "Rôles",
|
||||
"none": "Aucun",
|
||||
"error": "Erreur",
|
||||
"streamValidated": "Flux {{number}} validé avec succès",
|
||||
"streamValidationFailed": "La validation du flux {{number}} a échoué",
|
||||
"saveAndApply": "Enregistrer une nouvelle caméra",
|
||||
"saveError": "Configuration invalide. Veuillez vérifier vos paramètres.",
|
||||
"issues": {
|
||||
"title": "Validation du flux",
|
||||
"videoCodecGood": "Le codec vidéo est {{codec}}.",
|
||||
"audioCodecGood": "Le codec audio est {{codec}}.",
|
||||
"noAudioWarning": "Aucun audio détecté pour ce flux, les enregistrements n'auront pas d'audio.",
|
||||
"audioCodecRecordError": "Le codec audio AAC est requis pour prendre en charge l'audio dans les enregistrements.",
|
||||
"audioCodecRequired": "Un flux audio est requis pour prendre en charge la détection audio.",
|
||||
"restreamingWarning": "La réduction des connexions à la caméra pour le flux d'enregistrement peut augmenter légèrement l'utilisation du processeur.",
|
||||
"dahua": {
|
||||
"substreamWarning": "Le sous-flux 1 est verrouillé en basse résolution. De nombreuses caméras Dahua / Amcrest / EmpireTech prennent en charge des sous-flux supplémentaires qui doivent être activés dans les paramètres de la caméra. Il est recommandé de vérifier et d'utiliser ces flux s'ils sont disponibles."
|
||||
},
|
||||
"hikvision": {
|
||||
"substreamWarning": "Le sous-flux 1 est verrouillé en basse résolution. De nombreuses caméras Hikvision prennent en charge des sous-flux supplémentaires qui doivent être activés dans les paramètres de la caméra. Il est recommandé de vérifier et d'utiliser ces flux s'ils sont disponibles."
|
||||
}
|
||||
},
|
||||
"valid": "Validation"
|
||||
}
|
||||
},
|
||||
"cameraManagement": {
|
||||
"title": "Gérer les caméras",
|
||||
"addCamera": "Ajouter une nouvelle caméra",
|
||||
"editCamera": "Modifier la caméra:",
|
||||
"selectCamera": "Sélectionnez une caméra",
|
||||
"backToSettings": "Retour aux paramètres de la caméra",
|
||||
"streams": {
|
||||
"title": "Activer/Désactiver les caméras",
|
||||
"desc": "Désactivez temporairement une caméra jusqu'au redémarrage de Frigate. La désactivation d'une caméra interrompt complètement le traitement des flux de cette caméra par Frigate. La détection, l'enregistrement et le débogage seront indisponibles.<br /><em>Remarque : Ceci ne désactive pas les rediffusions go2rtc.</em>"
|
||||
},
|
||||
"cameraConfig": {
|
||||
"add": "Ajouter une caméra",
|
||||
"edit": "Modifier la caméra",
|
||||
"description": "Configurez les paramètres de la caméra, y compris les entrées de flux et les rôles.",
|
||||
"name": "Nom de la caméra",
|
||||
"nameRequired": "Le nom de la caméra est requis",
|
||||
"nameLength": "Le nom de la caméra doit comporter moins de 64 caractères.",
|
||||
"namePlaceholder": "par exemple, porte d'entrée ou aperçu de la cour arrière",
|
||||
"enabled": "Activé",
|
||||
"ffmpeg": {
|
||||
"inputs": "Flux d'entrée",
|
||||
"path": "Chemin du flux",
|
||||
"pathRequired": "Le chemin du flux est requis",
|
||||
"pathPlaceholder": "rtsp://...",
|
||||
"roles": "Rôles",
|
||||
"rolesRequired": "Au moins un rôle est requis",
|
||||
"rolesUnique": "Chaque rôle (audio, détection, enregistrement) ne peut être attribué qu'à un seul flux",
|
||||
"addInput": "Ajouter un flux d'entrée",
|
||||
"removeInput": "Supprimer le flux d'entrée",
|
||||
"inputsRequired": "Au moins un flux d'entrée est requis"
|
||||
},
|
||||
"go2rtcStreams": "Flux go2rtc",
|
||||
"streamUrls": "URLs des flux",
|
||||
"addUrl": "Ajouter une URL",
|
||||
"addGo2rtcStream": "Ajouter un flux go2rtc",
|
||||
"toast": {
|
||||
"success": "La caméra {{cameraName}} a été enregistrée avec succès"
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraReview": {
|
||||
"title": "Paramètres d'examen de la caméra",
|
||||
"object_descriptions": {
|
||||
"title": "Descriptions d'objets IA génératives",
|
||||
"desc": "Activez/désactivez temporairement les descriptions d'objets générées par l'IA pour cette caméra. Si elles sont désactivées, les descriptions générées par l'IA ne seront pas demandées pour les objets suivis par cette caméra."
|
||||
},
|
||||
"review_descriptions": {
|
||||
"title": "Descriptions des évaluations de l'IA générative",
|
||||
"desc": "Activez/désactivez temporairement les descriptions d'évaluation génératrices par l'IA pour cette caméra. Si elles sont désactivées, les descriptions générées par l'IA ne seront pas demandées pour les éléments d'évaluation de cette caméra."
|
||||
},
|
||||
"review": {
|
||||
"title": "Revoir",
|
||||
"desc": "Activez/désactivez temporairement les alertes et les détections pour cette caméra jusqu'au redémarrage de Frigate. Une fois désactivée, aucun nouvel élément d'analyse ne sera généré. ",
|
||||
"alerts": "Alertes. ",
|
||||
"detections": "Détections. "
|
||||
},
|
||||
"reviewClassification": {
|
||||
"title": "Classement des avis",
|
||||
"desc": "Frigate catégorise les éléments d'évaluation en alertes et détections. Par défaut, tous les objets <em>personne</em> et <em>voiture</em> sont considérés comme des alertes. Vous pouvez affiner la catégorisation de vos éléments d'évaluation en configurant les zones requises.",
|
||||
"noDefinedZones": "Aucune zone n'est définie pour cette caméra.",
|
||||
"objectAlertsTips": "Tous les objets {{alertsLabels}} sur {{cameraName}} seront affichés sous forme d'alertes.",
|
||||
"zoneObjectAlertsTips": "Tous les objets {{alertsLabels}} détectés dans {{zone}} sur {{cameraName}} seront affichés sous forme d'alertes.",
|
||||
"objectDetectionsTips": "Tous les objets {{detectionsLabels}} non classés sur {{cameraName}} seront affichés comme détections, quelle que soit la zone dans laquelle ils se trouvent.",
|
||||
"zoneObjectDetectionsTips": {
|
||||
"text": "Tous les objets {{detectionsLabels}} non classés dans {{zone}} sur {{cameraName}} seront affichés comme détections.",
|
||||
"notSelectDetections": "Tous les objets {{detectionsLabels}} détectés dans {{zone}} sur {{cameraName}} non classés comme alertes seront affichés comme détections, quelle que soit la zone dans laquelle ils se trouvent.",
|
||||
"regardlessOfZoneObjectDetectionsTips": "Tous les objets {{detectionsLabels}} non classés sur {{cameraName}} seront affichés comme détections, quelle que soit la zone dans laquelle ils se trouvent."
|
||||
},
|
||||
"unsavedChanges": "Paramètres de classification des avis non enregistrés pour {{camera}}",
|
||||
"selectAlertsZones": "Sélectionnez les zones pour les alertes",
|
||||
"selectDetectionsZones": "Sélectionner les zones pour les détections",
|
||||
"limitDetections": "Limiter les détections à des zones spécifiques",
|
||||
"toast": {
|
||||
"success": "La configuration de la classification a été enregistrée. Redémarrez Frigate pour appliquer les modifications."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"humming": "זמזום",
|
||||
"groan": "אנקה",
|
||||
"grunt": "לנחור",
|
||||
"whistling": "שריקה",
|
||||
"whistling": "לשרוק",
|
||||
"breathing": "נשימה",
|
||||
"wheeze": "גניחה",
|
||||
"snoring": "נחירה",
|
||||
@@ -69,7 +69,7 @@
|
||||
"fly": "זבוב",
|
||||
"buzz": "זמזם.",
|
||||
"frog": "צפרדע",
|
||||
"croak": "קרקור.",
|
||||
"croak": "קִרקוּר",
|
||||
"snake": "נחש",
|
||||
"rattle": "טרטור",
|
||||
"whale_vocalization": "קולות לוויתן",
|
||||
@@ -81,7 +81,7 @@
|
||||
"bass_guitar": "גיטרה בס",
|
||||
"acoustic_guitar": "גיטרה אקוסטית",
|
||||
"steel_guitar": "גיטרה פלדה",
|
||||
"tapping": "הקשה.",
|
||||
"tapping": "להקיש",
|
||||
"strum": "פריטה",
|
||||
"banjo": "בנג'ו",
|
||||
"sitar": "סיטאר",
|
||||
@@ -189,7 +189,7 @@
|
||||
"church_bell": "פעמון כנסיה",
|
||||
"jingle_bell": "ג'ינגל בל",
|
||||
"bicycle_bell": "פעמון אופניים",
|
||||
"chime": "צלצול",
|
||||
"chime": "צִלצוּל",
|
||||
"wind_chime": "פעמון רוח",
|
||||
"harmonica": "הרמוניקה",
|
||||
"accordion": "אקורדיון",
|
||||
@@ -341,7 +341,7 @@
|
||||
"microwave_oven": "מיקרוגל",
|
||||
"water_tap": "ברז מים",
|
||||
"bathtub": "אמבטיה",
|
||||
"dishes": "כלים.",
|
||||
"dishes": "מנות",
|
||||
"scissors": "מספריים",
|
||||
"toothbrush": "מברשת שיניים",
|
||||
"toilet_flush": "הורדת מים לאסלה",
|
||||
@@ -355,7 +355,7 @@
|
||||
"computer_keyboard": "מקלדת מחשב",
|
||||
"writing": "כתיבה",
|
||||
"telephone_bell_ringing": "צלצול טלפון",
|
||||
"ringtone": "צליל חיוג.",
|
||||
"ringtone": "צלצול",
|
||||
"clock": "שעון",
|
||||
"telephone_dialing": "טלפון מחייג",
|
||||
"dial_tone": "צליל חיוג",
|
||||
@@ -425,5 +425,54 @@
|
||||
"slam": "טריקה",
|
||||
"telephone": "טלפון",
|
||||
"tuning_fork": "מזלג כוונון",
|
||||
"raindrop": "טיפות גשם"
|
||||
"raindrop": "טיפות גשם",
|
||||
"smash": "רסק",
|
||||
"boiling": "רותח",
|
||||
"sonar": "סונר",
|
||||
"arrow": "חץ",
|
||||
"whack": "מַהֲלוּמָה",
|
||||
"sine_wave": "גל סינוס",
|
||||
"harmonic": "הרמוניה",
|
||||
"chirp_tone": "צליל ציוץ",
|
||||
"pulse": "דוֹפֶק",
|
||||
"inside": "בְּתוֹך",
|
||||
"outside": "בחוץ",
|
||||
"reverberation": "הִדהוּד",
|
||||
"echo": "הד",
|
||||
"noise": "רעש",
|
||||
"mains_hum": "זמזום ראשי",
|
||||
"distortion": "סַלְפָנוּת",
|
||||
"sidetone": "צליל צדדי",
|
||||
"cacophony": "קָקוֹפוֹניָה",
|
||||
"throbbing": "פְּעִימָה",
|
||||
"vibration": "רֶטֶט",
|
||||
"sodeling": "מיזוג",
|
||||
"change_ringing": "שינוי צלצול",
|
||||
"shofar": "שופר",
|
||||
"liquid": "נוזל",
|
||||
"splash": "התזה",
|
||||
"slosh": "שכשוך",
|
||||
"squish": "מעיכה",
|
||||
"drip": "טפטוף",
|
||||
"pour": "לִשְׁפּוֹך",
|
||||
"trickle": "לְטַפטֵף",
|
||||
"gush": "פֶּרֶץ",
|
||||
"fill": "מילוי",
|
||||
"spray": "ריסוס",
|
||||
"pump": "משאבה",
|
||||
"stir": "בחישה",
|
||||
"whoosh": "מהיר",
|
||||
"thump": "חֲבָטָה",
|
||||
"thunk": "תרועה",
|
||||
"electronic_tuner": "מכוון אלקטרוני",
|
||||
"effects_unit": "יחידת אפקטים",
|
||||
"chorus_effect": "אפקט מקהלה",
|
||||
"basketball_bounce": "קפיצת כדורסל",
|
||||
"bang": "לִדפּוֹק",
|
||||
"slap": "סְטִירָה",
|
||||
"breaking": "שְׁבִירָה",
|
||||
"bouncing": "הַקפָּצָה",
|
||||
"whip": "שׁוֹט",
|
||||
"flap": "מַדָף",
|
||||
"scratch": "לְגַרֵד"
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"filter": "לסנן",
|
||||
"filter": "מסנן",
|
||||
"features": {
|
||||
"submittedToFrigatePlus": {
|
||||
"tips": "עליך תחילה לסנן לפי אובייקטים במעקב שיש להם תמונת מצב.<br /><br />לא ניתן לשלוח ל-Frigate+ אובייקטים במעקב ללא תמונת מצב.",
|
||||
@@ -26,7 +26,7 @@
|
||||
}
|
||||
},
|
||||
"dates": {
|
||||
"selectPreset": "בחר פריסט…",
|
||||
"selectPreset": "בחר הגדרה…",
|
||||
"all": {
|
||||
"title": "כל התאריכים",
|
||||
"short": "תאריכים"
|
||||
@@ -71,16 +71,16 @@
|
||||
"title": "הגדרות",
|
||||
"defaultView": {
|
||||
"summary": "סיכום",
|
||||
"unfilteredGrid": "תצוגה מלאה",
|
||||
"unfilteredGrid": "טבלה לא מסוננת",
|
||||
"title": "תצוגת ברירת מחדל",
|
||||
"desc": "כאשר לא נבחרו מסננים, הצג סיכום של האובייקטים האחרונים שעברו מעקב לפי תווית, או הצג רשת לא מסוננת."
|
||||
},
|
||||
"gridColumns": {
|
||||
"title": "עמודות גריד",
|
||||
"desc": "בחר את מספר העמודות בגריד."
|
||||
"title": "עמודות טבלה",
|
||||
"desc": "בחר את מספר העמודות בטבלה."
|
||||
},
|
||||
"searchSource": {
|
||||
"label": "מקור חיפוש",
|
||||
"label": "חיפוש במקור",
|
||||
"desc": "בחר אם לחפש בתמונות הממוזערות או בתיאורים של האובייקטים שבמעקב.",
|
||||
"options": {
|
||||
"thumbnailImage": "תמונה ממוזערת",
|
||||
@@ -100,7 +100,7 @@
|
||||
"error": "מחיקת אובייקטים במעקב נכשלה: {{errorMessage}}"
|
||||
},
|
||||
"title": "אישור מחיקה",
|
||||
"desc": "מחיקת אובייקטים אלה שעברו מעקב ({{objectLength}}) מסירה את לכידת התמונה, כל ההטמעות שנשמרו וכל ערכי שלבי האובייקט המשויכים. קטעי וידאו מוקלטים של אובייקטים אלה שעברו מעקב בתצוגת היסטוריה <em>לא</em> יימחקו.<br /><br />האם אתה בטוח שברצונך להמשיך?<br /><br />החזק את מקש <em>Shift</em> כדי לעקוף תיבת דו-שיח זו בעתיד."
|
||||
"desc": "מחיקת אובייקטים אלה ({{objectLength}}) שעברו מעקב מסירה את לכידת התמונה, כל ההטמעות שנשמרו וכל ערכי שלבי האובייקט המשויכים. קטעי וידאו מוקלטים של אובייקטים אלה שעברו מעקב בתצוגת היסטוריה <em>לא</em> יימחקו.<br /><br />האם אתה בטוח שברצונך להמשיך?<br /><br />החזק את מקש <em>Shift</em> כדי לעקוף תיבת דו-שיח זו בעתיד."
|
||||
},
|
||||
"zoneMask": {
|
||||
"filterBy": "סינון לפי מיסוך אזור"
|
||||
@@ -111,16 +111,26 @@
|
||||
"loading": "טוען לוחיות רישוי מזוהות…",
|
||||
"placeholder": "הקלד כדי לחפש לוחיות רישוי…",
|
||||
"noLicensePlatesFound": "לא נמצאו לוחיות רישוי.",
|
||||
"selectPlatesFromList": "בחירת לוחית אחת או יותר מהרשימה."
|
||||
"selectPlatesFromList": "בחירת לוחית אחת או יותר מהרשימה.",
|
||||
"selectAll": "בחר הכל",
|
||||
"clearAll": "נקה הכל"
|
||||
},
|
||||
"logSettings": {
|
||||
"label": "סינון רמת לוג",
|
||||
"filterBySeverity": "סנן לוגים לפי חומרה",
|
||||
"filterBySeverity": "סנן לוגים לפי חוּמרָה",
|
||||
"loading": {
|
||||
"title": "טוען",
|
||||
"desc": "כאשר חלונית הלוגים גוללת לתחתית, לוגים חדשים מוזרמים אוטומטית עם הוספתם."
|
||||
"desc": "כאשר חלונית הלוגים מגוללת לתחתית, לוגים חדשים מוזרמים אוטומטית עם הוספתם."
|
||||
},
|
||||
"disableLogStreaming": "השבתת זרימה של לוגים",
|
||||
"allLogs": "כל הלוגים"
|
||||
},
|
||||
"classes": {
|
||||
"label": "מחלקות",
|
||||
"all": {
|
||||
"title": "כל המחלקות"
|
||||
},
|
||||
"count_one": "{{count}} מחלקה",
|
||||
"count_other": "{{count}} מחלקות"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"documentTitle": "עורך הגדרות - פריגטה",
|
||||
"documentTitle": "עורך הגדרות - Frigate",
|
||||
"configEditor": "עורך תצורה",
|
||||
"copyConfig": "העתקת הגדרות",
|
||||
"saveAndRestart": "שמירה והפעלה מחדש",
|
||||
@@ -12,5 +12,7 @@
|
||||
"error": {
|
||||
"savingError": "שגיאה בשמירת ההגדרות"
|
||||
}
|
||||
}
|
||||
},
|
||||
"safeConfigEditor": "עורך תצורה (מצב בטוח)",
|
||||
"safeModeDescription": "Frigate במצב בטוח עקב שגיאת אימות הגדרות."
|
||||
}
|
||||
|
||||
@@ -34,5 +34,16 @@
|
||||
"selected_one": "נבחרו {{count}}",
|
||||
"selected_other": "{{count}} נבחרו",
|
||||
"camera": "מצלמה",
|
||||
"detected": "זוהה"
|
||||
"detected": "זוהה",
|
||||
"detail": {
|
||||
"noDataFound": "אין נתונים מפורטים לבדיקה",
|
||||
"aria": "הפעלה/כיבוי תצוגת פרטים",
|
||||
"trackedObject_one": "אובייקט במעקב",
|
||||
"trackedObject_other": "אובייקטים במעקב",
|
||||
"noObjectDetailData": "אין נתוני אובייקט זמינים."
|
||||
},
|
||||
"objectTrack": {
|
||||
"trackedPoint": "נקודה במעקב",
|
||||
"clickToSeek": "לחץ כדי לחפש את הזמן הזה"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,7 +63,15 @@
|
||||
"label": "לחץ בתוך המסגרת כדי למרכז את המצלמה הממונעת"
|
||||
}
|
||||
},
|
||||
"presets": "מצלמה ממונעת - פריסטים"
|
||||
"presets": "מצלמה ממונעת - פריסטים",
|
||||
"focus": {
|
||||
"in": {
|
||||
"label": "כניסת פוקוס מצלמת PTZ"
|
||||
},
|
||||
"out": {
|
||||
"label": "יציאת פוקוס מצלמת PTZ"
|
||||
}
|
||||
}
|
||||
},
|
||||
"camera": {
|
||||
"enable": "אפשור מצלמה",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"filter": "לסנן",
|
||||
"filter": "מסנן",
|
||||
"export": "ייצוא",
|
||||
"calendar": "לוח שנה",
|
||||
"filters": "מסננים",
|
||||
|
||||
@@ -268,7 +268,9 @@
|
||||
"notifications": "הגדרת התראות - Frigate",
|
||||
"authentication": "הגדרות אימות - Frigate",
|
||||
"default": "הגדרות - Frigate",
|
||||
"general": "הגדרות כלליות - Frigate"
|
||||
"general": "הגדרות כלליות - Frigate",
|
||||
"cameraManagement": "ניהול מצלמות - Frigate",
|
||||
"cameraReview": "הגדרות סקירת מצלמה - Frigate"
|
||||
},
|
||||
"menu": {
|
||||
"ui": "UI - ממשק משתמש",
|
||||
@@ -280,7 +282,10 @@
|
||||
"notifications": "התראות",
|
||||
"frigateplus": "+Frigate",
|
||||
"enrichments": "תוספות",
|
||||
"triggers": "הפעלות"
|
||||
"triggers": "הפעלות",
|
||||
"cameraManagement": "ניהול",
|
||||
"cameraReview": "סְקִירָה",
|
||||
"roles": "תפקידים"
|
||||
},
|
||||
"dialog": {
|
||||
"unsavedChanges": {
|
||||
|
||||
@@ -52,7 +52,8 @@
|
||||
"inferenceSpeed": "מהירות זיהוי",
|
||||
"temperature": "טמפרטורת הגלאי",
|
||||
"cpuUsage": "ניצול מעבד על ידי הגלאי",
|
||||
"memoryUsage": "שימוש בזיכרון על ידי הגלאי"
|
||||
"memoryUsage": "שימוש בזיכרון על ידי הגלאי",
|
||||
"cpuUsageInformation": "המעבד המשמש להכנת נתוני קלט ופלט אל/ממודלי זיהוי. ערך זה אינו מודד את השימוש בהסקה, גם אם נעשה שימוש במעבד גרפי או מאיץ."
|
||||
},
|
||||
"hardwareInfo": {
|
||||
"gpuMemory": "זיכרון GPU",
|
||||
|
||||
@@ -176,7 +176,7 @@
|
||||
},
|
||||
"role": {
|
||||
"viewer": "Néző",
|
||||
"title": "Szerep",
|
||||
"title": "Szerepkör",
|
||||
"admin": "Adminisztrátor",
|
||||
"desc": "Az adminisztrátoroknak teljes hozzáférése van az összes feature-höz. A nézők csak a kamerákat láthatják, áttekinthetik az elemeket és az előzményeket a UI-on."
|
||||
},
|
||||
@@ -221,6 +221,14 @@
|
||||
"length": {
|
||||
"feet": "láb",
|
||||
"meters": "méter"
|
||||
},
|
||||
"data": {
|
||||
"kbps": "kB/s",
|
||||
"mbps": "MB/s",
|
||||
"gbps": "GB/s",
|
||||
"kbph": "kB/óra",
|
||||
"mbph": "MB/óra",
|
||||
"gbph": "GB/óra"
|
||||
}
|
||||
},
|
||||
"button": {
|
||||
@@ -263,5 +271,8 @@
|
||||
"label": {
|
||||
"back": "Vissza"
|
||||
},
|
||||
"readTheDocumentation": "Olvassa el a dokumentációt"
|
||||
"readTheDocumentation": "Olvassa el a dokumentációt",
|
||||
"information": {
|
||||
"pixels": "{{area}}px"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"button": "Újraindítás",
|
||||
"restarting": {
|
||||
"title": "A Frigate újraindul",
|
||||
"content": "Az oldal újrtölt {{countdown}} másodperc múlva.",
|
||||
"content": "Az oldal újratölt {{countdown}} másodperc múlva.",
|
||||
"button": "Erőltetett újraindítás azonnal"
|
||||
}
|
||||
},
|
||||
@@ -107,7 +107,8 @@
|
||||
"button": {
|
||||
"markAsReviewed": "Megjelölés áttekintettként",
|
||||
"deleteNow": "Törlés Most",
|
||||
"export": "Exportálás"
|
||||
"export": "Exportálás",
|
||||
"markAsUnreviewed": "Megjelölés nem ellenőrzöttként"
|
||||
}
|
||||
},
|
||||
"imagePicker": {
|
||||
|
||||
@@ -134,6 +134,9 @@
|
||||
"playInBackground": {
|
||||
"label": "Lejátszás a háttérben",
|
||||
"tips": "Engedélyezze ezt az opciót a folyamatos közvetítéshez akkor is, ha a lejátszó rejtve van."
|
||||
},
|
||||
"debug": {
|
||||
"picker": "A stream kiválasztása nem érhető el hibakeresési módban. A hibakeresési nézet mindig az észlelési szerepkörhöz rendelt streamet használja."
|
||||
}
|
||||
},
|
||||
"cameraSettings": {
|
||||
@@ -167,5 +170,16 @@
|
||||
"transcription": {
|
||||
"enable": "Élő Audio Feliratozás Engedélyezése",
|
||||
"disable": "Élő Audio Feliratozás Kikapcsolása"
|
||||
},
|
||||
"noCameras": {
|
||||
"title": "Nincsenek kamerák beállítva",
|
||||
"description": "Kezdje egy kamera csatlakoztatásával.",
|
||||
"buttonText": "Kamera hozzáadása"
|
||||
},
|
||||
"snapshot": {
|
||||
"takeSnapshot": "Azonnali pillanatkép letöltése",
|
||||
"noVideoSource": "Ehhez a pillanatképhez videó forrás nem elérhető.",
|
||||
"captureFailed": "Pillanatkép készítése sikertelen.",
|
||||
"downloadStarted": "Pillanatkép letöltése elindítva."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,9 @@
|
||||
"frigatePlus": "Frigate+ beállítások - Frigate",
|
||||
"notifications": "Értesítések beállítása - Frigate",
|
||||
"motionTuner": "Mozgás Hangoló - Frigate",
|
||||
"enrichments": "Kiegészítés Beállítások - Frigate"
|
||||
"enrichments": "Kiegészítés Beállítások - Frigate",
|
||||
"cameraManagement": "Kamerák kezelése - Frigate",
|
||||
"cameraReview": "Kamera beállítások áttekintése – Frigate"
|
||||
},
|
||||
"menu": {
|
||||
"ui": "UI",
|
||||
@@ -23,7 +25,10 @@
|
||||
"notifications": "Értesítések",
|
||||
"frigateplus": "Frigate+",
|
||||
"enrichments": "Extra funkciók",
|
||||
"triggers": "Triggerek"
|
||||
"triggers": "Triggerek",
|
||||
"roles": "Szerepkörök",
|
||||
"cameraManagement": "Menedzsment",
|
||||
"cameraReview": "Vizsgálat"
|
||||
},
|
||||
"dialog": {
|
||||
"unsavedChanges": {
|
||||
@@ -254,7 +259,8 @@
|
||||
"admin": "Adminisztrátor",
|
||||
"intro": "Válassza ki a megfelelő szerepkört ehhez a felhasználóhoz:",
|
||||
"adminDesc": "Teljes hozzáférés az összes funkcióhoz.",
|
||||
"viewerDesc": "Csak az Élő irányítópultokhoz, Ellenőrzéshez, Felfedezéshez és Exportokhoz korlátozva."
|
||||
"viewerDesc": "Csak az Élő irányítópultokhoz, Ellenőrzéshez, Felfedezéshez és Exportokhoz korlátozva.",
|
||||
"customDesc": "Egyéni szerepkör meghatározott kamerahozzáféréssel."
|
||||
},
|
||||
"title": "Felhasználói szerepkör módosítása",
|
||||
"select": "Válasszon szerepkört",
|
||||
@@ -317,7 +323,7 @@
|
||||
"username": "Felhasználói név",
|
||||
"password": "Jelszó",
|
||||
"deleteUser": "Felhasználó törlése",
|
||||
"actions": "Műveletek",
|
||||
"actions": "Akciók",
|
||||
"role": "Szerepkör",
|
||||
"changeRole": "felhasználói szerepkör módosítása"
|
||||
},
|
||||
@@ -749,6 +755,11 @@
|
||||
"error": {
|
||||
"min": "Legalább egy műveletet ki kell választani."
|
||||
}
|
||||
},
|
||||
"friendly_name": {
|
||||
"title": "Barátságos név",
|
||||
"placeholder": "Nevezd meg vagy írd le ezt a triggert",
|
||||
"description": "Egy opcionális felhasználóbarát név vagy leíró szöveg ehhez az eseményindítóhoz."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -763,6 +774,79 @@
|
||||
"updateTriggerFailed": "A trigger módosítása sikertelen: {{errorMessage}}",
|
||||
"deleteTriggerFailed": "A trigger törlése sikertelen: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"semanticSearch": {
|
||||
"title": "Szemantikus keresés le van tiltva",
|
||||
"desc": "A Triggerek használatához engedélyezni kell a szemantikus keresést."
|
||||
}
|
||||
},
|
||||
"roles": {
|
||||
"management": {
|
||||
"title": "Megtekintői szerepkör-kezelés",
|
||||
"desc": "Kezelje az egyéni nézői szerepköröket és a kamera-hozzáférési engedélyeiket ehhez a Frigate-példányhoz."
|
||||
},
|
||||
"addRole": "Szerepkör hozzáadása",
|
||||
"table": {
|
||||
"role": "Szerepkör",
|
||||
"cameras": "Kamerák",
|
||||
"actions": "Akciók",
|
||||
"noRoles": "Nem találhatók egyéni szerepkörök.",
|
||||
"editCameras": "Kamerák módosítása",
|
||||
"deleteRole": "Szerepkör törlése"
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"createRole": "Szerepkör létrehozva: {{role}}",
|
||||
"updateCameras": "Kamerák frissítve a szerepkörhöz: {{role}}",
|
||||
"deleteRole": "Szerepkör sikeresen törölve: {{role}}",
|
||||
"userRolesUpdated": "{{count}} felhasználó, akit ehhez a szerepkörhöz rendeltünk, frissült „néző”-re, amely hozzáféréssel rendelkezik az összes kamerához."
|
||||
},
|
||||
"error": {
|
||||
"createRoleFailed": "Nem sikerült létrehozni a szerepkört: {{errorMessage}}",
|
||||
"updateCamerasFailed": "Nem sikerült frissíteni a kamerákat: {{errorMessage}}",
|
||||
"deleteRoleFailed": "Nem sikerült törölni a szerepkört: {{errorMessage}}",
|
||||
"userUpdateFailed": "Nem sikerült frissíteni a felhasználói szerepköröket: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"dialog": {
|
||||
"createRole": {
|
||||
"title": "Új szerepkör létrehozása",
|
||||
"desc": "Adjon hozzá egy új szerepkört, és adja meg a kamera hozzáférési engedélyeit."
|
||||
},
|
||||
"editCameras": {
|
||||
"title": "Szerepkör kamerák szerkesztése",
|
||||
"desc": "Frissítse a kamerahozzáférést a(z) <strong>{{role}}</strong> szerepkörhöz."
|
||||
},
|
||||
"deleteRole": {
|
||||
"title": "Szerepkör törlése",
|
||||
"desc": "Ez a művelet nem vonható vissza. Ez véglegesen törli a szerepkört, és az ezzel a szerepkörrel rendelkező összes felhasználót a „megtekintő” szerepkörhöz rendeli, amivel a megtekintő hozzáférhet az összes kamerához.",
|
||||
"warn": "Biztosan törölni szeretnéd a(z) <strong>{{role}}</strong> szerepkört?",
|
||||
"deleting": "Törlés..."
|
||||
},
|
||||
"form": {
|
||||
"role": {
|
||||
"title": "Szerepkör neve",
|
||||
"placeholder": "Adja meg a szerepkör nevét",
|
||||
"desc": "Csak betűk, számok, pontok és aláhúzásjelek engedélyezettek.",
|
||||
"roleIsRequired": "A szerepkör nevének megadása kötelező",
|
||||
"roleOnlyInclude": "A szerepkör neve csak betűket, számokat , . vagy _ karaktereket tartalmazhat",
|
||||
"roleExists": "Már létezik egy ilyen nevű szerepkör."
|
||||
},
|
||||
"cameras": {
|
||||
"title": "Kamerák",
|
||||
"desc": "Válassza ki azokat a kamerákat, amelyekhez ennek a szerepkörnek hozzáférése van. Legalább egy kamera megadása szükséges.",
|
||||
"required": "Legalább egy kamerát ki kell választani."
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraWizard": {
|
||||
"title": "Kamera hozzáadása",
|
||||
"description": "Kövesse az alábbi lépéseket, hogy új kamerát adjon hozzá a Frigate telepítéséhez.",
|
||||
"steps": {
|
||||
"nameAndConnection": "Név & adatkapcsolat",
|
||||
"streamConfiguration": "Stream beállítások",
|
||||
"validationAndTesting": "Validálás és tesztelés"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,5 +81,8 @@
|
||||
"electric_guitar": "Gitar Elektrik",
|
||||
"acoustic_guitar": "Gitar Akustik",
|
||||
"strum": "Genjreng",
|
||||
"banjo": "Banjo"
|
||||
"banjo": "Banjo",
|
||||
"snoring": "Ngorok",
|
||||
"cough": "Batuk",
|
||||
"clapping": "Tepukan"
|
||||
}
|
||||
|
||||
@@ -8,6 +8,11 @@
|
||||
"motionTuner": "Penyetel Gerakan - Frigate",
|
||||
"general": "Frigate - Pengaturan Umum",
|
||||
"object": "Debug - Frigate",
|
||||
"enrichments": "Frigate - Pengaturan Pengayaan"
|
||||
"enrichments": "Frigate - Pengaturan Pengayaan",
|
||||
"cameraManagement": "Pengaturan Kamera - Frigate"
|
||||
},
|
||||
"menu": {
|
||||
"cameraManagement": "Pengaturan",
|
||||
"notifications": "Notifikasi"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -425,5 +425,13 @@
|
||||
"white_noise": "Rumore bianco",
|
||||
"pink_noise": "Rumore rosa",
|
||||
"field_recording": "Registrazione sul campo",
|
||||
"scream": "Grido"
|
||||
"scream": "Grido",
|
||||
"vibration": "Vibrazione",
|
||||
"sodeling": "Zollatura",
|
||||
"chird": "Accordo",
|
||||
"change_ringing": "Cambia suoneria",
|
||||
"shofar": "Shofar",
|
||||
"liquid": "Liquido",
|
||||
"splash": "Schizzo",
|
||||
"slosh": "Sciabordio"
|
||||
}
|
||||
|
||||
@@ -134,6 +134,14 @@
|
||||
"length": {
|
||||
"feet": "piedi",
|
||||
"meters": "metri"
|
||||
},
|
||||
"data": {
|
||||
"kbps": "kB/s",
|
||||
"mbps": "MB/s",
|
||||
"gbps": "GB/s",
|
||||
"kbph": "kB/ora",
|
||||
"mbph": "MB/ora",
|
||||
"gbph": "GB/ora"
|
||||
}
|
||||
},
|
||||
"label": {
|
||||
@@ -280,5 +288,8 @@
|
||||
}
|
||||
},
|
||||
"selectItem": "Seleziona {{item}}",
|
||||
"readTheDocumentation": "Leggi la documentazione"
|
||||
"readTheDocumentation": "Leggi la documentazione",
|
||||
"information": {
|
||||
"pixels": "{{area}}px"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,7 +110,8 @@
|
||||
"button": {
|
||||
"export": "Esporta",
|
||||
"markAsReviewed": "Segna come visto",
|
||||
"deleteNow": "Elimina ora"
|
||||
"deleteNow": "Elimina ora",
|
||||
"markAsUnreviewed": "Segna come non visto"
|
||||
},
|
||||
"confirmDelete": {
|
||||
"desc": {
|
||||
|
||||
@@ -37,5 +37,16 @@
|
||||
"selected_other": "{{count}} selezionati",
|
||||
"detected": "rilevato",
|
||||
"suspiciousActivity": "Attività sospetta",
|
||||
"threateningActivity": "Attività minacciosa"
|
||||
"threateningActivity": "Attività minacciosa",
|
||||
"detail": {
|
||||
"noDataFound": "Nessun dato dettagliato da rivedere",
|
||||
"aria": "Attiva/disattiva la visualizzazione dettagliata",
|
||||
"trackedObject_one": "oggetto tracciato",
|
||||
"trackedObject_other": "oggetti tracciati",
|
||||
"noObjectDetailData": "Non sono disponibili dati dettagliati sull'oggetto."
|
||||
},
|
||||
"objectTrack": {
|
||||
"trackedPoint": "Punto tracciato",
|
||||
"clickToSeek": "Premi per cercare in questo momento"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@
|
||||
},
|
||||
"imageEntry": {
|
||||
"dropActive": "Rilascia l'immagine qui…",
|
||||
"dropInstructions": "Trascina e rilascia un'immagine qui oppure fai clic per selezionarla",
|
||||
"dropInstructions": "Trascina e rilascia o incolla un'immagine qui oppure fai clic per selezionarla",
|
||||
"maxSize": "Dimensione massima: {{size}} MB",
|
||||
"validation": {
|
||||
"selectImage": "Seleziona un file immagine."
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
},
|
||||
"manualRecording": {
|
||||
"recordDisabledTips": "Poiché la registrazione è disabilitata o limitata nella configurazione di questa telecamera, verrà salvata solo un'istantanea.",
|
||||
"title": "Registrazione su richiesta",
|
||||
"tips": "Avvia un evento manuale in base alle impostazioni di conservazione della registrazione di questa telecamera.",
|
||||
"title": "Su richiesta",
|
||||
"tips": "Scarica un'istantanea attuale o avvia un evento manuale in base alle impostazioni di conservazione della registrazione di questa telecamera.",
|
||||
"playInBackground": {
|
||||
"label": "Riproduci in sottofondo",
|
||||
"desc": "Abilita questa opzione per continuare la trasmissione quando il lettore è nascosto."
|
||||
@@ -147,6 +147,9 @@
|
||||
"lowBandwidth": {
|
||||
"tips": "La visualizzazione dal vivo è in modalità a bassa larghezza di banda a causa di errori di caricamento o di trasmissione.",
|
||||
"resetStream": "Reimposta flusso"
|
||||
},
|
||||
"debug": {
|
||||
"picker": "Selezione del flusso non disponibile in modalità correzioni. La visualizzazione correzioni utilizza sempre il flusso a cui è assegnato il ruolo di rilevamento."
|
||||
}
|
||||
},
|
||||
"effectiveRetainMode": {
|
||||
@@ -167,5 +170,16 @@
|
||||
"transcription": {
|
||||
"enable": "Abilita la trascrizione audio in tempo reale",
|
||||
"disable": "Disabilita la trascrizione audio in tempo reale"
|
||||
},
|
||||
"noCameras": {
|
||||
"buttonText": "Aggiungi telecamera",
|
||||
"title": "Nessuna telecamera configurata",
|
||||
"description": "Per iniziare, collega una telecamera."
|
||||
},
|
||||
"snapshot": {
|
||||
"takeSnapshot": "Scarica l'istantanea attuale",
|
||||
"noVideoSource": "Nessuna sorgente video disponibile per l'istantanea.",
|
||||
"captureFailed": "Impossibile catturare l'istantanea.",
|
||||
"downloadStarted": "Scaricamento istantanea avviato."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,9 @@
|
||||
"general": "Impostazioni generali - Frigate",
|
||||
"frigatePlus": "Impostazioni Frigate+ - Frigate",
|
||||
"notifications": "Impostazioni di notifiche - Frigate",
|
||||
"enrichments": "Impostazioni di miglioramento - Frigate"
|
||||
"enrichments": "Impostazioni di miglioramento - Frigate",
|
||||
"cameraManagement": "Gestisci telecamere - Frigate",
|
||||
"cameraReview": "Impostazioni revisione telecamera - Frigate"
|
||||
},
|
||||
"frigatePlus": {
|
||||
"snapshotConfig": {
|
||||
@@ -378,7 +380,10 @@
|
||||
"users": "Utenti",
|
||||
"frigateplus": "Frigate+",
|
||||
"enrichments": "Miglioramenti",
|
||||
"triggers": "Inneschi"
|
||||
"triggers": "Inneschi",
|
||||
"roles": "Ruoli",
|
||||
"cameraManagement": "Gestione",
|
||||
"cameraReview": "Rivedi"
|
||||
},
|
||||
"users": {
|
||||
"dialog": {
|
||||
@@ -815,6 +820,11 @@
|
||||
"error": {
|
||||
"min": "È necessario selezionare almeno un'azione."
|
||||
}
|
||||
},
|
||||
"friendly_name": {
|
||||
"title": "Nome semplice",
|
||||
"placeholder": "Assegna un nome o descrivi questo innesco",
|
||||
"description": "Un nome semplice o un testo descrittivo facoltativo per questo innesco."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -829,6 +839,10 @@
|
||||
"updateTriggerFailed": "Impossibile aggiornare l'innesco: {{errorMessage}}",
|
||||
"deleteTriggerFailed": "Impossibile eliminare l'innesco: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"semanticSearch": {
|
||||
"title": "La ricerca semantica è disabilitata",
|
||||
"desc": "Per utilizzare gli inneschi, è necessario abilitare la ricerca semantica."
|
||||
}
|
||||
},
|
||||
"roles": {
|
||||
@@ -890,5 +904,222 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraReview": {
|
||||
"title": "Impostazioni revisione telecamera",
|
||||
"object_descriptions": {
|
||||
"title": "Descrizioni oggetti IA generativa",
|
||||
"desc": "Abilita/disabilita temporaneamente le descrizioni degli oggetti generate dall'IA per questa telecamera. Se disabilitate, le descrizioni generate dall'IA non verranno richieste per gli oggetti tracciati su questa telecamera."
|
||||
},
|
||||
"review_descriptions": {
|
||||
"title": "Descrizioni revisioni IA generativa",
|
||||
"desc": "Abilita/disabilita temporaneamente le descrizioni delle revisioni generate dall'IA per questa telecamera. Se disabilitate, le descrizioni generate dall'IA non verranno richieste per gli elementi da rivedere su questa telecamera."
|
||||
},
|
||||
"review": {
|
||||
"title": "Rivedi",
|
||||
"desc": "Abilita/disabilita temporaneamente avvisi e rilevamenti per questa telecamera fino al riavvio di Frigate. Se disabilitato, non verranno generati nuovi elementi di revisione. ",
|
||||
"alerts": "Avvisi ",
|
||||
"detections": "Rilevamenti "
|
||||
},
|
||||
"reviewClassification": {
|
||||
"title": "Classificazione revisione",
|
||||
"desc": "Frigate categorizza gli elementi di revisione come Avvisi e Rilevamenti. Per impostazione predefinita, tutti gli oggetti <em>persona</em> e <em>auto</em> sono considerati Avvisi. È possibile perfezionare la categorizzazione degli elementi di revisione configurando le zone richieste per ciascuno di essi.",
|
||||
"noDefinedZones": "Per questa telecamera non sono definite zone.",
|
||||
"objectAlertsTips": "Tutti gli oggetti {{alertsLabels}} su {{cameraName}} verranno mostrati come Avvisi.",
|
||||
"zoneObjectAlertsTips": "Tutti gli oggetti {{alertsLabels}} rilevati in {{zone}} su {{cameraName}} verranno mostrati come Avvisi.",
|
||||
"objectDetectionsTips": "Tutti gli oggetti {{detectionsLabels}} non categorizzati su {{cameraName}} verranno mostrati come Rilevamenti, indipendentemente dalla zona in cui si trovano.",
|
||||
"zoneObjectDetectionsTips": {
|
||||
"text": "Tutti gli oggetti {{detectionsLabels}} non categorizzati in {{zone}} su {{cameraName}} verranno mostrati come Rilevamenti.",
|
||||
"notSelectDetections": "Tutti gli oggetti {{detectionsLabels}} rilevati in {{zone}} su {{cameraName}} non classificati come Avvisi verranno mostrati come Rilevamenti, indipendentemente dalla zona in cui si trovano.",
|
||||
"regardlessOfZoneObjectDetectionsTips": "Tutti gli oggetti {{detectionsLabels}} non categorizzati su {{cameraName}} verranno mostrati come Rilevamenti, indipendentemente dalla zona in cui si trovano."
|
||||
},
|
||||
"unsavedChanges": "Impostazioni di classificazione delle revisioni non salvate per {{camera}}",
|
||||
"selectAlertsZones": "Seleziona le zone per gli Avvisi",
|
||||
"selectDetectionsZones": "Seleziona le zone per i Rilevamenti",
|
||||
"limitDetections": "Limita i rilevamenti a zone specifiche",
|
||||
"toast": {
|
||||
"success": "La configurazione della classificazione di revisione è stata salvata. Riavvia Frigate per applicare le modifiche."
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraWizard": {
|
||||
"step3": {
|
||||
"streamUnavailable": "Anteprima trasmissione non disponibile",
|
||||
"description": "Convalida e analisi finale prima di salvare la nuova telecamera. Connetti ogni flusso prima di salvare.",
|
||||
"validationTitle": "Convalida del flusso",
|
||||
"connectAllStreams": "Connetti tutti i flussi",
|
||||
"reconnectionSuccess": "Riconnessione riuscita.",
|
||||
"reconnectionPartial": "Alcuni flussi non sono riusciti a riconnettersi.",
|
||||
"reload": "Ricarica",
|
||||
"connecting": "Connessione...",
|
||||
"streamTitle": "Flusso {{number}}",
|
||||
"valid": "Convalida",
|
||||
"failed": "Fallito",
|
||||
"notTested": "Non verificata",
|
||||
"connectStream": "Connetti",
|
||||
"connectingStream": "Connessione",
|
||||
"disconnectStream": "Disconnetti",
|
||||
"estimatedBandwidth": "Larghezza di banda stimata",
|
||||
"roles": "Ruoli",
|
||||
"none": "Nessuno",
|
||||
"error": "Errore",
|
||||
"streamValidated": "Flusso {{number}} convalidato con successo",
|
||||
"streamValidationFailed": "Convalida del flusso {{number}} non riuscita",
|
||||
"saveAndApply": "Salva nuova telecamera",
|
||||
"saveError": "Configurazione non valida. Controlla le impostazioni.",
|
||||
"issues": {
|
||||
"title": "Convalida del flusso",
|
||||
"videoCodecGood": "Il codec video è {{codec}}.",
|
||||
"audioCodecGood": "Il codec audio è {{codec}}.",
|
||||
"noAudioWarning": "Nessun audio rilevato per questo flusso, le registrazioni non avranno audio.",
|
||||
"audioCodecRecordError": "Per supportare l'audio nelle registrazioni è necessario il codec audio AAC.",
|
||||
"audioCodecRequired": "Per supportare il rilevamento audio è necessario un flusso audio.",
|
||||
"restreamingWarning": "Riducendo le connessioni alla telecamera per il flusso di registrazione l'utilizzo della CPU potrebbe aumentare leggermente.",
|
||||
"dahua": {
|
||||
"substreamWarning": "Il flusso 1 è bloccato a bassa risoluzione. Molte telecamere Dahua/Amcrest/EmpireTech supportano flussi aggiuntivi che devono essere abilitati nelle impostazioni della telecamera. Si consiglia di controllare e utilizzare tali flussi, se disponibili."
|
||||
},
|
||||
"hikvision": {
|
||||
"substreamWarning": "Il flusso 1 è bloccato a bassa risoluzione. Molte telecamere Hikvision supportano flussi aggiuntivi che devono essere abilitati nelle impostazioni della telecamera. Si consiglia di controllare e utilizzare tali flussi, se disponibili."
|
||||
}
|
||||
}
|
||||
},
|
||||
"title": "Aggiungi telecamera",
|
||||
"description": "Per aggiungere una nuova telecamera alla tua installazione Frigate, segui i passaggi indicati di seguito.",
|
||||
"steps": {
|
||||
"nameAndConnection": "Nome e connessione",
|
||||
"streamConfiguration": "Configurazione flusso",
|
||||
"validationAndTesting": "Validazione e prova"
|
||||
},
|
||||
"save": {
|
||||
"success": "Nuova telecamera {{cameraName}} salvata correttamente.",
|
||||
"failure": "Errore durante il salvataggio di {{cameraName}}."
|
||||
},
|
||||
"testResultLabels": {
|
||||
"resolution": "Risoluzione",
|
||||
"video": "Video",
|
||||
"audio": "Audio",
|
||||
"fps": "FPS"
|
||||
},
|
||||
"commonErrors": {
|
||||
"noUrl": "Si prega di fornire un URL di flusso valido",
|
||||
"testFailed": "Prova del flusso fallita: {{error}}"
|
||||
},
|
||||
"step1": {
|
||||
"description": "Inserisci i dettagli della tua telecamera e verifica la connessione.",
|
||||
"cameraName": "Nome telecamera",
|
||||
"cameraNamePlaceholder": "ad esempio, porta_anteriore o Panoramica cortile",
|
||||
"host": "Indirizzo sistema/IP",
|
||||
"port": "Porta",
|
||||
"username": "Nome utente",
|
||||
"usernamePlaceholder": "Opzionale",
|
||||
"password": "Password",
|
||||
"passwordPlaceholder": "Opzionale",
|
||||
"selectTransport": "Seleziona il protocollo di trasmissione",
|
||||
"cameraBrand": "Marca telecamera",
|
||||
"selectBrand": "Seleziona la marca della telecamera per il modello URL",
|
||||
"customUrl": "URL del flusso personalizzato",
|
||||
"brandInformation": "Informazioni sul marchio",
|
||||
"brandUrlFormat": "Per le telecamere con formato URL RTSP come: {{exampleUrl}}",
|
||||
"customUrlPlaceholder": "rtsp://nomeutente:password@sistema:porta/percorso",
|
||||
"testConnection": "Prova connessione",
|
||||
"testSuccess": "Prova di connessione riuscita!",
|
||||
"testFailed": "Prova di connessione fallita. Controlla i dati immessi e riprova.",
|
||||
"streamDetails": "Dettagli del flusso",
|
||||
"warnings": {
|
||||
"noSnapshot": "Impossibile recuperare un'immagine dal flusso configurato."
|
||||
},
|
||||
"errors": {
|
||||
"brandOrCustomUrlRequired": "Seleziona una marca di telecamera con sistema/IP oppure scegli \"Altro\" con un URL personalizzato",
|
||||
"nameRequired": "Il nome della telecamera è obbligatorio",
|
||||
"nameLength": "Il nome della telecamera deve contenere al massimo 64 caratteri",
|
||||
"invalidCharacters": "Il nome della telecamera contiene caratteri non validi",
|
||||
"nameExists": "Il nome della telecamera esiste già",
|
||||
"brands": {
|
||||
"reolink-rtsp": "Reolink RTSP non è consigliato. Si consiglia di abilitare http nelle impostazioni della telecamera e riavviare la procedura guidata."
|
||||
}
|
||||
},
|
||||
"docs": {
|
||||
"reolink": "https://docs.frigate.video/configuration/camera_specific.html#reolink-cameras"
|
||||
}
|
||||
},
|
||||
"step2": {
|
||||
"description": "Configura i ruoli del flusso e aggiungi altri flussi per la tua telecamera.",
|
||||
"streamsTitle": "Flussi della telecamera",
|
||||
"addStream": "Aggiungi flusso",
|
||||
"addAnotherStream": "Aggiungi un altro flusso",
|
||||
"streamTitle": "Flusso {{number}}",
|
||||
"streamUrl": "URL del flusso",
|
||||
"streamUrlPlaceholder": "rtsp://nomeutente:password@sistema:porta/percorso",
|
||||
"url": "URL",
|
||||
"resolution": "Risoluzione",
|
||||
"selectResolution": "Seleziona la risoluzione",
|
||||
"quality": "Qualità",
|
||||
"selectQuality": "Seleziona la qualità",
|
||||
"roles": "Ruoli",
|
||||
"roleLabels": {
|
||||
"detect": "Rilevamento oggetti",
|
||||
"record": "Registrazione",
|
||||
"audio": "Audio"
|
||||
},
|
||||
"testStream": "Prova connessione",
|
||||
"testSuccess": "Prova del flusso riuscita!",
|
||||
"testFailed": "Prova del flusso fallita",
|
||||
"testFailedTitle": "Prova fallita",
|
||||
"connected": "Connessa",
|
||||
"notConnected": "Non connessa",
|
||||
"featuresTitle": "Caratteristiche",
|
||||
"go2rtc": "Riduci le connessioni alla telecamera",
|
||||
"detectRoleWarning": "Per procedere, almeno un flusso deve avere il ruolo \"rileva\".",
|
||||
"rolesPopover": {
|
||||
"title": "Ruoli del flusso",
|
||||
"detect": "Flusso principale per il rilevamento degli oggetti.",
|
||||
"record": "Salva segmenti del flusso video in base alle impostazioni di configurazione.",
|
||||
"audio": "Flusso per il rilevamento basato sull'audio."
|
||||
},
|
||||
"featuresPopover": {
|
||||
"title": "Caratteristiche del flusso",
|
||||
"description": "Utilizza la ritrasmissione go2rtc per ridurre le connessioni alla tua telecamera."
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraManagement": {
|
||||
"title": "Gestisci telecamere",
|
||||
"addCamera": "Aggiungi nuova telecamera",
|
||||
"editCamera": "Modifica telecamera:",
|
||||
"selectCamera": "Seleziona una telecamera",
|
||||
"backToSettings": "Torna alle impostazioni della telecamera",
|
||||
"streams": {
|
||||
"title": "Abilita/Disabilita telecamere",
|
||||
"desc": "Disattiva temporaneamente una telecamera fino al riavvio di Frigate. La disattivazione completa di una telecamera interrompe l'elaborazione dei flussi di questa telecamera da parte di Frigate. Rilevamento, registrazione e correzioni non saranno disponibili.<br /> <em>Nota: questa operazione non disattiva le ritrasmissioni di go2rtc.</em>"
|
||||
},
|
||||
"cameraConfig": {
|
||||
"add": "Aggiungi telecamera",
|
||||
"edit": "Modifica telecamera",
|
||||
"description": "Configura le impostazioni della telecamera, inclusi gli ingressi ed i ruoli dei flussi.",
|
||||
"name": "Nome telecamera",
|
||||
"nameRequired": "Il nome della telecamera è obbligatorio",
|
||||
"nameLength": "Il nome della telecamera deve contenere al massimo 64 caratteri.",
|
||||
"namePlaceholder": "ad esempio, porta_anteriore o Panoramica cortile",
|
||||
"toast": {
|
||||
"success": "La telecamera {{cameraName}} è stata salvata correttamente"
|
||||
},
|
||||
"enabled": "Abilitata",
|
||||
"ffmpeg": {
|
||||
"inputs": "Flussi di ingresso",
|
||||
"path": "Percorso del flusso",
|
||||
"pathRequired": "Il percorso del flusso è obbligatorio",
|
||||
"pathPlaceholder": "rtsp://...",
|
||||
"roles": "Ruoli",
|
||||
"rolesRequired": "È richiesto almeno un ruolo",
|
||||
"rolesUnique": "Ogni ruolo (audio, rilevamento, registrazione) può essere assegnato solo ad un flusso",
|
||||
"addInput": "Aggiungi flusso di ingresso",
|
||||
"removeInput": "Rimuovi flusso di ingresso",
|
||||
"inputsRequired": "È richiesto almeno un flusso di ingresso"
|
||||
},
|
||||
"go2rtcStreams": "Flussi go2rtc",
|
||||
"streamUrls": "URL dei flussi",
|
||||
"addUrl": "Aggiungi URL",
|
||||
"addGo2rtcStream": "Aggiungi flusso go2rtc"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,6 +77,14 @@
|
||||
"length": {
|
||||
"feet": "フィート",
|
||||
"meters": "メートル"
|
||||
},
|
||||
"data": {
|
||||
"gbph": "GB/hour",
|
||||
"gbps": "GB/s",
|
||||
"kbph": "kB/hour",
|
||||
"kbps": "kB/s",
|
||||
"mbph": "MB/hour",
|
||||
"mbps": "MB/s"
|
||||
}
|
||||
},
|
||||
"label": {
|
||||
@@ -256,5 +264,8 @@
|
||||
"title": "404",
|
||||
"desc": "ページが見つかりません"
|
||||
},
|
||||
"selectItem": "{{item}} を選択"
|
||||
"selectItem": "{{item}} を選択",
|
||||
"information": {
|
||||
"pixels": "{{area}}ピクセル"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -105,7 +105,8 @@
|
||||
"button": {
|
||||
"export": "書き出し",
|
||||
"markAsReviewed": "レビュー済みにする",
|
||||
"deleteNow": "今すぐ削除"
|
||||
"deleteNow": "今すぐ削除",
|
||||
"markAsUnreviewed": "未レビューに戻す"
|
||||
}
|
||||
},
|
||||
"imagePicker": {
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
}
|
||||
},
|
||||
"downloadingModels": {
|
||||
"context": "Frigate はセマンティック検索をサポートするために必要な埋め込みモデルをダウンロードしています。ネットワーク速度により数分かかる場合があります。",
|
||||
"context": "Frigate はセマンティック検索(意味理解型画像検索)をサポートするために必要な埋め込みモデルをダウンロードしています。ネットワーク速度により数分かかる場合があります。",
|
||||
"setup": {
|
||||
"visionModel": "ビジョンモデル",
|
||||
"visionModelFeatureExtractor": "ビジョンモデル特徴抽出器",
|
||||
|
||||
@@ -65,7 +65,7 @@
|
||||
"selectImage": "画像ファイルを選択してください。"
|
||||
},
|
||||
"dropActive": "ここに画像をドロップ…",
|
||||
"dropInstructions": "画像をここにドラッグ&ドロップ、またはクリックして選択",
|
||||
"dropInstructions": "画像をここにドラッグ&ドロップ、ペースト、またはクリックして選択",
|
||||
"maxSize": "最大サイズ: {{size}}MB"
|
||||
},
|
||||
"nofaces": "顔はありません",
|
||||
|
||||
@@ -91,7 +91,7 @@
|
||||
},
|
||||
"manualRecording": {
|
||||
"title": "オンデマンド録画",
|
||||
"tips": "このカメラの録画保持設定に基づく手動イベントを開始します。",
|
||||
"tips": "このカメラの録画保持設定に基づいて、即時スナップショットをダウンロードするか、手動イベントを開始してください。",
|
||||
"playInBackground": {
|
||||
"label": "バックグラウンドで再生",
|
||||
"desc": "プレーヤーが非表示の場合でもストリーミングを継続するにはこのオプションを有効にします。"
|
||||
@@ -136,6 +136,9 @@
|
||||
"playInBackground": {
|
||||
"label": "バックグラウンドで再生",
|
||||
"tips": "プレーヤーが非表示でもストリーミングを継続するにはこのオプションを有効にします。"
|
||||
},
|
||||
"debug": {
|
||||
"picker": "デバッグモードではストリームの選択はできません。デバッグビューは常に 検出ロールに割り当てられたストリームを使用します。"
|
||||
}
|
||||
},
|
||||
"cameraSettings": {
|
||||
@@ -165,5 +168,16 @@
|
||||
"label": "カメラグループを編集"
|
||||
},
|
||||
"exitEdit": "編集を終了"
|
||||
},
|
||||
"noCameras": {
|
||||
"title": "カメラが設定されていません",
|
||||
"buttonText": "カメラを追加",
|
||||
"description": "開始するには、カメラを接続してください。"
|
||||
},
|
||||
"snapshot": {
|
||||
"takeSnapshot": "即時スナップショットをダウンロード",
|
||||
"noVideoSource": "スナップショットに使用できる映像ソースがありません。",
|
||||
"captureFailed": "スナップショットの取得に失敗しました。",
|
||||
"downloadStarted": "スナップショットのダウンロードを開始しました。"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,17 +3,19 @@
|
||||
"authentication": "認証設定 - Frigate",
|
||||
"camera": "カメラ設定 - Frigate",
|
||||
"default": "設定 - Frigate",
|
||||
"enrichments": "エンリッチメント設定 - Frigate",
|
||||
"enrichments": "高度解析設定 - Frigate",
|
||||
"masksAndZones": "マスク/ゾーンエディタ - Frigate",
|
||||
"motionTuner": "モーションチューナー - Frigate",
|
||||
"object": "デバッグ - Frigate",
|
||||
"general": "一般設定 - Frigate",
|
||||
"frigatePlus": "Frigate+ 設定 - Frigate",
|
||||
"notifications": "通知設定 - Frigate"
|
||||
"notifications": "通知設定 - Frigate",
|
||||
"cameraManagement": "カメラ設定 - Frigate",
|
||||
"cameraReview": "カメラレビュー設定 - Frigate"
|
||||
},
|
||||
"menu": {
|
||||
"ui": "UI",
|
||||
"enrichments": "エンリッチメント",
|
||||
"enrichments": "高度解析",
|
||||
"cameras": "カメラ設定",
|
||||
"masksAndZones": "マスク/ゾーン",
|
||||
"motionTuner": "モーションチューナー",
|
||||
@@ -21,7 +23,10 @@
|
||||
"debug": "デバッグ",
|
||||
"users": "ユーザー",
|
||||
"notifications": "通知",
|
||||
"frigateplus": "Frigate+"
|
||||
"frigateplus": "Frigate+",
|
||||
"cameraManagement": "管理",
|
||||
"cameraReview": "レビュー",
|
||||
"roles": "区分"
|
||||
},
|
||||
"dialog": {
|
||||
"unsavedChanges": {
|
||||
@@ -84,8 +89,8 @@
|
||||
}
|
||||
},
|
||||
"enrichments": {
|
||||
"title": "エンリッチメント設定",
|
||||
"unsavedChanges": "未保存のエンリッチメント設定の変更",
|
||||
"title": "高度解析設定",
|
||||
"unsavedChanges": "未保存の高度解析設定の変更",
|
||||
"birdClassification": {
|
||||
"title": "鳥類分類",
|
||||
"desc": "量子化された TensorFlow モデルを使って既知の鳥を識別します。既知の鳥を認識した場合、その一般名を sub_label として追加します。この情報は UI、フィルタ、通知に含まれます。"
|
||||
@@ -136,9 +141,9 @@
|
||||
"title": "ナンバープレート認識",
|
||||
"desc": "車両のナンバープレートを認識し、検出文字列を recognized_license_plate フィールドへ、または既知の名称を car タイプのオブジェクトの sub_label として自動追加できます。一般的な用途として、私道に入ってくる車や道路を通過する車のナンバー読み取りがあります。"
|
||||
},
|
||||
"restart_required": "再起動が必要です(エンリッチメント設定を変更)",
|
||||
"restart_required": "再起動が必要です(高度解析設定を変更)",
|
||||
"toast": {
|
||||
"success": "エンリッチメント設定を保存しました。変更を適用するには Frigate を再起動してください。",
|
||||
"success": "高度解析設定を保存しました。変更を適用するには Frigate を再起動してください。",
|
||||
"error": "設定変更の保存に失敗しました: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
@@ -793,6 +798,11 @@
|
||||
"error": {
|
||||
"min": "少なくとも1つのアクションを選択してください。"
|
||||
}
|
||||
},
|
||||
"friendly_name": {
|
||||
"title": "表示名",
|
||||
"placeholder": "このトリガーの名前または説明",
|
||||
"description": "このトリガーの表示名または説明文"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -807,6 +817,227 @@
|
||||
"updateTriggerFailed": "トリガーの更新に失敗しました: {{errorMessage}}",
|
||||
"deleteTriggerFailed": "トリガーの削除に失敗しました: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"semanticSearch": {
|
||||
"desc": "トリガーを使用するにはセマンティック検索を有効にする必要があります。",
|
||||
"title": "セマンティック検索が無効です"
|
||||
}
|
||||
},
|
||||
"cameraWizard": {
|
||||
"step3": {
|
||||
"saveAndApply": "新しいカメラを保存",
|
||||
"description": "保存前の最終検証と解析。保存する前に各ストリームを接続してください。",
|
||||
"validationTitle": "ストリーム検証",
|
||||
"connectAllStreams": "すべてのストリームを接続",
|
||||
"reconnectionSuccess": "再接続に成功しました。",
|
||||
"reconnectionPartial": "一部のストリームの再接続に失敗しました。",
|
||||
"streamUnavailable": "ストリームプレビューは利用できません",
|
||||
"reload": "再読み込み",
|
||||
"connecting": "接続中…",
|
||||
"streamTitle": "ストリーム {{number}}",
|
||||
"valid": "有効",
|
||||
"failed": "失敗",
|
||||
"notTested": "未テスト",
|
||||
"connectStream": "接続",
|
||||
"connectingStream": "接続中",
|
||||
"disconnectStream": "切断",
|
||||
"estimatedBandwidth": "推定帯域幅",
|
||||
"roles": "ロール",
|
||||
"none": "なし",
|
||||
"error": "エラー",
|
||||
"streamValidated": "ストリーム {{number}} の検証に成功しました",
|
||||
"streamValidationFailed": "ストリーム {{number}} の検証に失敗しました",
|
||||
"saveError": "無効な構成です。設定を確認してください。",
|
||||
"issues": {
|
||||
"title": "ストリーム検証",
|
||||
"videoCodecGood": "ビデオコーデックは {{codec}} です。",
|
||||
"audioCodecGood": "オーディオコーデックは {{codec}} です。",
|
||||
"noAudioWarning": "このストリームでは音声が検出されません。録画には音声が含まれません。",
|
||||
"audioCodecRecordError": "録画に音声を含めるには AAC オーディオコーデックが必要です。",
|
||||
"audioCodecRequired": "音声検出を有効にするには音声ストリームが必要です。",
|
||||
"restreamingWarning": "録画ストリームでカメラへの接続数を減らすと、CPU 使用率がわずかに増加する場合があります。",
|
||||
"hikvision": {
|
||||
"substreamWarning": "サブストリーム1は低解像度に固定されています。多くの Hikvision 製カメラでは、追加のサブストリームが利用可能であり、カメラ本体の設定で有効化する必要があります。使用できる場合は、それらのストリームを確認して活用することを推奨します。"
|
||||
},
|
||||
"dahua": {
|
||||
"substreamWarning": "サブストリーム1は低解像度に固定されています。多くの Dahua/Amcrest/EmpireTech 製カメラでは、追加のサブストリームが利用可能であり、カメラ本体の設定で有効化する必要があります。使用できる場合は、それらのストリームを確認して活用することを推奨します。"
|
||||
}
|
||||
}
|
||||
},
|
||||
"title": "カメラを追加",
|
||||
"description": "以下の手順に従って、Frigate に新しいカメラを追加します。",
|
||||
"steps": {
|
||||
"nameAndConnection": "名称と接続",
|
||||
"streamConfiguration": "ストリーム設定",
|
||||
"validationAndTesting": "検証とテスト"
|
||||
},
|
||||
"save": {
|
||||
"success": "新しいカメラ {{cameraName}} を保存しました。",
|
||||
"failure": "保存エラー: {{cameraName}}。"
|
||||
},
|
||||
"testResultLabels": {
|
||||
"resolution": "解像度",
|
||||
"video": "ビデオ",
|
||||
"audio": "オーディオ",
|
||||
"fps": "FPS"
|
||||
},
|
||||
"commonErrors": {
|
||||
"noUrl": "有効なストリーム URL を入力してください",
|
||||
"testFailed": "ストリームテストに失敗しました: {{error}}"
|
||||
},
|
||||
"step1": {
|
||||
"description": "カメラの詳細を入力し、接続テストを実行します。",
|
||||
"cameraName": "カメラ名",
|
||||
"cameraNamePlaceholder": "例: front_door または Back Yard Overview",
|
||||
"host": "ホスト/IP アドレス",
|
||||
"port": "ポート",
|
||||
"username": "ユーザー名",
|
||||
"usernamePlaceholder": "任意",
|
||||
"password": "パスワード",
|
||||
"passwordPlaceholder": "任意",
|
||||
"selectTransport": "トランスポートプロトコルを選択",
|
||||
"cameraBrand": "カメラブランド",
|
||||
"selectBrand": "URL テンプレート用のカメラブランドを選択",
|
||||
"customUrl": "カスタムストリーム URL",
|
||||
"brandInformation": "ブランド情報",
|
||||
"brandUrlFormat": "RTSP URL 形式が {{exampleUrl}} のカメラ向け",
|
||||
"customUrlPlaceholder": "rtsp://username:password@host:port/path",
|
||||
"testConnection": "接続テスト",
|
||||
"testSuccess": "接続テストに成功しました!",
|
||||
"testFailed": "接続テストに失敗しました。入力内容を確認して再試行してください。",
|
||||
"streamDetails": "ストリーム詳細",
|
||||
"warnings": {
|
||||
"noSnapshot": "設定されたストリームからスナップショットを取得できません。"
|
||||
},
|
||||
"errors": {
|
||||
"brandOrCustomUrlRequired": "ホスト/IP とブランドを選択するか、「その他」を選んでカスタム URL を指定してください",
|
||||
"nameRequired": "カメラ名は必須です",
|
||||
"nameLength": "カメラ名は64文字以下である必要があります",
|
||||
"invalidCharacters": "カメラ名に無効な文字が含まれています",
|
||||
"nameExists": "このカメラ名は既に存在します",
|
||||
"brands": {
|
||||
"reolink-rtsp": "Reolink の RTSP は推奨されません。カメラ設定で http を有効にし、カメラウィザードを再起動することを推奨します。"
|
||||
}
|
||||
},
|
||||
"docs": {
|
||||
"reolink": "https://docs.frigate.video/configuration/camera_specific.html#reolink-cameras"
|
||||
}
|
||||
},
|
||||
"step2": {
|
||||
"description": "ストリームのロールを設定し、必要に応じて追加ストリームを登録します。",
|
||||
"streamsTitle": "カメラストリーム",
|
||||
"addStream": "ストリームを追加",
|
||||
"addAnotherStream": "ストリームをさらに追加",
|
||||
"streamTitle": "ストリーム {{number}}",
|
||||
"streamUrl": "ストリーム URL",
|
||||
"streamUrlPlaceholder": "rtsp://username:password@host:port/path",
|
||||
"url": "URL",
|
||||
"resolution": "解像度",
|
||||
"selectResolution": "解像度を選択",
|
||||
"quality": "品質",
|
||||
"selectQuality": "品質を選択",
|
||||
"roles": "ロール",
|
||||
"roleLabels": {
|
||||
"detect": "物体検出",
|
||||
"record": "録画",
|
||||
"audio": "音声"
|
||||
},
|
||||
"testStream": "接続テスト",
|
||||
"testSuccess": "ストリームテストに成功しました!",
|
||||
"testFailed": "ストリームテストに失敗しました",
|
||||
"testFailedTitle": "テスト失敗",
|
||||
"connected": "接続済み",
|
||||
"notConnected": "未接続",
|
||||
"featuresTitle": "機能",
|
||||
"go2rtc": "カメラへの接続数を削減",
|
||||
"detectRoleWarning": "\"detect\" ロールを持つストリームが少なくとも1つ必要です。",
|
||||
"rolesPopover": {
|
||||
"title": "ストリームロール",
|
||||
"detect": "物体検出のメインフィード。",
|
||||
"record": "設定に基づいて映像フィードのセグメントを保存します。",
|
||||
"audio": "音声検出用のフィード。"
|
||||
},
|
||||
"featuresPopover": {
|
||||
"title": "ストリーム機能",
|
||||
"description": "go2rtc のリストリーミングを使用してカメラへの接続数を削減します。"
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraManagement": {
|
||||
"title": "カメラ管理",
|
||||
"addCamera": "新しいカメラを追加",
|
||||
"editCamera": "カメラを編集:",
|
||||
"selectCamera": "カメラを選択",
|
||||
"backToSettings": "カメラ設定に戻る",
|
||||
"streams": {
|
||||
"title": "カメラの有効化/無効化",
|
||||
"desc": "Frigate を再起動するまで一時的にカメラを無効化します。無効化すると、このカメラのストリーム処理は完全に停止し、検出・録画・デバッグは利用できません。<br /> <em>注: これは go2rtc のリストリームを無効にはしません。</em>"
|
||||
},
|
||||
"cameraConfig": {
|
||||
"add": "カメラを追加",
|
||||
"edit": "カメラを編集",
|
||||
"description": "ストリーム入力とロールを含むカメラ設定を構成します。",
|
||||
"name": "カメラ名",
|
||||
"nameRequired": "カメラ名は必須です",
|
||||
"nameLength": "カメラ名は64文字未満である必要があります。",
|
||||
"namePlaceholder": "例: front_door または Back Yard Overview",
|
||||
"enabled": "有効",
|
||||
"ffmpeg": {
|
||||
"inputs": "入力ストリーム",
|
||||
"path": "ストリームパス",
|
||||
"pathRequired": "ストリームパスは必須です",
|
||||
"pathPlaceholder": "rtsp://...",
|
||||
"roles": "ロール",
|
||||
"rolesRequired": "少なくとも1つのロールが必要です",
|
||||
"rolesUnique": "各ロール(audio、detect、record)は1つのストリームにのみ割り当て可能です",
|
||||
"addInput": "入力ストリームを追加",
|
||||
"removeInput": "入力ストリームを削除",
|
||||
"inputsRequired": "少なくとも1つの入力ストリームが必要です"
|
||||
},
|
||||
"go2rtcStreams": "go2rtc ストリーム",
|
||||
"streamUrls": "ストリーム URL",
|
||||
"addUrl": "URL を追加",
|
||||
"addGo2rtcStream": "go2rtc ストリームを追加",
|
||||
"toast": {
|
||||
"success": "カメラ {{cameraName}} を保存しました"
|
||||
}
|
||||
}
|
||||
},
|
||||
"cameraReview": {
|
||||
"title": "カメラレビュー設定",
|
||||
"object_descriptions": {
|
||||
"title": "生成AIによるオブジェクト説明",
|
||||
"desc": "このカメラに対する生成AIのオブジェクト説明を一時的に有効/無効にします。無効にすると、このカメラの追跡オブジェクトについてAI生成の説明は要求されません。"
|
||||
},
|
||||
"review_descriptions": {
|
||||
"title": "生成AIによるレビュー説明",
|
||||
"desc": "このカメラに対する生成AIのレビュー説明を一時的に有効/無効にします。無効にすると、このカメラのレビュー項目についてAI生成の説明は要求されません。"
|
||||
},
|
||||
"review": {
|
||||
"title": "レビュー",
|
||||
"desc": "Frigate を再起動するまで、このカメラのアラートと検出を一時的に有効/無効にします。無効にすると、新しいレビュー項目は生成されません。 ",
|
||||
"alerts": "アラート ",
|
||||
"detections": "検出 "
|
||||
},
|
||||
"reviewClassification": {
|
||||
"title": "レビュー分類",
|
||||
"desc": "Frigate はレビュー項目をアラートと検出に分類します。既定では、すべての <em>person</em> と <em>car</em> オブジェクトはアラートとして扱われます。必須ゾーンを設定することで、分類をより細かく調整できます。",
|
||||
"noDefinedZones": "このカメラにはゾーンが定義されていません。",
|
||||
"objectAlertsTips": "すべての {{alertsLabels}} オブジェクトは {{cameraName}} でアラートとして表示されます。",
|
||||
"zoneObjectAlertsTips": "{{cameraName}} の {{zone}} で検出されたすべての {{alertsLabels}} オブジェクトはアラートとして表示されます。",
|
||||
"objectDetectionsTips": "{{cameraName}} で分類されていないすべての {{detectionsLabels}} オブジェクトは、どのゾーンにあっても検出として表示されます。",
|
||||
"zoneObjectDetectionsTips": {
|
||||
"text": "{{cameraName}} の {{zone}} で分類されていないすべての {{detectionsLabels}} オブジェクトは検出として表示されます。",
|
||||
"notSelectDetections": "{{cameraName}} の {{zone}} で検出され、アラートに分類されなかったすべての {{detectionsLabels}} オブジェクトは、ゾーンに関係なく検出として表示されます。",
|
||||
"regardlessOfZoneObjectDetectionsTips": "{{cameraName}} で分類されていないすべての {{detectionsLabels}} オブジェクトは、どのゾーンにあっても検出として表示されます。"
|
||||
},
|
||||
"unsavedChanges": "未保存のレビュー分類設定({{camera}})",
|
||||
"selectAlertsZones": "アラート用のゾーンを選択",
|
||||
"selectDetectionsZones": "検出用のゾーンを選択",
|
||||
"limitDetections": "特定のゾーンに検出を限定する",
|
||||
"toast": {
|
||||
"success": "レビュー分類の設定を保存しました。変更を適用するには Frigate を再起動してください。"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
"cameras": "カメラ統計 - Frigate",
|
||||
"general": "一般統計 - Frigate",
|
||||
"storage": "ストレージ統計 - Frigate",
|
||||
"enrichments": "エンリッチメント統計 - Frigate",
|
||||
"enrichments": "高度解析統計 - Frigate",
|
||||
"logs": {
|
||||
"frigate": "Frigate ログ - Frigate",
|
||||
"go2rtc": "Go2RTC ログ - Frigate",
|
||||
@@ -38,7 +38,7 @@
|
||||
"general": {
|
||||
"title": "全般",
|
||||
"detector": {
|
||||
"title": "ディテクタ",
|
||||
"title": "検出器",
|
||||
"inferenceSpeed": "ディテクタ推論速度",
|
||||
"temperature": "ディテクタ温度",
|
||||
"cpuUsage": "ディテクタの CPU 使用率",
|
||||
@@ -167,7 +167,7 @@
|
||||
"shmTooLow": "/dev/shm の割り当て({{total}} MB)は少なくとも {{min}} MB に増やす必要があります。"
|
||||
},
|
||||
"enrichments": {
|
||||
"title": "エンリッチメント",
|
||||
"title": "高度解析",
|
||||
"infPerSecond": "毎秒推論回数",
|
||||
"embeddings": {
|
||||
"image_embedding": "画像埋め込み",
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user