diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index d7bc37ace..65f6e5442 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -359,10 +359,14 @@ The YOLOv9 license plate detector model will run (and the metric will appear) if If you are detecting `car` or `motorcycle` on cameras where you don't want to run LPR, make sure you disable LPR at the camera level. And if you do want to run LPR on those cameras, make sure you define `license_plate` as an object to track. -### It looks like Frigate picked up my camera's timestamp as the license plate. How can I prevent this? +### It looks like Frigate picked up my camera's timestamp or overlay text as the license plate. How can I prevent this? -This could happen if cars or motorcycles travel close to your camera's timestamp. You could either move the timestamp through your camera's firmware, or apply a mask to it in Frigate. +This could happen if cars or motorcycles travel close to your camera's timestamp or overlay text. You could either move the text through your camera's firmware, or apply a mask to it in Frigate. -If you are using a model that natively detects `license_plate`, add an _object mask_ of type `license_plate` and a _motion mask_ over your timestamp. +If you are using a model that natively detects `license_plate`, add an _object mask_ of type `license_plate` and a _motion mask_ over your text. -If you are using dedicated LPR camera mode, only a _motion mask_ over your timestamp is required. +If you are not using a model that natively detects `license_plate` or you are using dedicated LPR camera mode, only a _motion mask_ over your text is required. + +### I see "Error running ... model" in my logs. How can I fix this? + +This usually happens when your GPU is unable to compile or use one of the LPR models. Set your `device` to `CPU` and try again.
GPU acceleration only provides a slight performance increase, and the models are lightweight enough to run without issue on most CPUs. diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index 0228f17d8..cd065ede5 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -2926,6 +2926,8 @@ paths: tags: - Media summary: Recording Clip + description: >- + For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files. operationId: recording_clip__camera_name__start__start_ts__end__end_ts__clip_mp4_get parameters: - name: camera_name diff --git a/frigate/api/media.py b/frigate/api/media.py index 27d87770a..476c8349f 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -541,7 +541,10 @@ def recordings( return JSONResponse(content=list(recordings)) -@router.get("/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4") +@router.get( + "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", + description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. 
Safari does not reliably process progressive mp4 files.",
+)
 def recording_clip(
     request: Request,
     camera_name: str,
diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py
index 8dea639d6..f4ff08644 100644
--- a/frigate/data_processing/common/license_plate/mixin.py
+++ b/frigate/data_processing/common/license_plate/mixin.py
@@ -79,7 +79,12 @@ class LicensePlateProcessingMixin:
             resized_image,
         )
 
-        outputs = self.model_runner.detection_model([normalized_image])[0]
+        try:
+            outputs = self.model_runner.detection_model([normalized_image])[0]
+        except Exception as e:
+            logger.warning(f"Error running LPR box detection model: {e}")
+            return []
+
         outputs = outputs[0, :, :]
 
         if False:
@@ -115,7 +120,11 @@ class LicensePlateProcessingMixin:
             norm_img = norm_img[np.newaxis, :]
             norm_images.append(norm_img)
 
-        outputs = self.model_runner.classification_model(norm_images)
+        try:
+            outputs = self.model_runner.classification_model(norm_images)
+        except Exception as e:
+            logger.warning(f"Error running LPR classification model: {e}")
+            return
 
         return self._process_classification_output(images, outputs)
 
@@ -152,7 +161,10 @@ class LicensePlateProcessingMixin:
             norm_image = norm_image[np.newaxis, :]
             norm_images.append(norm_image)
 
-        outputs = self.model_runner.recognition_model(norm_images)
+        try:
+            outputs = self.model_runner.recognition_model(norm_images)
+        except Exception as e:
+            logger.warning(f"Error running LPR recognition model: {e}"); return []
         return self.ctc_decoder(outputs)
 
     def _process_license_plate(
@@ -968,7 +980,11 @@ class LicensePlateProcessingMixin:
         Return the dimensions of the detected plate as [x1, y1, x2, y2].
""" - predictions = self.model_runner.yolov9_detection_model(input) + try: + predictions = self.model_runner.yolov9_detection_model(input) + except Exception as e: + logger.warning(f"Error running YOLOv9 license plate detection model: {e}") + return None confidence_threshold = self.lpr_config.detection_threshold @@ -1281,6 +1297,10 @@ class LicensePlateProcessingMixin: return rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) + + # apply motion mask + rgb[self.config.cameras[camera].motion.mask == 0] = [0, 0, 0] + left, top, right, bottom = car_box car = rgb[top:bottom, left:right] diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json index 289ff40cf..5ba744f15 100644 --- a/web/public/locales/en/views/faceLibrary.json +++ b/web/public/locales/en/views/faceLibrary.json @@ -29,7 +29,8 @@ }, "train": { "title": "Train", - "aria": "Select train" + "aria": "Select train", + "empty": "There are no recent face recognition attempts" }, "selectItem": "Select {{item}}", "selectFace": "Select Face", diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx index d1917cf05..7a144b53a 100644 --- a/web/src/components/overlay/detail/SearchDetailDialog.tsx +++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx @@ -1234,55 +1234,58 @@ export function VideoTab({ search }: VideoTabProps) { const source = `${baseUrl}vod/${search.camera}/start/${search.start_time}/end/${endTime}/index.m3u8`; return ( - - {reviewItem && ( -
- - - { - if (reviewItem?.id) { - const params = new URLSearchParams({ - id: reviewItem.id, - }).toString(); - navigate(`/review?${params}`); - } - }} - > - - - - - - {t("itemMenu.viewInHistory.label")} - - - - - - - - + <> + + + {reviewItem && ( +
+ + + { + if (reviewItem?.id) { + const params = new URLSearchParams({ + id: reviewItem.id, + }).toString(); + navigate(`/review?${params}`); + } + }} + > + - - - - - {t("button.download", { ns: "common" })} - - - -
- )} - +
+ + + {t("itemMenu.viewInHistory.label")} + + +
+ + + + + + + + + + + {t("button.download", { ns: "common" })} + + + +
+ )} +
+ ); } diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index 46c90214b..b3c28e7c0 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -46,6 +46,7 @@ import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { isDesktop, isMobile } from "react-device-detect"; import { useTranslation } from "react-i18next"; import { + LuFolderCheck, LuImagePlus, LuInfo, LuPencil, @@ -69,7 +70,7 @@ export default function FaceLibrary() { document.title = t("documentTitle"); }, [t]); - const [page, setPage] = useState(); + const [page, setPage] = useState("train"); const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); // face data @@ -92,20 +93,6 @@ export default function FaceLibrary() { [faceData], ); - useEffect(() => { - if (!pageToggle) { - if (trainImages.length > 0) { - setPageToggle("train"); - } else if (faces) { - setPageToggle(faces[0]); - } - } else if (pageToggle == "train" && trainImages.length == 0) { - setPageToggle(faces[0]); - } - // we need to listen on the value of the faces list - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [trainImages, faces]); - // upload const [upload, setUpload] = useState(false); @@ -257,26 +244,29 @@ export default function FaceLibrary() { // keyboard - useKeyboardListener(["a", "Escape"], (key, modifiers) => { - if (modifiers.repeat || !modifiers.down) { - return; - } + useKeyboardListener( + page === "train" ? 
["a", "Escape"] : [], + (key, modifiers) => { + if (modifiers.repeat || !modifiers.down) { + return; + } - switch (key) { - case "a": - if (modifiers.ctrl) { - if (selectedFaces.length) { - setSelectedFaces([]); - } else { - setSelectedFaces([...trainImages]); + switch (key) { + case "a": + if (modifiers.ctrl) { + if (selectedFaces.length) { + setSelectedFaces([]); + } else { + setSelectedFaces([...trainImages]); + } } - } - break; - case "Escape": - setSelectedFaces([]); - break; - } - }); + break; + case "Escape": + setSelectedFaces([]); + break; + } + }, + ); if (!config) { return ; @@ -371,7 +361,7 @@ type LibrarySelectorProps = { faceData?: FaceLibraryData; faces: string[]; trainImages: string[]; - setPageToggle: (toggle: string | undefined) => void; + setPageToggle: (toggle: string) => void; onDelete: (name: string, ids: string[], isName: boolean) => void; onRename: (old_name: string, new_name: string) => void; }; @@ -463,18 +453,16 @@ function LibrarySelector({ className="scrollbar-container max-h-[40dvh] min-w-[220px] overflow-y-auto" align="start" > - {trainImages.length > 0 && ( - setPageToggle("train")} - > -
{t("train.title")}
-
- ({trainImages.length}) -
-
- )} + setPageToggle("train")} + > +
{t("train.title")}
+
+ ({trainImages.length}) +
+
{trainImages.length > 0 && faces.length > 0 && ( <> @@ -624,6 +612,15 @@ function TrainingGrid({ config?.ui.timezone, ); + if (attemptImages.length == 0) { + return ( +
+ + {t("train.empty")} +
+ ); + } + return ( <>