* Remove tracked object update resets

* Adjust face blur reduction

* Add space for config editor buttons

* Slight adjustment

* Fix double thats

* update icons

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
This commit is contained in:
Nicolas Mowen
2025-06-19 09:20:33 -06:00
committed by GitHub
parent 42cae5d9ee
commit fe571dc217
7 changed files with 26 additions and 41 deletions

View File

@@ -23,7 +23,7 @@ If you are using go2rtc, you should adjust the following settings in your camera
- Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_, as these non-standard codecs remove keyframes (see below).
- Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio.
- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well.
The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information.

View File

@@ -108,21 +108,24 @@ class FaceRecognizer(ABC):
image, M, (output_width, output_height), flags=cv2.INTER_CUBIC image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
) )
def get_blur_confidence_reduction(self, input: np.ndarray) -> tuple[float, float]: def get_blur_confidence_reduction(self, input: np.ndarray) -> float:
"""Calculates the reduction in confidence based on the blur of the image.""" """Calculates the reduction in confidence based on the blur of the image."""
if not self.config.face_recognition.blur_confidence_filter: if not self.config.face_recognition.blur_confidence_filter:
return 0, 0.0 return 0.0
variance = cv2.Laplacian(input, cv2.CV_64F).var() variance = cv2.Laplacian(input, cv2.CV_64F).var()
logger.debug(f"face detected with blurriness {variance}")
if variance < 80: # image is very blurry if variance < 120: # image is very blurry
return variance, 0.05 return 0.06
elif variance < 100: # image moderately blurry elif variance < 160: # image moderately blurry
return variance, 0.03 return 0.04
elif variance < 150: # image is slightly blurry elif variance < 200: # image is slightly blurry
return variance, 0.01 return 0.02
elif variance < 250: # image is mostly clear
return 0.01
else: else:
return variance, 0.0 return 0.0
def similarity_to_confidence( def similarity_to_confidence(
@@ -234,8 +237,7 @@ class FaceNetRecognizer(FaceRecognizer):
# face recognition is best run on grayscale images # face recognition is best run on grayscale images
# get blur factor before aligning face # get blur factor before aligning face
variance, blur_reduction = self.get_blur_confidence_reduction(face_image) blur_reduction = self.get_blur_confidence_reduction(face_image)
logger.debug(f"face detected with blurriness {variance}")
# align face and run recognition # align face and run recognition
img = self.align_face(face_image, face_image.shape[1], face_image.shape[0]) img = self.align_face(face_image, face_image.shape[1], face_image.shape[0])
@@ -345,8 +347,7 @@ class ArcFaceRecognizer(FaceRecognizer):
# face recognition is best run on grayscale images # face recognition is best run on grayscale images
# get blur reduction before aligning face # get blur reduction before aligning face
variance, blur_reduction = self.get_blur_confidence_reduction(face_image) blur_reduction = self.get_blur_confidence_reduction(face_image)
logger.debug(f"face detected with blurriness {variance}")
# align face and run recognition # align face and run recognition
img = self.align_face(face_image, face_image.shape[1], face_image.shape[0]) img = self.align_face(face_image, face_image.shape[1], face_image.shape[0])

View File

@@ -1579,19 +1579,6 @@ class LicensePlateProcessingMixin:
if object_id in self.camera_current_cars.get(camera, []): if object_id in self.camera_current_cars.get(camera, []):
self.camera_current_cars[camera].remove(object_id) self.camera_current_cars[camera].remove(object_id)
if len(self.camera_current_cars[camera]) == 0:
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.lpr,
"name": None,
"plate": None,
"camera": camera,
}
),
)
class CTCDecoder: class CTCDecoder:
""" """

View File

@@ -444,18 +444,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
if object_id in self.camera_current_people.get(camera, []): if object_id in self.camera_current_people.get(camera, []):
self.camera_current_people[camera].remove(object_id) self.camera_current_people[camera].remove(object_id)
if len(self.camera_current_people[camera]) == 0:
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.face,
"name": None,
"camera": camera,
}
),
)
def weighted_average( def weighted_average(
self, results_list: list[tuple[str, float, int]], max_weight: int = 4000 self, results_list: list[tuple[str, float, int]], max_weight: int = 4000
): ):

View File

@@ -339,7 +339,7 @@
"clickDrawPolygon": "Click to draw a polygon on the image.", "clickDrawPolygon": "Click to draw a polygon on the image.",
"objects": { "objects": {
"title": "Objects", "title": "Objects",
"desc": "The object type that that applies to this object mask.", "desc": "The object type that applies to this object mask.",
"allObjectTypes": "All object types" "allObjectTypes": "All object types"
}, },
"toast": { "toast": {

View File

@@ -265,7 +265,7 @@ function ConfigEditor() {
</div> </div>
</div> </div>
<div className="flex flex-1 flex-col overflow-hidden"> <div className="mt-2 flex flex-1 flex-col overflow-hidden">
{error && ( {error && (
<div className="mt-2 max-h-[30%] min-h-[2.5rem] overflow-auto whitespace-pre-wrap border-2 border-muted bg-background_alt p-4 text-sm text-danger md:max-h-[40%]"> <div className="mt-2 max-h-[30%] min-h-[2.5rem] overflow-auto whitespace-pre-wrap border-2 border-muted bg-background_alt p-4 text-sm text-danger md:max-h-[40%]">
{error} {error}

View File

@@ -26,11 +26,13 @@ import {
GiDeer, GiDeer,
GiFox, GiFox,
GiGoat, GiGoat,
GiKangaroo,
GiPolarBear, GiPolarBear,
GiPostStamp, GiPostStamp,
GiRabbit, GiRabbit,
GiRaccoonHead, GiRaccoonHead,
GiSailboat, GiSailboat,
GiSquirrel,
} from "react-icons/gi"; } from "react-icons/gi";
import { LuBox, LuLassoSelect, LuScanBarcode } from "react-icons/lu"; import { LuBox, LuLassoSelect, LuScanBarcode } from "react-icons/lu";
import * as LuIcons from "react-icons/lu"; import * as LuIcons from "react-icons/lu";
@@ -72,6 +74,7 @@ export function getIconForLabel(label: string, className?: string) {
case "boat": case "boat":
return <GiSailboat key={label} className={className} />; return <GiSailboat key={label} className={className} />;
case "bus": case "bus":
case "school_bus":
return <FaBus key={label} className={className} />; return <FaBus key={label} className={className} />;
case "car": case "car":
case "vehicle": case "vehicle":
@@ -90,6 +93,8 @@ export function getIconForLabel(label: string, className?: string) {
return <GiGoat key={label} className={className} />; return <GiGoat key={label} className={className} />;
case "horse": case "horse":
return <FaHorse key={label} className={className} />; return <FaHorse key={label} className={className} />;
case "kangaroo":
return <GiKangaroo key={label} className={className} />;
case "license_plate": case "license_plate":
return <LuScanBarcode key={label} className={className} />; return <LuScanBarcode key={label} className={className} />;
case "motorcycle": case "motorcycle":
@@ -108,6 +113,8 @@ export function getIconForLabel(label: string, className?: string) {
return <FaHockeyPuck key={label} className={className} />; return <FaHockeyPuck key={label} className={className} />;
case "sports_ball": case "sports_ball":
return <FaFootballBall key={label} className={className} />; return <FaFootballBall key={label} className={className} />;
case "skunk":
return <GiSquirrel key={label} className={className} />;
case "squirrel": case "squirrel":
return <LuIcons.LuSquirrel key={label} className={className} />; return <LuIcons.LuSquirrel key={label} className={className} />;
case "umbrella": case "umbrella":
@@ -127,12 +134,14 @@ export function getIconForLabel(label: string, className?: string) {
case "amazon": case "amazon":
return <FaAmazon key={label} className={className} />; return <FaAmazon key={label} className={className} />;
case "an_post": case "an_post":
case "canada_post":
case "dpd": case "dpd":
case "gls": case "gls":
case "nzpost": case "nzpost":
case "postnl": case "postnl":
case "postnord": case "postnord":
case "purolator": case "purolator":
case "royal_mail":
return <GiPostStamp key={label} className={className} />; return <GiPostStamp key={label} className={className} />;
case "dhl": case "dhl":
return <FaDhl key={label} className={className} />; return <FaDhl key={label} className={className} />;