Fixes (#23130)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions

* respect section hiddenFields when detecting config overrides

* change audio events to audio detection to match docs

* add field messages for object and review genai

* add more config messages

* more messages

* add guard to prevent race when adding camera dynamically

* fix duplicate websocket messages from zombie connection under react strict mode

Detach the WebSocket event handlers before calling close() in the WsProvider cleanup. Otherwise, a socket still in the CONNECTING state fires its deferred onclose after the next mount has already reset the unmounted guard, scheduling a reconnect that spawned a second live WebSocket and duplicated every message.

* fix double event publishes for stationary objects with attributes
This commit is contained in:
Josh Hawkins 2026-05-07 13:23:02 -05:00 committed by GitHub
parent d0f44de6bc
commit 4ff7ab96dc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 205 additions and 41 deletions

View File

@ -76,7 +76,7 @@ class CameraConfig(FrigateBaseModel):
# Options with global fallback
audio: AudioConfig = Field(
default_factory=AudioConfig,
title="Audio events",
title="Audio detection",
description="Settings for audio-based event detection for this camera.",
)
audio_transcription: CameraAudioTranscriptionConfig = Field(

View File

@ -477,7 +477,7 @@ class FrigateConfig(FrigateBaseModel):
cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras")
audio: AudioConfig = Field(
default_factory=AudioConfig,
title="Audio events",
title="Audio detection",
description="Settings for audio-based event detection for all cameras; can be overridden per-camera.",
)
birdseye: BirdseyeConfig = Field(

View File

@ -773,7 +773,9 @@ class TrackedObjectProcessor(threading.Thread):
logger.debug(f"Camera {camera} disabled, skipping update")
continue
camera_state = self.camera_states[camera]
camera_state = self.camera_states.get(camera)
if camera_state is None:
continue
camera_state.update(
frame_name, frame_time, current_tracked_objects, motion_boxes, regions

View File

@ -330,7 +330,12 @@ class TrackedObject:
if self.obj_data["position_changes"] != obj_data["position_changes"]:
significant_change = True
if self.obj_data["attributes"] != obj_data["attributes"]:
# disappearance of a per-frame attribute can be caused by detection
# skipping the object on a frame (stationary objects on non-interval
# frames), so only flag when a new attribute label appears
prev_labels = {a["label"] for a in self.obj_data["attributes"]}
curr_labels = {a["label"] for a in obj_data["attributes"]}
if curr_labels - prev_labels:
significant_change = True
# if the state changed between stationary and active

View File

@ -438,34 +438,32 @@ def process_frames(
else:
object_tracker.update_frame_times(frame_name, frame_time)
# group the attribute detections based on what label they apply to
attribute_detections: dict[str, list[TrackedObjectAttribute]] = {}
for label, attribute_labels in attributes_map.items():
attribute_detections[label] = [
TrackedObjectAttribute(d)
for d in consolidated_detections
if d[0] in attribute_labels
]
# build detections
detections = {}
for obj in object_tracker.tracked_objects.values():
detections[obj["id"]] = {**obj, "attributes": []}
# find the best object for each attribute to be assigned to
# assign each detected attribute to the best matching object.
# iterate consolidated_detections once so attributes that appear under
# multiple parent labels in attributes_map (e.g. license_plate is in
# both "car" and "motorcycle") are not appended more than once
all_objects: list[dict[str, Any]] = object_tracker.tracked_objects.values()
for attributes in attribute_detections.values():
for attribute in attributes:
filtered_objects = filter(
lambda o: attribute.label in attributes_map.get(o["label"], []),
all_objects,
)
selected_object_id = attribute.find_best_object(filtered_objects)
detected_attributes = [
TrackedObjectAttribute(d)
for d in consolidated_detections
if d[0] in all_attributes
]
for attribute in detected_attributes:
filtered_objects = filter(
lambda o: attribute.label in attributes_map.get(o["label"], []),
all_objects,
)
selected_object_id = attribute.find_best_object(filtered_objects)
if selected_object_id is not None:
detections[selected_object_id]["attributes"].append(
attribute.get_tracking_data()
)
if selected_object_id is not None:
detections[selected_object_id]["attributes"].append(
attribute.get_tracking_data()
)
# debug object tracking
if False:

View File

@ -13,7 +13,7 @@
"description": "Enabled"
},
"audio": {
"label": "Audio events",
"label": "Audio detection",
"description": "Settings for audio-based event detection for this camera.",
"enabled": {
"label": "Enable audio detection",

View File

@ -539,7 +539,7 @@
}
},
"audio": {
"label": "Audio events",
"label": "Audio detection",
"description": "Settings for audio-based event detection for all cameras; can be overridden per-camera.",
"enabled": {
"label": "Enable audio detection",

View File

@ -49,7 +49,7 @@
"globalMotion": "Motion detection",
"globalObjects": "Objects",
"globalReview": "Review",
"globalAudioEvents": "Audio events",
"globalAudioEvents": "Audio detection",
"globalLivePlayback": "Live playback",
"globalTimestampStyle": "Timestamp style",
"systemDatabase": "Database",
@ -80,7 +80,7 @@
"cameraMotion": "Motion detection",
"cameraObjects": "Objects",
"cameraConfigReview": "Review",
"cameraAudioEvents": "Audio events",
"cameraAudioEvents": "Audio detection",
"cameraAudioTranscription": "Audio transcription",
"cameraNotifications": "Notifications",
"cameraLivePlayback": "Live playback",
@ -1651,7 +1651,8 @@
"review": {
"recordDisabled": "Recording is disabled, review items will not be generated.",
"detectDisabled": "Object detection is disabled. Review items require detected objects to categorize alerts and detections.",
"allNonAlertDetections": "All non-alert activity will be included as detections."
"allNonAlertDetections": "All non-alert activity will be included as detections.",
"genaiImageSourceRecordingsRecordDisabled": "Image source is set to 'recordings', but recording is disabled. Frigate will fall back to preview images."
},
"audio": {
"noAudioRole": "No streams have the audio role defined. You must enable the audio role for audio detection to function."
@ -1660,15 +1661,21 @@
"audioDetectionDisabled": "Audio detection is not enabled for this camera. Audio transcription requires audio detection to be active."
},
"detect": {
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended. Higher values may cause performance issues and will not provide any benefit."
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended. Higher values may cause performance issues and will not provide any benefit.",
"disabled": "Object detection is disabled. Snapshots, review items, and enrichments such as face recognition, license plate recognition, and Generative AI will not function."
},
"objects": {
"genaiNoDescriptionsProvider": "You must configure a GenAI provider with the 'descriptions' role for descriptions to be generated."
},
"faceRecognition": {
"globalDisabled": "The face recognition enrichment must be enabled for face recognition features to function on this camera.",
"personNotTracked": "Face recognition requires the 'person' object to be tracked. Enable 'person' in Objects for this camera."
"personNotTracked": "Face recognition requires the 'person' object to be tracked. Enable 'person' in Objects for this camera.",
"modelSizeLarge": "The 'large' model requires a GPU or NPU for reasonable performance. Use 'small' on CPU-only systems."
},
"lpr": {
"globalDisabled": "The license plate recognition enrichment must be enabled for LPR features to function on this camera.",
"vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked. Enable 'car' or 'motorcycle' in Objects for this camera."
"vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked. Enable 'car' or 'motorcycle' in Objects for this camera.",
"modelSizeLarge": "The 'large' model is optimized for multi-line license plates. The 'small' model provides better performance over 'large' and should be used unless your region uses multi-line plate formats."
},
"record": {
"noRecordRole": "No streams have the record role defined. Recording will not function."
@ -1682,6 +1689,9 @@
"detectors": {
"mixedTypes": "All detectors must use the same type. Remove existing detectors to use a different type.",
"mixedTypesSuggestion": "All detectors must use the same type. Remove existing detectors or select {{type}}."
},
"semanticSearch": {
"jinav2SmallModelSize": "The 'small' size with the Jina V2 model has high RAM and inference cost. The 'large' model with a discrete GPU is recommended."
}
}
}

View File

@ -56,7 +56,14 @@ export function WsProvider({ children }: { children: ReactNode }) {
if (reconnectTimer.current) {
clearTimeout(reconnectTimer.current);
}
wsRef.current?.close();
const ws = wsRef.current;
if (ws) {
ws.onopen = null;
ws.onmessage = null;
ws.onclose = null;
ws.onerror = null;
ws.close();
}
resetWsStore();
};
}, [wsUrl]);

View File

@ -3,6 +3,15 @@ import type { SectionConfigOverrides } from "./types";
const detect: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/camera_specific",
messages: [
{
key: "detect-disabled",
messageKey: "configMessages.detect.disabled",
severity: "info",
condition: (ctx) =>
ctx.level === "camera" && ctx.formData?.enabled === false,
},
],
fieldMessages: [
{
key: "fps-greater-than-five",

View File

@ -53,6 +53,16 @@ const faceRecognition: SectionConfigOverrides = {
"device",
],
restartRequired: ["enabled", "model_size", "device"],
fieldMessages: [
{
key: "model-size-large",
field: "model_size",
messageKey: "configMessages.faceRecognition.modelSizeLarge",
severity: "info",
position: "after",
condition: (ctx) => ctx.formData?.model_size === "large",
},
],
uiSchema: {
model_size: {
"ui:options": { size: "xs" },

View File

@ -65,6 +65,16 @@ const lpr: SectionConfigOverrides = {
"replace_rules",
],
restartRequired: ["model_size", "enhancement", "device"],
fieldMessages: [
{
key: "model-size-large",
field: "model_size",
messageKey: "configMessages.lpr.modelSizeLarge",
severity: "info",
position: "after",
condition: (ctx) => ctx.formData?.model_size === "large",
},
],
uiSchema: {
format: {
"ui:options": { size: "md" },

View File

@ -11,6 +11,32 @@ const hideAttributeFilters = (config: FrigateConfig): string[] =>
const objects: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/object_filters",
messages: [
{
key: "detect-disabled",
messageKey: "configMessages.detect.disabled",
severity: "info",
condition: (ctx) =>
ctx.level === "camera" &&
ctx.fullCameraConfig?.detect?.enabled === false,
},
],
fieldMessages: [
{
key: "genai-no-descriptions-provider",
field: "genai.enabled",
messageKey: "configMessages.objects.genaiNoDescriptionsProvider",
severity: "warning",
position: "before",
condition: (ctx) => {
const providers = ctx.fullConfig.genai;
if (!providers || Object.keys(providers).length === 0) return true;
return !Object.values(providers).some((agent) =>
agent.roles?.includes("descriptions"),
);
},
},
],
fieldDocs: {
"filters.min_area": "/configuration/object_filters#object-area",
"filters.max_area": "/configuration/object_filters#object-area",

View File

@ -41,6 +41,38 @@ const review: SectionConfigOverrides = {
return !Array.isArray(labels) || labels.length === 0;
},
},
{
key: "genai-no-descriptions-provider",
field: "genai.enabled",
messageKey: "configMessages.objects.genaiNoDescriptionsProvider",
severity: "warning",
position: "before",
condition: (ctx) => {
const providers = ctx.fullConfig.genai;
if (!providers || Object.keys(providers).length === 0) return true;
return !Object.values(providers).some((agent) =>
agent.roles?.includes("descriptions"),
);
},
},
{
key: "genai-image-source-recordings-record-disabled",
field: "genai.image_source",
messageKey:
"configMessages.review.genaiImageSourceRecordingsRecordDisabled",
severity: "warning",
position: "after",
condition: (ctx) => {
const genai = ctx.formData?.genai as
| Record<string, unknown>
| undefined;
if (genai?.image_source !== "recordings") return false;
if (ctx.level === "camera" && ctx.fullCameraConfig) {
return ctx.fullCameraConfig.record?.enabled === false;
}
return ctx.fullConfig.record?.enabled === false;
},
},
],
fieldDocs: {
"alerts.labels": "/configuration/review/#alerts-and-detections",

View File

@ -18,6 +18,18 @@ const semanticSearch: SectionConfigOverrides = {
advancedFields: ["reindex", "device"],
restartRequired: ["enabled", "model", "model_size", "device"],
hiddenFields: ["reindex"],
fieldMessages: [
{
key: "jinav2-small-model-size",
field: "model_size",
messageKey: "configMessages.semanticSearch.jinav2SmallModelSize",
severity: "warning",
position: "after",
condition: (ctx) =>
ctx.formData?.model === "jinav2" &&
ctx.formData?.model_size === "small",
},
],
uiSchema: {
model: {
"ui:widget": "semanticSearchModel",

View File

@ -1,6 +1,7 @@
// Hook to detect when camera config overrides global defaults
import { useMemo } from "react";
import useSWR from "swr";
import cloneDeep from "lodash/cloneDeep";
import isEqual from "lodash/isEqual";
import get from "lodash/get";
import set from "lodash/set";
@ -8,7 +9,11 @@ import type { RJSFSchema } from "@rjsf/utils";
import { FrigateConfig } from "@/types/frigateConfig";
import { JsonObject, JsonValue } from "@/types/configForm";
import { isJsonObject } from "@/lib/utils";
import { getBaseCameraSectionValue } from "@/utils/configUtil";
import {
getBaseCameraSectionValue,
getEffectiveHiddenFields,
unsetWithWildcard,
} from "@/utils/configUtil";
import { extractSectionSchema } from "@/hooks/use-config-schema";
import { applySchemaDefaults } from "@/lib/config-schema";
@ -38,6 +43,21 @@ export function normalizeConfigValue(value: unknown): JsonValue {
return stripInternalFields(value as JsonValue);
}
/**
* Remove hidden-field paths from a value before comparison so fields the
* user can't change in the UI (e.g. motion masks, attribute filters) don't
* trigger override badges. Operates on a clone so the input is unchanged.
*/
function stripHiddenPaths(value: JsonValue, hiddenFields: string[]): JsonValue {
if (hiddenFields.length === 0 || !isJsonObject(value)) return value;
const cloned = cloneDeep(value) as JsonObject;
for (const path of hiddenFields) {
if (!path) continue;
unsetWithWildcard(cloned as Record<string, unknown>, path);
}
return cloned;
}
/**
* Collapse null and empty-object values for override comparisons so
* semantically equivalent shapes match. The schema may default `mask: None`
@ -45,7 +65,7 @@ export function normalizeConfigValue(value: unknown): JsonValue {
* masks", so collapsing them here keeps the equality check honest. We
* keep this off the public `normalizeConfigValue` so save-flow code paths
* (which serialize form data) aren't affected.
*/
**/
function collapseEmpty(value: JsonValue): JsonValue {
if (Array.isArray(value)) {
return value.map(collapseEmpty);
@ -202,8 +222,21 @@ export function useConfigOverride({
// Collapse empty/null values for comparison so semantically equivalent
// shapes (e.g. schema default `mask: null` vs runtime `mask: {}`) match.
const collapsedGlobal = collapseEmpty(normalizedGlobalValue);
const collapsedCamera = collapseEmpty(normalizedCameraValue);
// Also strip hidden-field paths (motion masks, attribute filters, etc.)
// so fields the user can't edit in the UI don't trigger override badges.
const hiddenFields = getEffectiveHiddenFields(
sectionPath,
"camera",
config,
);
const collapsedGlobal = stripHiddenPaths(
collapseEmpty(normalizedGlobalValue),
hiddenFields,
);
const collapsedCamera = stripHiddenPaths(
collapseEmpty(normalizedCameraValue),
hiddenFields,
);
const comparisonGlobal = compareFields
? pickFields(collapsedGlobal, compareFields)
@ -328,8 +361,15 @@ export function useAllCameraOverrides(
getBaseCameraSectionValue(config, cameraName, key),
);
const collapsedGlobal = collapseEmpty(globalValue);
const collapsedCamera = collapseEmpty(cameraValue);
const hiddenFields = getEffectiveHiddenFields(key, "camera", config);
const collapsedGlobal = stripHiddenPaths(
collapseEmpty(globalValue),
hiddenFields,
);
const collapsedCamera = stripHiddenPaths(
collapseEmpty(cameraValue),
hiddenFields,
);
const comparisonGlobal = compareFields
? pickFields(collapsedGlobal, compareFields)
: collapsedGlobal;

View File

@ -254,7 +254,10 @@ export function flattenOverrides(
// lodash `unset` treats `*` as a literal key. This helper expands wildcard
// segments so that e.g. `"filters.*.mask"` unsets `filters.<each key>.mask`.
function unsetWithWildcard(obj: Record<string, unknown>, path: string): void {
export function unsetWithWildcard(
obj: Record<string, unknown>,
path: string,
): void {
if (!path.includes("*")) {
unset(obj, path);
return;