Fixes (#23130)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions

* respect section hiddenFields when detecting config overrides

* change audio events to audio detection to match docs

* add field messages for object and review genai

* add more config messages

* more messages

* add guard to prevent race when adding camera dynamically

* fix duplicate websocket messages from zombie connection under react strict mode

detach ws event handlers before close() in WsProvider cleanup so a CONNECTING socket's deferred onclose can't schedule a reconnect after the next mount resets the unmounted guard, which was spawning a second live ws and duplicating every message

* fix double event publishes for stationary objects with attributes
This commit is contained in:
Josh Hawkins 2026-05-07 13:23:02 -05:00 committed by GitHub
parent d0f44de6bc
commit 4ff7ab96dc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 205 additions and 41 deletions

View File

@@ -76,7 +76,7 @@ class CameraConfig(FrigateBaseModel):
# Options with global fallback # Options with global fallback
audio: AudioConfig = Field( audio: AudioConfig = Field(
default_factory=AudioConfig, default_factory=AudioConfig,
title="Audio events", title="Audio detection",
description="Settings for audio-based event detection for this camera.", description="Settings for audio-based event detection for this camera.",
) )
audio_transcription: CameraAudioTranscriptionConfig = Field( audio_transcription: CameraAudioTranscriptionConfig = Field(

View File

@@ -477,7 +477,7 @@ class FrigateConfig(FrigateBaseModel):
cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras") cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras")
audio: AudioConfig = Field( audio: AudioConfig = Field(
default_factory=AudioConfig, default_factory=AudioConfig,
title="Audio events", title="Audio detection",
description="Settings for audio-based event detection for all cameras; can be overridden per-camera.", description="Settings for audio-based event detection for all cameras; can be overridden per-camera.",
) )
birdseye: BirdseyeConfig = Field( birdseye: BirdseyeConfig = Field(

View File

@@ -773,7 +773,9 @@ class TrackedObjectProcessor(threading.Thread):
logger.debug(f"Camera {camera} disabled, skipping update") logger.debug(f"Camera {camera} disabled, skipping update")
continue continue
camera_state = self.camera_states[camera] camera_state = self.camera_states.get(camera)
if camera_state is None:
continue
camera_state.update( camera_state.update(
frame_name, frame_time, current_tracked_objects, motion_boxes, regions frame_name, frame_time, current_tracked_objects, motion_boxes, regions

View File

@@ -330,7 +330,12 @@ class TrackedObject:
if self.obj_data["position_changes"] != obj_data["position_changes"]: if self.obj_data["position_changes"] != obj_data["position_changes"]:
significant_change = True significant_change = True
if self.obj_data["attributes"] != obj_data["attributes"]: # disappearance of a per-frame attribute can be caused by detection
# skipping the object on a frame (stationary objects on non-interval
# frames), so only flag when a new attribute label appears
prev_labels = {a["label"] for a in self.obj_data["attributes"]}
curr_labels = {a["label"] for a in obj_data["attributes"]}
if curr_labels - prev_labels:
significant_change = True significant_change = True
# if the state changed between stationary and active # if the state changed between stationary and active

View File

@@ -438,34 +438,32 @@ def process_frames(
else: else:
object_tracker.update_frame_times(frame_name, frame_time) object_tracker.update_frame_times(frame_name, frame_time)
# group the attribute detections based on what label they apply to
attribute_detections: dict[str, list[TrackedObjectAttribute]] = {}
for label, attribute_labels in attributes_map.items():
attribute_detections[label] = [
TrackedObjectAttribute(d)
for d in consolidated_detections
if d[0] in attribute_labels
]
# build detections # build detections
detections = {} detections = {}
for obj in object_tracker.tracked_objects.values(): for obj in object_tracker.tracked_objects.values():
detections[obj["id"]] = {**obj, "attributes": []} detections[obj["id"]] = {**obj, "attributes": []}
# find the best object for each attribute to be assigned to # assign each detected attribute to the best matching object.
# iterate consolidated_detections once so attributes that appear under
# multiple parent labels in attributes_map (e.g. license_plate is in
# both "car" and "motorcycle") are not appended more than once
all_objects: list[dict[str, Any]] = object_tracker.tracked_objects.values() all_objects: list[dict[str, Any]] = object_tracker.tracked_objects.values()
for attributes in attribute_detections.values(): detected_attributes = [
for attribute in attributes: TrackedObjectAttribute(d)
filtered_objects = filter( for d in consolidated_detections
lambda o: attribute.label in attributes_map.get(o["label"], []), if d[0] in all_attributes
all_objects, ]
) for attribute in detected_attributes:
selected_object_id = attribute.find_best_object(filtered_objects) filtered_objects = filter(
lambda o: attribute.label in attributes_map.get(o["label"], []),
all_objects,
)
selected_object_id = attribute.find_best_object(filtered_objects)
if selected_object_id is not None: if selected_object_id is not None:
detections[selected_object_id]["attributes"].append( detections[selected_object_id]["attributes"].append(
attribute.get_tracking_data() attribute.get_tracking_data()
) )
# debug object tracking # debug object tracking
if False: if False:

View File

@@ -13,7 +13,7 @@
"description": "Enabled" "description": "Enabled"
}, },
"audio": { "audio": {
"label": "Audio events", "label": "Audio detection",
"description": "Settings for audio-based event detection for this camera.", "description": "Settings for audio-based event detection for this camera.",
"enabled": { "enabled": {
"label": "Enable audio detection", "label": "Enable audio detection",

View File

@@ -539,7 +539,7 @@
} }
}, },
"audio": { "audio": {
"label": "Audio events", "label": "Audio detection",
"description": "Settings for audio-based event detection for all cameras; can be overridden per-camera.", "description": "Settings for audio-based event detection for all cameras; can be overridden per-camera.",
"enabled": { "enabled": {
"label": "Enable audio detection", "label": "Enable audio detection",

View File

@@ -49,7 +49,7 @@
"globalMotion": "Motion detection", "globalMotion": "Motion detection",
"globalObjects": "Objects", "globalObjects": "Objects",
"globalReview": "Review", "globalReview": "Review",
"globalAudioEvents": "Audio events", "globalAudioEvents": "Audio detection",
"globalLivePlayback": "Live playback", "globalLivePlayback": "Live playback",
"globalTimestampStyle": "Timestamp style", "globalTimestampStyle": "Timestamp style",
"systemDatabase": "Database", "systemDatabase": "Database",
@@ -80,7 +80,7 @@
"cameraMotion": "Motion detection", "cameraMotion": "Motion detection",
"cameraObjects": "Objects", "cameraObjects": "Objects",
"cameraConfigReview": "Review", "cameraConfigReview": "Review",
"cameraAudioEvents": "Audio events", "cameraAudioEvents": "Audio detection",
"cameraAudioTranscription": "Audio transcription", "cameraAudioTranscription": "Audio transcription",
"cameraNotifications": "Notifications", "cameraNotifications": "Notifications",
"cameraLivePlayback": "Live playback", "cameraLivePlayback": "Live playback",
@@ -1651,7 +1651,8 @@
"review": { "review": {
"recordDisabled": "Recording is disabled, review items will not be generated.", "recordDisabled": "Recording is disabled, review items will not be generated.",
"detectDisabled": "Object detection is disabled. Review items require detected objects to categorize alerts and detections.", "detectDisabled": "Object detection is disabled. Review items require detected objects to categorize alerts and detections.",
"allNonAlertDetections": "All non-alert activity will be included as detections." "allNonAlertDetections": "All non-alert activity will be included as detections.",
"genaiImageSourceRecordingsRecordDisabled": "Image source is set to 'recordings', but recording is disabled. Frigate will fall back to preview images."
}, },
"audio": { "audio": {
"noAudioRole": "No streams have the audio role defined. You must enable the audio role for audio detection to function." "noAudioRole": "No streams have the audio role defined. You must enable the audio role for audio detection to function."
@@ -1660,15 +1661,21 @@
"audioDetectionDisabled": "Audio detection is not enabled for this camera. Audio transcription requires audio detection to be active." "audioDetectionDisabled": "Audio detection is not enabled for this camera. Audio transcription requires audio detection to be active."
}, },
"detect": { "detect": {
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended. Higher values may cause performance issues and will not provide any benefit." "fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended. Higher values may cause performance issues and will not provide any benefit.",
"disabled": "Object detection is disabled. Snapshots, review items, and enrichments such as face recognition, license plate recognition, and Generative AI will not function."
},
"objects": {
"genaiNoDescriptionsProvider": "You must configure a GenAI provider with the 'descriptions' role for descriptions to be generated."
}, },
"faceRecognition": { "faceRecognition": {
"globalDisabled": "The face recognition enrichment must be enabled for face recognition features to function on this camera.", "globalDisabled": "The face recognition enrichment must be enabled for face recognition features to function on this camera.",
"personNotTracked": "Face recognition requires the 'person' object to be tracked. Enable 'person' in Objects for this camera." "personNotTracked": "Face recognition requires the 'person' object to be tracked. Enable 'person' in Objects for this camera.",
"modelSizeLarge": "The 'large' model requires a GPU or NPU for reasonable performance. Use 'small' on CPU-only systems."
}, },
"lpr": { "lpr": {
"globalDisabled": "The license plate recognition enrichment must be enabled for LPR features to function on this camera.", "globalDisabled": "The license plate recognition enrichment must be enabled for LPR features to function on this camera.",
"vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked. Enable 'car' or 'motorcycle' in Objects for this camera." "vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked. Enable 'car' or 'motorcycle' in Objects for this camera.",
"modelSizeLarge": "The 'large' model is optimized for multi-line license plates. The 'small' model provides better performance over 'large' and should be used unless your region uses multi-line plate formats."
}, },
"record": { "record": {
"noRecordRole": "No streams have the record role defined. Recording will not function." "noRecordRole": "No streams have the record role defined. Recording will not function."
@@ -1682,6 +1689,9 @@
"detectors": { "detectors": {
"mixedTypes": "All detectors must use the same type. Remove existing detectors to use a different type.", "mixedTypes": "All detectors must use the same type. Remove existing detectors to use a different type.",
"mixedTypesSuggestion": "All detectors must use the same type. Remove existing detectors or select {{type}}." "mixedTypesSuggestion": "All detectors must use the same type. Remove existing detectors or select {{type}}."
},
"semanticSearch": {
"jinav2SmallModelSize": "The 'small' size with the Jina V2 model has high RAM and inference cost. The 'large' model with a discrete GPU is recommended."
} }
} }
} }

View File

@@ -56,7 +56,14 @@ export function WsProvider({ children }: { children: ReactNode }) {
if (reconnectTimer.current) { if (reconnectTimer.current) {
clearTimeout(reconnectTimer.current); clearTimeout(reconnectTimer.current);
} }
wsRef.current?.close(); const ws = wsRef.current;
if (ws) {
ws.onopen = null;
ws.onmessage = null;
ws.onclose = null;
ws.onerror = null;
ws.close();
}
resetWsStore(); resetWsStore();
}; };
}, [wsUrl]); }, [wsUrl]);

View File

@@ -3,6 +3,15 @@ import type { SectionConfigOverrides } from "./types";
const detect: SectionConfigOverrides = { const detect: SectionConfigOverrides = {
base: { base: {
sectionDocs: "/configuration/camera_specific", sectionDocs: "/configuration/camera_specific",
messages: [
{
key: "detect-disabled",
messageKey: "configMessages.detect.disabled",
severity: "info",
condition: (ctx) =>
ctx.level === "camera" && ctx.formData?.enabled === false,
},
],
fieldMessages: [ fieldMessages: [
{ {
key: "fps-greater-than-five", key: "fps-greater-than-five",

View File

@@ -53,6 +53,16 @@ const faceRecognition: SectionConfigOverrides = {
"device", "device",
], ],
restartRequired: ["enabled", "model_size", "device"], restartRequired: ["enabled", "model_size", "device"],
fieldMessages: [
{
key: "model-size-large",
field: "model_size",
messageKey: "configMessages.faceRecognition.modelSizeLarge",
severity: "info",
position: "after",
condition: (ctx) => ctx.formData?.model_size === "large",
},
],
uiSchema: { uiSchema: {
model_size: { model_size: {
"ui:options": { size: "xs" }, "ui:options": { size: "xs" },

View File

@@ -65,6 +65,16 @@ const lpr: SectionConfigOverrides = {
"replace_rules", "replace_rules",
], ],
restartRequired: ["model_size", "enhancement", "device"], restartRequired: ["model_size", "enhancement", "device"],
fieldMessages: [
{
key: "model-size-large",
field: "model_size",
messageKey: "configMessages.lpr.modelSizeLarge",
severity: "info",
position: "after",
condition: (ctx) => ctx.formData?.model_size === "large",
},
],
uiSchema: { uiSchema: {
format: { format: {
"ui:options": { size: "md" }, "ui:options": { size: "md" },

View File

@@ -11,6 +11,32 @@ const hideAttributeFilters = (config: FrigateConfig): string[] =>
const objects: SectionConfigOverrides = { const objects: SectionConfigOverrides = {
base: { base: {
sectionDocs: "/configuration/object_filters", sectionDocs: "/configuration/object_filters",
messages: [
{
key: "detect-disabled",
messageKey: "configMessages.detect.disabled",
severity: "info",
condition: (ctx) =>
ctx.level === "camera" &&
ctx.fullCameraConfig?.detect?.enabled === false,
},
],
fieldMessages: [
{
key: "genai-no-descriptions-provider",
field: "genai.enabled",
messageKey: "configMessages.objects.genaiNoDescriptionsProvider",
severity: "warning",
position: "before",
condition: (ctx) => {
const providers = ctx.fullConfig.genai;
if (!providers || Object.keys(providers).length === 0) return true;
return !Object.values(providers).some((agent) =>
agent.roles?.includes("descriptions"),
);
},
},
],
fieldDocs: { fieldDocs: {
"filters.min_area": "/configuration/object_filters#object-area", "filters.min_area": "/configuration/object_filters#object-area",
"filters.max_area": "/configuration/object_filters#object-area", "filters.max_area": "/configuration/object_filters#object-area",

View File

@@ -41,6 +41,38 @@ const review: SectionConfigOverrides = {
return !Array.isArray(labels) || labels.length === 0; return !Array.isArray(labels) || labels.length === 0;
}, },
}, },
{
key: "genai-no-descriptions-provider",
field: "genai.enabled",
messageKey: "configMessages.objects.genaiNoDescriptionsProvider",
severity: "warning",
position: "before",
condition: (ctx) => {
const providers = ctx.fullConfig.genai;
if (!providers || Object.keys(providers).length === 0) return true;
return !Object.values(providers).some((agent) =>
agent.roles?.includes("descriptions"),
);
},
},
{
key: "genai-image-source-recordings-record-disabled",
field: "genai.image_source",
messageKey:
"configMessages.review.genaiImageSourceRecordingsRecordDisabled",
severity: "warning",
position: "after",
condition: (ctx) => {
const genai = ctx.formData?.genai as
| Record<string, unknown>
| undefined;
if (genai?.image_source !== "recordings") return false;
if (ctx.level === "camera" && ctx.fullCameraConfig) {
return ctx.fullCameraConfig.record?.enabled === false;
}
return ctx.fullConfig.record?.enabled === false;
},
},
], ],
fieldDocs: { fieldDocs: {
"alerts.labels": "/configuration/review/#alerts-and-detections", "alerts.labels": "/configuration/review/#alerts-and-detections",

View File

@@ -18,6 +18,18 @@ const semanticSearch: SectionConfigOverrides = {
advancedFields: ["reindex", "device"], advancedFields: ["reindex", "device"],
restartRequired: ["enabled", "model", "model_size", "device"], restartRequired: ["enabled", "model", "model_size", "device"],
hiddenFields: ["reindex"], hiddenFields: ["reindex"],
fieldMessages: [
{
key: "jinav2-small-model-size",
field: "model_size",
messageKey: "configMessages.semanticSearch.jinav2SmallModelSize",
severity: "warning",
position: "after",
condition: (ctx) =>
ctx.formData?.model === "jinav2" &&
ctx.formData?.model_size === "small",
},
],
uiSchema: { uiSchema: {
model: { model: {
"ui:widget": "semanticSearchModel", "ui:widget": "semanticSearchModel",

View File

@@ -1,6 +1,7 @@
// Hook to detect when camera config overrides global defaults // Hook to detect when camera config overrides global defaults
import { useMemo } from "react"; import { useMemo } from "react";
import useSWR from "swr"; import useSWR from "swr";
import cloneDeep from "lodash/cloneDeep";
import isEqual from "lodash/isEqual"; import isEqual from "lodash/isEqual";
import get from "lodash/get"; import get from "lodash/get";
import set from "lodash/set"; import set from "lodash/set";
@@ -8,7 +9,11 @@ import type { RJSFSchema } from "@rjsf/utils";
import { FrigateConfig } from "@/types/frigateConfig"; import { FrigateConfig } from "@/types/frigateConfig";
import { JsonObject, JsonValue } from "@/types/configForm"; import { JsonObject, JsonValue } from "@/types/configForm";
import { isJsonObject } from "@/lib/utils"; import { isJsonObject } from "@/lib/utils";
import { getBaseCameraSectionValue } from "@/utils/configUtil"; import {
getBaseCameraSectionValue,
getEffectiveHiddenFields,
unsetWithWildcard,
} from "@/utils/configUtil";
import { extractSectionSchema } from "@/hooks/use-config-schema"; import { extractSectionSchema } from "@/hooks/use-config-schema";
import { applySchemaDefaults } from "@/lib/config-schema"; import { applySchemaDefaults } from "@/lib/config-schema";
@@ -38,6 +43,21 @@ export function normalizeConfigValue(value: unknown): JsonValue {
return stripInternalFields(value as JsonValue); return stripInternalFields(value as JsonValue);
} }
/**
* Remove hidden-field paths from a value before comparison so fields the
* user can't change in the UI (e.g. motion masks, attribute filters) don't
* trigger override badges. Operates on a clone so the input is unchanged.
*/
function stripHiddenPaths(value: JsonValue, hiddenFields: string[]): JsonValue {
if (hiddenFields.length === 0 || !isJsonObject(value)) return value;
const cloned = cloneDeep(value) as JsonObject;
for (const path of hiddenFields) {
if (!path) continue;
unsetWithWildcard(cloned as Record<string, unknown>, path);
}
return cloned;
}
/** /**
* Collapse null and empty-object values for override comparisons so * Collapse null and empty-object values for override comparisons so
* semantically equivalent shapes match. The schema may default `mask: None` * semantically equivalent shapes match. The schema may default `mask: None`
@@ -45,7 +65,7 @@ export function normalizeConfigValue(value: unknown): JsonValue {
* masks", so collapsing them here keeps the equality check honest. We * masks", so collapsing them here keeps the equality check honest. We
* keep this off the public `normalizeConfigValue` so save-flow code paths * keep this off the public `normalizeConfigValue` so save-flow code paths
* (which serialize form data) aren't affected. * (which serialize form data) aren't affected.
*/ **/
function collapseEmpty(value: JsonValue): JsonValue { function collapseEmpty(value: JsonValue): JsonValue {
if (Array.isArray(value)) { if (Array.isArray(value)) {
return value.map(collapseEmpty); return value.map(collapseEmpty);
@@ -202,8 +222,21 @@ export function useConfigOverride({
// Collapse empty/null values for comparison so semantically equivalent // Collapse empty/null values for comparison so semantically equivalent
// shapes (e.g. schema default `mask: null` vs runtime `mask: {}`) match. // shapes (e.g. schema default `mask: null` vs runtime `mask: {}`) match.
const collapsedGlobal = collapseEmpty(normalizedGlobalValue); // Also strip hidden-field paths (motion masks, attribute filters, etc.)
const collapsedCamera = collapseEmpty(normalizedCameraValue); // so fields the user can't edit in the UI don't trigger override badges.
const hiddenFields = getEffectiveHiddenFields(
sectionPath,
"camera",
config,
);
const collapsedGlobal = stripHiddenPaths(
collapseEmpty(normalizedGlobalValue),
hiddenFields,
);
const collapsedCamera = stripHiddenPaths(
collapseEmpty(normalizedCameraValue),
hiddenFields,
);
const comparisonGlobal = compareFields const comparisonGlobal = compareFields
? pickFields(collapsedGlobal, compareFields) ? pickFields(collapsedGlobal, compareFields)
@@ -328,8 +361,15 @@ export function useAllCameraOverrides(
getBaseCameraSectionValue(config, cameraName, key), getBaseCameraSectionValue(config, cameraName, key),
); );
const collapsedGlobal = collapseEmpty(globalValue); const hiddenFields = getEffectiveHiddenFields(key, "camera", config);
const collapsedCamera = collapseEmpty(cameraValue); const collapsedGlobal = stripHiddenPaths(
collapseEmpty(globalValue),
hiddenFields,
);
const collapsedCamera = stripHiddenPaths(
collapseEmpty(cameraValue),
hiddenFields,
);
const comparisonGlobal = compareFields const comparisonGlobal = compareFields
? pickFields(collapsedGlobal, compareFields) ? pickFields(collapsedGlobal, compareFields)
: collapsedGlobal; : collapsedGlobal;

View File

@@ -254,7 +254,10 @@ export function flattenOverrides(
// lodash `unset` treats `*` as a literal key. This helper expands wildcard // lodash `unset` treats `*` as a literal key. This helper expands wildcard
// segments so that e.g. `"filters.*.mask"` unsets `filters.<each key>.mask`. // segments so that e.g. `"filters.*.mask"` unsets `filters.<each key>.mask`.
function unsetWithWildcard(obj: Record<string, unknown>, path: string): void { export function unsetWithWildcard(
obj: Record<string, unknown>,
path: string,
): void {
if (!path.includes("*")) { if (!path.includes("*")) {
unset(obj, path); unset(obj, path);
return; return;