From 224cbdc2d6cc4b9fb195a196f29c4a56d85cae28 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Fri, 21 Nov 2025 14:40:58 -0700 Subject: [PATCH] Miscellaneous Fixes (#20989) * Include DB in safe mode config Copy DB when going into safe mode to avoid creating a new one if a user has configured a separate location * Fix documentation for example log module * Set minimum duration for recording segments Due to the inpoint logic, some recordings would get clipped on the end of the segment with a non-zero duration but not enough duration to include a frame. 100 ms is a safe value for any video that is 10fps or higher to have a frame * Add docs to explain object assignment for classification * Add warning for Intel GPU stats bug Add warning with explanation on GPU stats page when all Intel GPU values are 0 * Update docs with creation instructions * reset loading state when moving through events in tracking details * disable pip on preview players * Improve HLS handling for startPosition The startPosition was incorrectly calculated assuming continuous recordings, when it needs to consider only some segments exist. This extracts that logic to a utility so all can use it. --------- Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> --- docs/docs/configuration/advanced.md | 2 +- .../object_classification.md | 21 ++- .../state_classification.md | 18 ++- frigate/api/media.py | 7 +- frigate/config/config.py | 4 + web/public/locales/en/views/system.json | 7 +- .../overlay/detail/TrackingDetails.tsx | 43 +++--- web/src/components/player/HlsVideoPlayer.tsx | 2 + web/src/components/player/PreviewPlayer.tsx | 1 + .../player/dynamic/DynamicVideoController.ts | 41 ++---- .../player/dynamic/DynamicVideoPlayer.tsx | 123 ++++++++++-------- web/src/utils/videoUtil.ts | 54 ++++++++ web/src/views/system/GeneralMetrics.tsx | 84 +++++++++++- 13 files changed, 293 insertions(+), 114 deletions(-) diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 02482e792..78fc02884 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -25,7 +25,7 @@ Examples of available modules are: - `frigate.app` - `frigate.mqtt` -- `frigate.object_detection` +- `frigate.object_detection.base` - `detector.` - `watchdog.` - `ffmpeg..` NOTE: All FFmpeg logs are sent as `error` level. diff --git a/docs/docs/configuration/custom_classification/object_classification.md b/docs/docs/configuration/custom_classification/object_classification.md index cff8a6cad..3d59b74f9 100644 --- a/docs/docs/configuration/custom_classification/object_classification.md +++ b/docs/docs/configuration/custom_classification/object_classification.md @@ -35,6 +35,15 @@ For object classification: - Ideal when multiple attributes can coexist independently. - Example: Detecting if a `person` in a construction yard is wearing a helmet or not. +## Assignment Requirements + +Sub labels and attributes are only assigned when both conditions are met: + +1. **Threshold**: Each classification attempt must have a confidence score that meets or exceeds the configured `threshold` (default: `0.8`). +2. **Class Consensus**: After at least 3 classification attempts, 60% of attempts must agree on the same class label. If the consensus class is `none`, no assignment is made. + +This two-step verification prevents false positives by requiring consistent predictions across multiple frames before assigning a sub label or attribute. 
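To make the two checks concrete, here is a minimal sketch of how they could be applied to an object's per-frame classification history. This is only an illustration of the rule described above, not Frigate's actual implementation; the function name, the `(label, confidence)` input shape, and the choice to count only above-threshold attempts toward the consensus are assumptions.

```python
from collections import Counter


def resolve_assignment(
    attempts: list[tuple[str, float]], threshold: float = 0.8
) -> str | None:
    """Return the class to assign, or None if no assignment should be made.

    attempts: per-frame (label, confidence) classification results.
    """
    # Step 1 (threshold): only attempts at or above the confidence threshold
    # are considered (assumption: low-confidence attempts simply do not
    # count toward the consensus).
    confident = [label for label, score in attempts if score >= threshold]

    # Step 2 (consensus): require at least 3 attempts, with 60% of them
    # agreeing on a single class.
    if len(confident) < 3:
        return None

    best_label, votes = Counter(confident).most_common(1)[0]
    if votes / len(confident) < 0.6:
        return None

    # A consensus of "none" means no sub label / attribute is assigned.
    return None if best_label == "none" else best_label
```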
+ ## Example use cases ### Sub label @@ -66,14 +75,18 @@ classification: ## Training the model -Creating and training the model is done within the Frigate UI using the `Classification` page. +Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of two steps: -### Getting Started +### Step 1: Name and Define + +Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Include a `none` class for objects that don't fit any specific category. + +### Step 2: Assign Training Examples + +The system will automatically generate example images from detected objects matching your selected label. You'll be guided through each class one at a time to select which images represent that class. Any images not assigned to a specific class will automatically be assigned to `none` when you complete the last class. Once all images are processed, training will begin automatically. When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects. -// TODO add this section once UI is implemented. Explain process of selecting objects and curating training examples. - ### Improving the Model - **Problem framing**: Keep classes visually distinct and relevant to the chosen object types. diff --git a/docs/docs/configuration/custom_classification/state_classification.md b/docs/docs/configuration/custom_classification/state_classification.md index caaeaed5a..66d3e60ca 100644 --- a/docs/docs/configuration/custom_classification/state_classification.md +++ b/docs/docs/configuration/custom_classification/state_classification.md @@ -48,13 +48,23 @@ classification: ## Training the model -Creating and training the model is done within the Frigate UI using the `Classification` page. +Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of three steps: -### Getting Started +### Step 1: Name and Define -When choosing a portion of the camera frame for state classification, it is important to make the crop tight around the area of interest to avoid extra signals unrelated to what is being classified. +Enter a name for your model and define at least 2 classes (states) that represent mutually exclusive states. For example, `open` and `closed` for a door, or `on` and `off` for lights. -// TODO add this section once UI is implemented. Explain process of selecting a crop. +### Step 2: Select the Crop Area + +Choose one or more cameras and draw a rectangle over the area of interest for each camera. The crop should be tight around the region you want to classify to avoid extra signals unrelated to what is being classified. You can drag and resize the rectangle to adjust the crop area. + +### Step 3: Assign Training Examples + +The system will automatically generate example images from your camera feeds. You'll be guided through each class one at a time to select which images represent that state. + +**Important**: All images must be assigned to a state before training can begin. This includes images that may not be optimal, such as when people temporarily block the view, sun glare is present, or other distractions occur. Assign these images to the state that is actually present (based on what you know the state to be), not based on the distraction. 
This training helps the model correctly identify the state even when such conditions occur during inference. + +Once all images are assigned, training will begin automatically. ### Improving the Model diff --git a/frigate/api/media.py b/frigate/api/media.py index a8eb71ce1..372404b5a 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -849,6 +849,7 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float): clips = [] durations = [] + min_duration_ms = 100 # Minimum 100ms to ensure at least one video frame max_duration_ms = MAX_SEGMENT_DURATION * 1000 recording: Recordings @@ -866,11 +867,11 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float): if recording.end_time > end_ts: duration -= int((recording.end_time - end_ts) * 1000) - if duration <= 0: - # skip if the clip has no valid duration + if duration < min_duration_ms: + # skip if the clip has no valid duration (too short to contain frames) continue - if 0 < duration < max_duration_ms: + if min_duration_ms <= duration < max_duration_ms: clip["keyFrameDurations"] = [duration] clips.append(clip) durations.append(duration) diff --git a/frigate/config/config.py b/frigate/config/config.py index 7ce9c73b4..6342c13bf 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -792,6 +792,10 @@ class FrigateConfig(FrigateBaseModel): # copy over auth and proxy config in case auth needs to be enforced safe_config["auth"] = config.get("auth", {}) safe_config["proxy"] = config.get("proxy", {}) + + # copy over database config for auth and so a new db is not created + safe_config["database"] = config.get("database", {}) + return cls.parse_object(safe_config, **context) # Validate and return the config dict. diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json index e72b993cb..73c6d65b5 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -76,7 +76,12 @@ } }, "npuUsage": "NPU Usage", - "npuMemory": "NPU Memory" + "npuMemory": "NPU Memory", + "intelGpuWarning": { + "title": "Intel GPU Stats Warning", + "message": "GPU stats unavailable", + "description": "This is a known bug in Intel's GPU stats reporting tools (intel_gpu_top) where it will break and repeatedly return a GPU usage of 0% even in cases where hardware acceleration and object detection are correctly running on the (i)GPU. This is not a Frigate bug. You can restart the host to temporarily fix the issue and confirm that the GPU is working correctly. This does not affect performance." + } }, "otherProcesses": { "title": "Other Processes", diff --git a/web/src/components/overlay/detail/TrackingDetails.tsx b/web/src/components/overlay/detail/TrackingDetails.tsx index 0cd8a0d13..727dd4552 100644 --- a/web/src/components/overlay/detail/TrackingDetails.tsx +++ b/web/src/components/overlay/detail/TrackingDetails.tsx @@ -56,6 +56,7 @@ export function TrackingDetails({ const apiHost = useApiHost(); const imgRef = useRef(null); const [imgLoaded, setImgLoaded] = useState(false); + const [isVideoLoading, setIsVideoLoading] = useState(true); const [displaySource, _setDisplaySource] = useState<"video" | "image">( "video", ); @@ -70,6 +71,10 @@ export function TrackingDetails({ (event.start_time ?? 
0) + annotationOffset / 1000 - REVIEW_PADDING, ); + useEffect(() => { + setIsVideoLoading(true); + }, [event.id]); + const { data: eventSequence } = useSWR([ "timeline", { @@ -527,22 +532,28 @@ export function TrackingDetails({ )} > {displaySource == "video" && ( - + <> + setIsVideoLoading(false)} + isDetailMode={true} + camera={event.camera} + currentTimeOverride={currentTime} + /> + {isVideoLoading && ( + + )} + )} {displaySource == "image" && ( <> diff --git a/web/src/components/player/HlsVideoPlayer.tsx b/web/src/components/player/HlsVideoPlayer.tsx index 20d3dd4d7..27daecf97 100644 --- a/web/src/components/player/HlsVideoPlayer.tsx +++ b/web/src/components/player/HlsVideoPlayer.tsx @@ -130,6 +130,8 @@ export default function HlsVideoPlayer({ return; } + setLoadedMetadata(false); + const currentPlaybackRate = videoRef.current.playbackRate; if (!useHlsCompat) { diff --git a/web/src/components/player/PreviewPlayer.tsx b/web/src/components/player/PreviewPlayer.tsx index c08c73396..2a831fc6b 100644 --- a/web/src/components/player/PreviewPlayer.tsx +++ b/web/src/components/player/PreviewPlayer.tsx @@ -309,6 +309,7 @@ function PreviewVideoPlayer({ playsInline muted disableRemotePlayback + disablePictureInPicture onSeeked={onPreviewSeeked} onLoadedData={() => { if (firstLoad) { diff --git a/web/src/components/player/dynamic/DynamicVideoController.ts b/web/src/components/player/dynamic/DynamicVideoController.ts index 1afb30efa..e9da0064d 100644 --- a/web/src/components/player/dynamic/DynamicVideoController.ts +++ b/web/src/components/player/dynamic/DynamicVideoController.ts @@ -2,7 +2,10 @@ import { Recording } from "@/types/record"; import { DynamicPlayback } from "@/types/playback"; import { PreviewController } from "../PreviewPlayer"; import { TimeRange, TrackingDetailsSequence } from "@/types/timeline"; -import { calculateInpointOffset } from "@/utils/videoUtil"; +import { + calculateInpointOffset, + calculateSeekPosition, +} from "@/utils/videoUtil"; type PlayerMode = "playback" | "scrubbing"; @@ -72,38 +75,20 @@ export class DynamicVideoController { return; } - if ( - this.recordings.length == 0 || - time < this.recordings[0].start_time || - time > this.recordings[this.recordings.length - 1].end_time - ) { - this.setNoRecording(true); - return; - } - if (this.playerMode != "playback") { this.playerMode = "playback"; } - let seekSeconds = 0; - (this.recordings || []).every((segment) => { - // if the next segment is past the desired time, stop calculating - if (segment.start_time > time) { - return false; - } + const seekSeconds = calculateSeekPosition( + time, + this.recordings, + this.inpointOffset, + ); - if (segment.end_time < time) { - seekSeconds += segment.end_time - segment.start_time; - return true; - } - - seekSeconds += - segment.end_time - segment.start_time - (segment.end_time - time); - return true; - }); - - // adjust for HLS inpoint offset - seekSeconds -= this.inpointOffset; + if (seekSeconds === undefined) { + this.setNoRecording(true); + return; + } if (seekSeconds != 0) { this.playerController.currentTime = seekSeconds; diff --git a/web/src/components/player/dynamic/DynamicVideoPlayer.tsx b/web/src/components/player/dynamic/DynamicVideoPlayer.tsx index f26826fa7..7fe5bd50b 100644 --- a/web/src/components/player/dynamic/DynamicVideoPlayer.tsx +++ b/web/src/components/player/dynamic/DynamicVideoPlayer.tsx @@ -14,7 +14,10 @@ import { VideoResolutionType } from "@/types/live"; import axios from "axios"; import { cn } from "@/lib/utils"; import { useTranslation } from 
"react-i18next"; -import { calculateInpointOffset } from "@/utils/videoUtil"; +import { + calculateInpointOffset, + calculateSeekPosition, +} from "@/utils/videoUtil"; import { isFirefox } from "react-device-detect"; /** @@ -109,10 +112,10 @@ export default function DynamicVideoPlayer({ const [isLoading, setIsLoading] = useState(false); const [isBuffering, setIsBuffering] = useState(false); const [loadingTimeout, setLoadingTimeout] = useState(); - const [source, setSource] = useState({ - playlist: `${apiHost}vod/${camera}/start/${timeRange.after}/end/${timeRange.before}/master.m3u8`, - startPosition: startTimestamp ? startTimestamp - timeRange.after : 0, - }); + + // Don't set source until recordings load - we need accurate startPosition + // to avoid hls.js clamping to video end when startPosition exceeds duration + const [source, setSource] = useState(undefined); // start at correct time @@ -184,7 +187,7 @@ export default function DynamicVideoPlayer({ ); useEffect(() => { - if (!controller || !recordings?.length) { + if (!recordings?.length) { if (recordings?.length == 0) { setNoRecording(true); } @@ -192,10 +195,6 @@ export default function DynamicVideoPlayer({ return; } - if (playerRef.current) { - playerRef.current.autoplay = !isScrubbing; - } - let startPosition = undefined; if (startTimestamp) { @@ -203,14 +202,12 @@ export default function DynamicVideoPlayer({ recordingParams.after, (recordings || [])[0], ); - const idealStartPosition = Math.max( - 0, - startTimestamp - timeRange.after - inpointOffset, - ); - if (idealStartPosition >= recordings[0].start_time - timeRange.after) { - startPosition = idealStartPosition; - } + startPosition = calculateSeekPosition( + startTimestamp, + recordings, + inpointOffset, + ); } setSource({ @@ -218,6 +215,18 @@ export default function DynamicVideoPlayer({ startPosition, }); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [recordings]); + + useEffect(() => { + if (!controller || !recordings?.length) { + return; + } + + if (playerRef.current) { + playerRef.current.autoplay = !isScrubbing; + } + setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000)); controller.newPlayback({ @@ -225,7 +234,7 @@ export default function DynamicVideoPlayer({ timeRange, }); - // we only want this to change when recordings update + // we only want this to change when controller or recordings update // eslint-disable-next-line react-hooks/exhaustive-deps }, [controller, recordings]); @@ -263,46 +272,48 @@ export default function DynamicVideoPlayer({ return ( <> - { - if (onSeekToTime) { - onSeekToTime(timestamp, play); - } - }} - onPlaying={() => { - if (isScrubbing) { - playerRef.current?.pause(); - } + {source && ( + { + if (onSeekToTime) { + onSeekToTime(timestamp, play); + } + }} + onPlaying={() => { + if (isScrubbing) { + playerRef.current?.pause(); + } - if (loadingTimeout) { - clearTimeout(loadingTimeout); - } + if (loadingTimeout) { + clearTimeout(loadingTimeout); + } - setNoRecording(false); - }} - setFullResolution={setFullResolution} - onUploadFrame={onUploadFrameToPlus} - toggleFullscreen={toggleFullscreen} - onError={(error) => { - if (error == "stalled" && !isScrubbing) { - setIsBuffering(true); - } - }} - isDetailMode={isDetailMode} - camera={contextCamera || camera} - currentTimeOverride={currentTime} - /> + setNoRecording(false); + }} + setFullResolution={setFullResolution} + onUploadFrame={onUploadFrameToPlus} + toggleFullscreen={toggleFullscreen} + onError={(error) => { + if (error == "stalled" && !isScrubbing) { + 
setIsBuffering(true); + } + }} + isDetailMode={isDetailMode} + camera={contextCamera || camera} + currentTimeOverride={currentTime} + /> + )} recordings[recordings.length - 1].end_time + ) { + return undefined; + } + + let seekSeconds = 0; + + (recordings || []).every((segment) => { + // if the next segment is past the desired time, stop calculating + if (segment.start_time > timestamp) { + return false; + } + + if (segment.end_time < timestamp) { + // Add the full duration of this segment + seekSeconds += segment.end_time - segment.start_time; + return true; + } + + // We're in this segment - calculate position within it + seekSeconds += + segment.end_time - segment.start_time - (segment.end_time - timestamp); + return true; + }); + + // Adjust for HLS inpoint offset + seekSeconds -= inpointOffset; + + return seekSeconds >= 0 ? seekSeconds : undefined; +} diff --git a/web/src/views/system/GeneralMetrics.tsx b/web/src/views/system/GeneralMetrics.tsx index b7fb08e79..a05b1b82a 100644 --- a/web/src/views/system/GeneralMetrics.tsx +++ b/web/src/views/system/GeneralMetrics.tsx @@ -375,6 +375,50 @@ export default function GeneralMetrics({ return Object.keys(series).length > 0 ? Object.values(series) : undefined; }, [statsHistory]); + // Check if Intel GPU has all 0% usage values (known bug) + const showIntelGpuWarning = useMemo(() => { + if (!statsHistory || statsHistory.length < 3) { + return false; + } + + const gpuKeys = Object.keys(statsHistory[0]?.gpu_usages ?? {}); + const hasIntelGpu = gpuKeys.some( + (key) => key === "intel-vaapi" || key === "intel-qsv", + ); + + if (!hasIntelGpu) { + return false; + } + + // Check if all GPU usage values are 0% across all stats + let allZero = true; + let hasDataPoints = false; + + for (const stats of statsHistory) { + if (!stats) { + continue; + } + + Object.entries(stats.gpu_usages || {}).forEach(([key, gpuStats]) => { + if (key === "intel-vaapi" || key === "intel-qsv") { + if (gpuStats.gpu) { + hasDataPoints = true; + const gpuValue = parseFloat(gpuStats.gpu.slice(0, -1)); + if (!isNaN(gpuValue) && gpuValue > 0) { + allZero = false; + } + } + } + }); + + if (!allZero) { + break; + } + } + + return hasDataPoints && allZero; + }, [statsHistory]); + // npu stats const npuSeries = useMemo(() => { @@ -639,8 +683,46 @@ export default function GeneralMetrics({ <> {statsHistory.length != 0 ? (
-            <div className="text-muted-foreground">
+            <div className="flex flex-row items-center gap-2 text-muted-foreground">
               {t("general.hardwareInfo.gpuUsage")}
+              {showIntelGpuWarning && (
+                <Tooltip>
+                  <TooltipTrigger asChild>
+                    <LuInfo className="size-4 text-danger" />
+                  </TooltipTrigger>
+                  <TooltipContent className="max-w-96">
+                    <div className="font-medium">
+                      {t("general.hardwareInfo.intelGpuWarning.title")}
+                    </div>
+                    <div className="text-sm text-muted-foreground">
+                      {t("general.hardwareInfo.intelGpuWarning.description")}
+                    </div>
+                  </TooltipContent>
+                </Tooltip>
+              )}
{gpuSeries.map((series) => (