Compare commits

..

3 Commits

Author SHA1 Message Date
Nicolas Mowen
835ea0d05e Reduce need to revalidate stream info 2025-11-10 14:23:30 -07:00
Nicolas Mowen
fdaf524015 Improve stream fetching logic 2025-11-10 14:20:19 -07:00
Josh Hawkins
c371fc0c87
Miscellaneous Fixes (#20866)
* Don't warn when event ids have expired for trigger sync

* Import faster_whisper conditionally to avoid illegal instruction

* Catch OpenVINO runtime error

* Fix race condition in detail stream context

Navigating between tracked objects in Explore would sometimes prevent the object track from appearing.

* Handle case where classification images are deleted

* Adjust default rounded corners on larger screens

* Improve flow handling for classification state

* Remove images when wizard is cancelled

* Improve deletion handling for classes

* Set constraints on review buffers

* Update to support correct data format

* Set minimum duration for recording based review items

* Use friendly name in review genai prompt

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2025-11-10 10:03:56 -07:00
12 changed files with 172 additions and 62 deletions

View File

@@ -177,6 +177,12 @@ class CameraConfig(FrigateBaseModel):
     def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
         return self._ffmpeg_cmds
 
+    def get_formatted_name(self) -> str:
+        """Return the friendly name if set, otherwise return a formatted version of the camera name."""
+        if self.friendly_name:
+            return self.friendly_name
+        return self.name.replace("_", " ").title() if self.name else ""
+
     def create_ffmpeg_cmds(self):
         if "_ffmpeg_cmds" in self:
             return

View File

@@ -56,6 +56,12 @@ class ZoneConfig(BaseModel):
     def contour(self) -> np.ndarray:
         return self._contour
 
+    def get_formatted_name(self, zone_name: str) -> str:
+        """Return the friendly name if set, otherwise return a formatted version of the zone name."""
+        if self.friendly_name:
+            return self.friendly_name
+        return zone_name.replace("_", " ").title()
+
     @field_validator("objects", mode="before")
     @classmethod
     def validate_objects(cls, v):
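Taken together, the camera and zone helpers apply the same fallback rule: prefer the configured friendly_name, otherwise title-case the underscore-separated config key. A minimal standalone sketch of that rule (the format_name helper and the example names below are illustrative, not part of this change):

def format_name(config_key: str, friendly_name: str | None = None) -> str:
    # Mirrors the fallback in CameraConfig/ZoneConfig.get_formatted_name()
    if friendly_name:
        return friendly_name
    return config_key.replace("_", " ").title()

print(format_name("front_door"))                  # Front Door
print(format_name("side_yard", "Pool Camera"))    # Pool Camera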

View File

@@ -16,6 +16,7 @@ from peewee import DoesNotExist
 from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import FrigateConfig
+from frigate.config.camera import CameraConfig
 from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum
 from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION
 from frigate.data_processing.types import PostProcessDataEnum

@@ -30,6 +31,7 @@ from ..types import DataProcessorMetrics
 logger = logging.getLogger(__name__)
 
 RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
+MIN_RECORDING_DURATION = 10
 
 
 class ReviewDescriptionProcessor(PostProcessorApi):

@@ -130,7 +132,17 @@ class ReviewDescriptionProcessor(PostProcessorApi):
         if image_source == ImageSourceEnum.recordings:
             duration = final_data["end_time"] - final_data["start_time"]
-            buffer_extension = duration * RECORDING_BUFFER_EXTENSION_PERCENT
+            buffer_extension = min(
+                10, max(2, duration * RECORDING_BUFFER_EXTENSION_PERCENT)
+            )
+
+            # Ensure minimum total duration for short review items
+            # This provides better context for brief events
+            total_duration = duration + (2 * buffer_extension)
+            if total_duration < MIN_RECORDING_DURATION:
+                # Expand buffer to reach minimum duration, still respecting max of 10s per side
+                additional_buffer_per_side = (MIN_RECORDING_DURATION - duration) / 2
+                buffer_extension = min(10, additional_buffer_per_side)
 
             thumbs = self.get_recording_frames(
                 camera,

@@ -182,7 +194,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
                 self.requestor,
                 self.genai_client,
                 self.review_desc_speed,
-                camera,
+                camera_config,
                 final_data,
                 thumbs,
                 camera_config.review.genai,

@@ -411,7 +423,7 @@ def run_analysis(
     requestor: InterProcessRequestor,
     genai_client: GenAIClient,
     review_inference_speed: InferenceSpeed,
-    camera: str,
+    camera_config: CameraConfig,
     final_data: dict[str, str],
     thumbs: list[bytes],
     genai_config: GenAIReviewConfig,

@@ -419,10 +431,19 @@ def run_analysis(
     attribute_labels: list[str],
 ) -> None:
     start = datetime.datetime.now().timestamp()
 
+    # Format zone names using zone config friendly names if available
+    formatted_zones = []
+    for zone_name in final_data["data"]["zones"]:
+        if zone_name in camera_config.zones:
+            formatted_zones.append(
+                camera_config.zones[zone_name].get_formatted_name(zone_name)
+            )
+
     analytics_data = {
         "id": final_data["id"],
-        "camera": camera,
-        "zones": final_data["data"]["zones"],
+        "camera": camera_config.get_formatted_name(),
+        "zones": formatted_zones,
         "start": datetime.datetime.fromtimestamp(final_data["start_time"]).strftime(
             "%A, %I:%M %p"
         ),
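The recording path above now clamps the per-side buffer to between 2 and 10 seconds (10% of the event duration), then widens it again whenever the padded clip would still fall short of MIN_RECORDING_DURATION. A standalone sketch of that arithmetic (compute_buffer is an illustrative helper; the constants are the ones introduced in this diff):

RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
MIN_RECORDING_DURATION = 10

def compute_buffer(duration: float) -> float:
    """Per-side buffer (seconds) around a review item, mirroring the logic above."""
    buffer_extension = min(10, max(2, duration * RECORDING_BUFFER_EXTENSION_PERCENT))
    if duration + (2 * buffer_extension) < MIN_RECORDING_DURATION:
        buffer_extension = min(10, (MIN_RECORDING_DURATION - duration) / 2)
    return buffer_extension

# A 4 s event: 10% (0.4 s) is clamped up to 2 s, padded total is 8 s < 10 s,
# so the buffer grows to 3 s per side and the clip spans the 10 s minimum.
print(compute_buffer(4))   # 3.0
# A 60 s event keeps the proportional buffer: 6 s per side.
print(compute_buffer(60))  # 6.0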

View File

@@ -51,8 +51,7 @@ class GenAIClient:
         def get_concern_prompt() -> str:
             if concerns:
                 concern_list = "\n - ".join(concerns)
-                return f"""
-- `other_concerns` (list of strings): Include a list of any of the following concerns that are occurring:
+                return f"""- `other_concerns` (list of strings): Include a list of any of the following concerns that are occurring:
     - {concern_list}"""
             else:
                 return ""

@@ -70,7 +69,7 @@ class GenAIClient:
             return "\n- (No objects detected)"
 
         context_prompt = f"""
-Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
+Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera.
 
 ## Normal Activity Patterns for This Property

@@ -110,7 +109,7 @@ Your response MUST be a flat JSON object with:
 - Frame 1 = earliest, Frame {len(thumbnails)} = latest
 - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
-- Zones involved: {", ".join(z.replace("_", " ").title() for z in review_data["zones"]) or "None"}
+- Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"}
 
 ## Objects in Scene

View File

@@ -28,6 +28,7 @@ import {
   CustomClassificationModelConfig,
   FrigateConfig,
 } from "@/types/frigateConfig";
+import { ClassificationDatasetResponse } from "@/types/classification";
 import { getTranslatedLabel } from "@/utils/i18n";
 import { zodResolver } from "@hookform/resolvers/zod";
 import axios from "axios";

@@ -140,16 +141,19 @@ export default function ClassificationModelEditDialog({
   });
 
   // Fetch dataset to get current classes for state models
-  const { data: dataset } = useSWR<{
-    [id: string]: string[];
-  }>(isStateModel ? `classification/${model.name}/dataset` : null, {
-    revalidateOnFocus: false,
-  });
+  const { data: dataset } = useSWR<ClassificationDatasetResponse>(
+    isStateModel ? `classification/${model.name}/dataset` : null,
+    {
+      revalidateOnFocus: false,
+    },
+  );
 
   // Update form with classes from dataset when loaded
   useEffect(() => {
-    if (isStateModel && dataset) {
-      const classes = Object.keys(dataset).filter((key) => key !== "none");
+    if (isStateModel && dataset?.categories) {
+      const classes = Object.keys(dataset.categories).filter(
+        (key) => key !== "none",
+      );
       if (classes.length > 0) {
         (form as ReturnType<typeof useForm<StateFormData>>).setValue(
           "classes",

View File

@@ -6,6 +6,7 @@ import { LivePlayerMode, LiveStreamMetadata } from "@/types/live";
 export default function useCameraLiveMode(
   cameras: CameraConfig[],
   windowVisible: boolean,
+  activeStreams?: { [cameraName: string]: string },
 ) {
   const { data: config } = useSWR<FrigateConfig>("config");

@@ -20,16 +21,20 @@ export default function useCameraLiveMode(
       );
 
       if (isRestreamed) {
-        Object.values(camera.live.streams).forEach((streamName) => {
-          streamNames.add(streamName);
-        });
+        if (activeStreams && activeStreams[camera.name]) {
+          streamNames.add(activeStreams[camera.name]);
+        } else {
+          Object.values(camera.live.streams).forEach((streamName) => {
+            streamNames.add(streamName);
+          });
+        }
       }
     });
 
     return streamNames.size > 0
       ? Array.from(streamNames).sort().join(",")
       : null;
-  }, [cameras, config]);
+  }, [cameras, config, activeStreams]);
 
   const streamsFetcher = useCallback(async (key: string) => {
     const streamNames = key.split(",");

@@ -68,7 +73,9 @@ export default function useCameraLiveMode(
     [key: string]: LiveStreamMetadata;
   }>(restreamedStreamsKey, streamsFetcher, {
     revalidateOnFocus: false,
-    dedupingInterval: 10000,
+    revalidateOnReconnect: false,
+    revalidateIfStale: false,
+    dedupingInterval: 60000,
   });
 
   const [preferredLiveModes, setPreferredLiveModes] = useState<{

View File

@@ -20,3 +20,17 @@ export type ClassificationThreshold = {
   recognition: number;
   unknown: number;
 };
+
+export type ClassificationDatasetResponse = {
+  categories: {
+    [id: string]: string[];
+  };
+  training_metadata: {
+    has_trained: boolean;
+    last_training_date: string | null;
+    last_training_image_count: number;
+    current_image_count: number;
+    new_images_count: number;
+    dataset_changed: boolean;
+  } | null;
+};
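This shared type replaces the inline useSWR response generics in the classification views below. For orientation, a response matching this shape might look like the following (expressed as a Python dict since the endpoint is served by Frigate's Python backend; the category names, counts, and date format are illustrative assumptions, not taken from the API):

example_dataset_response = {
    "categories": {
        "none": ["img_001.jpg"],
        "package": ["img_010.jpg", "img_011.jpg"],
    },
    "training_metadata": {
        "has_trained": True,
        "last_training_date": "2025-11-01T12:00:00",
        "last_training_image_count": 120,
        "current_image_count": 131,
        "new_images_count": 11,
        "dataset_changed": True,
    },
}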

View File

@@ -11,6 +11,7 @@ import {
   CustomClassificationModelConfig,
   FrigateConfig,
 } from "@/types/frigateConfig";
+import { ClassificationDatasetResponse } from "@/types/classification";
 import { useCallback, useEffect, useMemo, useState } from "react";
 import { useTranslation } from "react-i18next";
 import { FaFolderPlus } from "react-icons/fa";

@@ -209,9 +210,10 @@ type ModelCardProps = {
 function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
   const { t } = useTranslation(["views/classificationModel"]);
 
-  const { data: dataset } = useSWR<{
-    [id: string]: string[];
-  }>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
+  const { data: dataset } = useSWR<ClassificationDatasetResponse>(
+    `classification/${config.name}/dataset`,
+    { revalidateOnFocus: false },
+  );
 
   const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
   const [editDialogOpen, setEditDialogOpen] = useState(false);

@@ -260,20 +262,25 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
   }, []);
 
   const coverImage = useMemo(() => {
-    if (!dataset) {
+    if (!dataset || !dataset.categories) {
       return undefined;
     }
 
-    const keys = Object.keys(dataset).filter((key) => key != "none");
-    const selectedKey = keys[0];
+    const keys = Object.keys(dataset.categories).filter((key) => key != "none");
+    if (keys.length === 0) {
+      return undefined;
+    }
 
-    if (!dataset[selectedKey]) {
+    const selectedKey = keys[0];
+    const images = dataset.categories[selectedKey];
+
+    if (!images || images.length === 0) {
       return undefined;
     }
 
     return {
       name: selectedKey,
-      img: dataset[selectedKey][0],
+      img: images[0],
     };
   }, [dataset]);

@@ -317,11 +324,19 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
       )}
       onClick={onClick}
     >
-      <img
-        className="size-full"
-        src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
-      />
-      <ImageShadowOverlay lowerClassName="h-[30%] z-0" />
+      {coverImage ? (
+        <>
+          <img
+            className="size-full"
+            src={`${baseUrl}clips/${config.name}/dataset/${coverImage.name}/${coverImage.img}`}
+          />
+          <ImageShadowOverlay lowerClassName="h-[30%] z-0" />
+        </>
+      ) : (
+        <div className="flex size-full items-center justify-center bg-background_alt">
+          <MdModelTraining className="size-16 text-muted-foreground" />
+        </div>
+      )}
       <div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
         {config.name}
       </div>

View File

@@ -59,7 +59,11 @@ import { useNavigate } from "react-router-dom";
 import { IoMdArrowRoundBack } from "react-icons/io";
 import TrainFilterDialog from "@/components/overlay/dialog/TrainFilterDialog";
 import useApiFilter from "@/hooks/use-api-filter";
-import { ClassificationItemData, TrainFilter } from "@/types/classification";
+import {
+  ClassificationDatasetResponse,
+  ClassificationItemData,
+  TrainFilter,
+} from "@/types/classification";
 import {
   ClassificationCard,
   GroupedClassificationCard,

@@ -118,17 +122,10 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
   const { data: trainImages, mutate: refreshTrain } = useSWR<string[]>(
     `classification/${model.name}/train`,
   );
-  const { data: datasetResponse, mutate: refreshDataset } = useSWR<{
-    categories: { [id: string]: string[] };
-    training_metadata: {
-      has_trained: boolean;
-      last_training_date: string | null;
-      last_training_image_count: number;
-      current_image_count: number;
-      new_images_count: number;
-      dataset_changed: boolean;
-    } | null;
-  }>(`classification/${model.name}/dataset`);
+  const { data: datasetResponse, mutate: refreshDataset } =
+    useSWR<ClassificationDatasetResponse>(
+      `classification/${model.name}/dataset`,
+    );
 
   const dataset = datasetResponse?.categories || {};
   const trainingMetadata = datasetResponse?.training_metadata;

View File

@@ -86,14 +86,6 @@ export default function DraggableGridLayout({
   // preferred live modes per camera
-  const {
-    preferredLiveModes,
-    setPreferredLiveModes,
-    resetPreferredLiveMode,
-    isRestreamedStates,
-    supportsAudioOutputStates,
-  } = useCameraLiveMode(cameras, windowVisible);
-
   const [globalAutoLive] = usePersistence("autoLiveView", true);
 
   const [displayCameraNames] = usePersistence("displayCameraNames", false);

@@ -106,6 +98,33 @@ export default function DraggableGridLayout({
     }
   }, [allGroupsStreamingSettings, cameraGroup]);
 
+  const activeStreams = useMemo(() => {
+    const streams: { [cameraName: string]: string } = {};
+    cameras.forEach((camera) => {
+      const availableStreams = camera.live.streams || {};
+      const streamNameFromSettings =
+        currentGroupStreamingSettings?.[camera.name]?.streamName || "";
+      const streamExists =
+        streamNameFromSettings &&
+        Object.values(availableStreams).includes(streamNameFromSettings);
+      const streamName = streamExists
+        ? streamNameFromSettings
+        : Object.values(availableStreams)[0] || "";
+      streams[camera.name] = streamName;
+    });
+    return streams;
+  }, [cameras, currentGroupStreamingSettings]);
+
+  const {
+    preferredLiveModes,
+    setPreferredLiveModes,
+    resetPreferredLiveMode,
+    isRestreamedStates,
+    supportsAudioOutputStates,
+  } = useCameraLiveMode(cameras, windowVisible, activeStreams);
+
   // grid layout
   const ResponsiveGridLayout = useMemo(() => WidthProvider(Responsive), []);

View File

@@ -162,6 +162,9 @@ export default function LiveCameraView({
     isRestreamed ? `go2rtc/streams/${streamName}` : null,
     {
       revalidateOnFocus: false,
+      revalidateOnReconnect: false,
+      revalidateIfStale: false,
+      dedupingInterval: 60000,
     },
   );

View File

@@ -202,14 +202,6 @@ export default function LiveDashboardView({
     };
   }, []);
 
-  const {
-    preferredLiveModes,
-    setPreferredLiveModes,
-    resetPreferredLiveMode,
-    isRestreamedStates,
-    supportsAudioOutputStates,
-  } = useCameraLiveMode(cameras, windowVisible);
-
   const [globalAutoLive] = usePersistence("autoLiveView", true);
 
   const [displayCameraNames] = usePersistence("displayCameraNames", false);

@@ -239,6 +231,33 @@ export default function LiveDashboardView({
     [visibleCameraObserver.current],
   );
 
+  const activeStreams = useMemo(() => {
+    const streams: { [cameraName: string]: string } = {};
+    cameras.forEach((camera) => {
+      const availableStreams = camera.live.streams || {};
+      const streamNameFromSettings =
+        currentGroupStreamingSettings?.[camera.name]?.streamName || "";
+      const streamExists =
+        streamNameFromSettings &&
+        Object.values(availableStreams).includes(streamNameFromSettings);
+      const streamName = streamExists
+        ? streamNameFromSettings
+        : Object.values(availableStreams)[0] || "";
+      streams[camera.name] = streamName;
+    });
+    return streams;
+  }, [cameras, currentGroupStreamingSettings]);
+
+  const {
+    preferredLiveModes,
+    setPreferredLiveModes,
+    resetPreferredLiveMode,
+    isRestreamedStates,
+    supportsAudioOutputStates,
+  } = useCameraLiveMode(cameras, windowVisible, activeStreams);
+
   const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
 
   const handleError = useCallback(