Merge 8d71d8be4a into 1a75251ffb
commit 62e3887745
@@ -157,3 +157,19 @@ Only one `speech` event may be transcribed at a time. Frigate does not automatic
 :::
 
 Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a supported Nvidia GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.
+
+#### FAQ
+
+1. Why doesn't Frigate automatically transcribe all `speech` events?
+
+Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That’s a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise.
+
+Because transcription is **serialized (one event at a time)** and speech events can be generated far faster than they can be processed, an auto-transcribe toggle would very quickly create an ever-growing backlog and degrade core functionality. For the amount of engineering and risk involved, it adds **very little practical value** for the majority of deployments, which are often on low-powered, edge hardware.
+
+If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
+
+2. Why don't you save live transcription text and use that for `speech` events?
+
+There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
+
+Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. That’s why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event.
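To make the backlog concern in answer 1 above concrete, here is a minimal, hypothetical sketch in plain Python — not Frigate's internals; `transcribe`, `enqueue`, and the bound of 16 are all illustrative. Even this toy version has to pick a capacity and a drop policy to avoid unbounded growth, and it still has none of the prioritization, persistence, or crash recovery a real queue would need:

```python
# Hypothetical sketch (not Frigate code): the minimal shape of the queue
# that the FAQ says is deliberately not implemented.
import queue
import threading


def transcribe(event_id: str) -> None:
    # Placeholder for the slow, serialized whisper transcription step.
    print(f"transcribing {event_id}")


pending: queue.Queue[str] = queue.Queue(maxsize=16)  # the bound IS the backpressure


def enqueue(event_id: str) -> bool:
    try:
        pending.put_nowait(event_id)
        return True
    except queue.Full:
        # Events arrive faster than they can be transcribed: something
        # must be dropped, retried later, or spilled to disk.
        return False


def worker() -> None:
    while True:
        transcribe(pending.get())  # one event at a time
        pending.task_done()


threading.Thread(target=worker, daemon=True).start()
```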
@@ -99,6 +99,42 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         if self.inference_speed:
             self.inference_speed.update(duration)
 
+    def _should_save_image(
+        self, camera: str, detected_state: str, score: float = 1.0
+    ) -> bool:
+        """
+        Determine if we should save the image for training.
+        Save when:
+        - State is changing or being verified (regardless of score)
+        - Score is less than 100% (even if state matches, useful for training)
+        Don't save when:
+        - State is stable (matches current_state) AND score is 100%
+        """
+        if camera not in self.state_history:
+            # First detection for this camera, save it
+            return True
+
+        verification = self.state_history[camera]
+        current_state = verification.get("current_state")
+        pending_state = verification.get("pending_state")
+
+        # Save if there's a pending state change being verified
+        if pending_state is not None:
+            return True
+
+        # Save if the detected state differs from the current verified state
+        # (state is changing)
+        if current_state is not None and detected_state != current_state:
+            return True
+
+        # If score is less than 100%, save even if state matches
+        # (useful for training to improve confidence)
+        if score < 1.0:
+            return True
+
+        # Don't save if state is stable (detected_state == current_state) AND score is 100%
+        return False
+
     def verify_state_change(self, camera: str, detected_state: str) -> str | None:
         """
         Verify state change requires 3 consecutive identical states before publishing.
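A quick, hypothetical harness for the save policy above. `Proc` stubs only `state_history` and restates the same decision logic so the example is self-contained; it is not the real `CustomStateClassificationProcessor`:

```python
class Proc:
    """Stub with the same save policy as _should_save_image above."""

    def __init__(self) -> None:
        self.state_history: dict[str, dict] = {}

    def should_save(self, camera: str, detected_state: str, score: float = 1.0) -> bool:
        if camera not in self.state_history:
            return True  # first detection for this camera
        verification = self.state_history[camera]
        if verification.get("pending_state") is not None:
            return True  # a state change is being verified
        current = verification.get("current_state")
        if current is not None and detected_state != current:
            return True  # state is changing
        return score < 1.0  # stable state: save only if confidence < 100%


proc = Proc()
assert proc.should_save("front", "open")  # first detection
proc.state_history["front"] = {"current_state": "open", "pending_state": None}
assert proc.should_save("front", "closed")  # differs from verified state
assert proc.should_save("front", "open", score=0.9)  # imperfect score
assert not proc.should_save("front", "open", score=1.0)  # stable and certain
```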
@@ -212,14 +248,16 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             return
 
         if self.interpreter is None:
-            write_classification_attempt(
-                self.train_dir,
-                cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
-                "none-none",
-                now,
-                "unknown",
-                0.0,
-            )
+            # When interpreter is None, always save (score is 0.0, which is < 1.0)
+            if self._should_save_image(camera, "unknown", 0.0):
+                write_classification_attempt(
+                    self.train_dir,
+                    cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
+                    "none-none",
+                    now,
+                    "unknown",
+                    0.0,
+                )
             return
 
         input = np.expand_dims(resized_frame, axis=0)
@@ -236,14 +274,17 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         score = round(probs[best_id], 2)
         self.__update_metrics(datetime.datetime.now().timestamp() - now)
 
-        write_classification_attempt(
-            self.train_dir,
-            cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
-            "none-none",
-            now,
-            self.labelmap[best_id],
-            score,
-        )
+        detected_state = self.labelmap[best_id]
+
+        if self._should_save_image(camera, detected_state, score):
+            write_classification_attempt(
+                self.train_dir,
+                cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
+                "none-none",
+                now,
+                detected_state,
+                score,
+            )
 
         if score < self.model_config.threshold:
             logger.debug(
@@ -251,7 +292,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             )
             return
 
-        detected_state = self.labelmap[best_id]
         verified_state = self.verify_state_change(camera, detected_state)
 
         if verified_state is not None:
@@ -190,7 +190,11 @@ class OnvifController:
         ptz: ONVIFService = await onvif.create_ptz_service()
         self.cams[camera_name]["ptz"] = ptz
 
-        imaging: ONVIFService = await onvif.create_imaging_service()
+        try:
+            imaging: ONVIFService = await onvif.create_imaging_service()
+        except (Fault, ONVIFError, TransportError, Exception) as e:
+            logger.debug(f"Imaging service not supported for {camera_name}: {e}")
+            imaging = None
         self.cams[camera_name]["imaging"] = imaging
         try:
             video_sources = await media.GetVideoSources()
@@ -381,7 +385,10 @@ class OnvifController:
                 f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported. Exception: {e}"
             )
 
-        if self.cams[camera_name]["video_source_token"] is not None:
+        if (
+            self.cams[camera_name]["video_source_token"] is not None
+            and imaging is not None
+        ):
             try:
                 imaging_capabilities = await imaging.GetImagingSettings(
                     {"VideoSourceToken": self.cams[camera_name]["video_source_token"]}
@@ -421,6 +428,7 @@ class OnvifController:
         if (
             "focus" in self.cams[camera_name]["features"]
             and self.cams[camera_name]["video_source_token"]
+            and self.cams[camera_name]["imaging"] is not None
         ):
             try:
                 stop_request = self.cams[camera_name]["imaging"].create_type("Stop")
@@ -648,6 +656,7 @@ class OnvifController:
         if (
             "focus" not in self.cams[camera_name]["features"]
             or not self.cams[camera_name]["video_source_token"]
+            or self.cams[camera_name]["imaging"] is None
         ):
             logger.error(f"{camera_name} does not support ONVIF continuous focus.")
             return
@@ -124,45 +124,50 @@ def capture_frames(
         config_subscriber.check_for_updates()
         return config.enabled
 
-    while not stop_event.is_set():
-        if not get_enabled_state():
-            logger.debug(f"Stopping capture thread for disabled {config.name}")
-            break
-
-        fps.value = frame_rate.eps()
-        skipped_fps.value = skipped_eps.eps()
-        current_frame.value = datetime.now().timestamp()
-        frame_name = f"{config.name}_frame{frame_index}"
-        frame_buffer = frame_manager.write(frame_name)
-        try:
-            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
-        except Exception:
-            # shutdown has been initiated
-            if stop_event.is_set():
-                break
-
-            logger.error(f"{config.name}: Unable to read frames from ffmpeg process.")
-
-            if ffmpeg_process.poll() is not None:
-                logger.error(
-                    f"{config.name}: ffmpeg process is not running. exiting capture thread..."
-                )
-                break
-
-            continue
-
-        frame_rate.update()
-
-        # don't lock the queue to check, just try since it should rarely be full
-        try:
-            # add to the queue
-            frame_queue.put((frame_name, current_frame.value), False)
-            frame_manager.close(frame_name)
-        except queue.Full:
-            # if the queue is full, skip this frame
-            skipped_eps.update()
-
-        frame_index = 0 if frame_index == shm_frame_count - 1 else frame_index + 1
+    try:
+        while not stop_event.is_set():
+            if not get_enabled_state():
+                logger.debug(f"Stopping capture thread for disabled {config.name}")
+                break
+
+            fps.value = frame_rate.eps()
+            skipped_fps.value = skipped_eps.eps()
+            current_frame.value = datetime.now().timestamp()
+            frame_name = f"{config.name}_frame{frame_index}"
+            frame_buffer = frame_manager.write(frame_name)
+            try:
+                frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
+            except Exception:
+                # shutdown has been initiated
+                if stop_event.is_set():
+                    break
+
+                logger.error(
+                    f"{config.name}: Unable to read frames from ffmpeg process."
+                )
+
+                if ffmpeg_process.poll() is not None:
+                    logger.error(
+                        f"{config.name}: ffmpeg process is not running. exiting capture thread..."
+                    )
+                    break
+
+                continue
+
+            frame_rate.update()
+
+            # don't lock the queue to check, just try since it should rarely be full
+            try:
+                # add to the queue
+                frame_queue.put((frame_name, current_frame.value), False)
+                frame_manager.close(frame_name)
+            except queue.Full:
+                # if the queue is full, skip this frame
+                skipped_eps.update()
+
+            frame_index = 0 if frame_index == shm_frame_count - 1 else frame_index + 1
+    finally:
+        config_subscriber.stop()
 
 
 class CameraWatchdog(threading.Thread):
@@ -234,6 +239,16 @@ class CameraWatchdog(threading.Thread):
             else:
                 self.ffmpeg_detect_process.wait()
 
+            # Wait for old capture thread to fully exit before starting a new one
+            if self.capture_thread is not None and self.capture_thread.is_alive():
+                self.logger.info("Waiting for capture thread to exit...")
+                self.capture_thread.join(timeout=5)
+
+                if self.capture_thread.is_alive():
+                    self.logger.warning(
+                        f"Capture thread for {self.config.name} did not exit in time"
+                    )
+
             self.logger.error(
                 "The following ffmpeg logs include the last 100 lines prior to exit."
             )
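The join-before-restart logic above generalizes to any thread that owns an exclusive resource (here, the ffmpeg stdout pipe and shared frame buffers). A hedged sketch of the pattern — `restart_capture` and `make_thread` are illustrative names, not Frigate's API:

```python
import threading
from typing import Callable, Optional


def restart_capture(
    old_thread: Optional[threading.Thread],
    make_thread: Callable[[], threading.Thread],
) -> threading.Thread:
    # Join the old thread first so it releases its exclusive resources
    # before a replacement starts reading them.
    if old_thread is not None and old_thread.is_alive():
        old_thread.join(timeout=5)
        if old_thread.is_alive():
            # Warn rather than block forever; the stale thread can still
            # observe its stop signal and wind down on its own.
            print("capture thread did not exit in time")
    new_thread = make_thread()
    new_thread.start()
    return new_thread
```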
@@ -37,7 +37,7 @@ import { useForm } from "react-hook-form";
 import { useTranslation } from "react-i18next";
 import { LuPlus, LuX } from "react-icons/lu";
 import { toast } from "sonner";
-import useSWR from "swr";
+import useSWR, { mutate } from "swr";
 import { z } from "zod";
 
 type ClassificationModelEditDialogProps = {
@@ -240,15 +240,61 @@ export default function ClassificationModelEditDialog({
           position: "top-center",
         });
       } else {
-        // State model - update classes
-        // Note: For state models, updating classes requires renaming categories
-        // which is handled through the dataset API, not the config API
-        // We'll need to implement this by calling the rename endpoint for each class
-        // For now, we just show a message that this requires retraining
+        const stateData = data as StateFormData;
+        const newClasses = stateData.classes.filter(
+          (c) => c.trim().length > 0,
+        );
+        const oldClasses = dataset?.categories
+          ? Object.keys(dataset.categories).filter((key) => key !== "none")
+          : [];
 
-        toast.info(t("edit.stateClassesInfo"), {
-          position: "top-center",
-        });
+        const renameMap = new Map<string, string>();
+        const maxLength = Math.max(oldClasses.length, newClasses.length);
+
+        for (let i = 0; i < maxLength; i++) {
+          const oldClass = oldClasses[i];
+          const newClass = newClasses[i];
+
+          if (oldClass && newClass && oldClass !== newClass) {
+            renameMap.set(oldClass, newClass);
+          }
+        }
+
+        const renamePromises = Array.from(renameMap.entries()).map(
+          async ([oldName, newName]) => {
+            try {
+              await axios.put(
+                `/classification/${model.name}/dataset/${oldName}/rename`,
+                {
+                  new_category: newName,
+                },
+              );
+            } catch (err) {
+              const error = err as {
+                response?: { data?: { message?: string; detail?: string } };
+              };
+              const errorMessage =
+                error.response?.data?.message ||
+                error.response?.data?.detail ||
+                "Unknown error";
+              throw new Error(
+                `Failed to rename ${oldName} to ${newName}: ${errorMessage}`,
+              );
+            }
+          },
+        );
+
+        if (renamePromises.length > 0) {
+          await Promise.all(renamePromises);
+          await mutate(`classification/${model.name}/dataset`);
+          toast.success(t("toast.success.updatedModel"), {
+            position: "top-center",
+          });
+        } else {
+          toast.info(t("edit.stateClassesInfo"), {
+            position: "top-center",
+          });
+        }
       }
 
       onSuccess();
@@ -256,8 +302,10 @@ export default function ClassificationModelEditDialog({
     } catch (err) {
       const error = err as {
         response?: { data?: { message?: string; detail?: string } };
+        message?: string;
       };
       const errorMessage =
+        error.message ||
         error.response?.data?.message ||
         error.response?.data?.detail ||
         "Unknown error";
@@ -268,7 +316,7 @@ export default function ClassificationModelEditDialog({
         setIsSaving(false);
       }
     },
-    [isObjectModel, model, t, onSuccess, onClose],
+    [isObjectModel, model, dataset, t, onSuccess, onClose],
   );
 
   const handleCancel = useCallback(() => {
@@ -48,6 +48,7 @@ import { useTranslation } from "react-i18next";
 import { useDateLocale } from "@/hooks/use-date-locale";
 import { useIsAdmin } from "@/hooks/use-is-admin";
 import { CameraNameLabel } from "../camera/FriendlyNameLabel";
+import { LiveStreamMetadata } from "@/types/live";
 
 type LiveContextMenuProps = {
   className?: string;
@@ -68,6 +69,7 @@ type LiveContextMenuProps = {
   resetPreferredLiveMode: () => void;
   config?: FrigateConfig;
   children?: ReactNode;
+  streamMetadata?: { [key: string]: LiveStreamMetadata };
 };
 export default function LiveContextMenu({
   className,
@@ -88,6 +90,7 @@ export default function LiveContextMenu({
   resetPreferredLiveMode,
   config,
   children,
+  streamMetadata,
 }: LiveContextMenuProps) {
   const { t } = useTranslation("views/live");
   const [showSettings, setShowSettings] = useState(false);
@@ -558,6 +561,7 @@ export default function LiveContextMenu({
             setGroupStreamingSettings={setGroupStreamingSettings}
             setIsDialogOpen={setShowSettings}
             onSave={onSave}
+            streamMetadata={streamMetadata}
           />
         </Dialog>
       </div>
@@ -38,6 +38,7 @@ import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name";
 type CameraStreamingDialogProps = {
   camera: string;
   groupStreamingSettings: GroupStreamingSettings;
+  streamMetadata?: { [key: string]: LiveStreamMetadata };
   setGroupStreamingSettings: React.Dispatch<
     React.SetStateAction<GroupStreamingSettings>
   >;
@@ -48,6 +49,7 @@ type CameraStreamingDialogProps = {
 export function CameraStreamingDialog({
   camera,
   groupStreamingSettings,
+  streamMetadata,
   setGroupStreamingSettings,
   setIsDialogOpen,
   onSave,
@@ -76,12 +78,7 @@ export function CameraStreamingDialog({
     [config, streamName],
   );
 
-  const { data: cameraMetadata } = useSWR<LiveStreamMetadata>(
-    isRestreamed ? `go2rtc/streams/${streamName}` : null,
-    {
-      revalidateOnFocus: false,
-    },
-  );
+  const cameraMetadata = streamName ? streamMetadata?.[streamName] : undefined;
 
   const supportsAudioOutput = useMemo(() => {
     if (!cameraMetadata) {
@@ -1,8 +1,8 @@
-import { baseUrl } from "@/api/baseUrl";
 import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
 import { useCallback, useEffect, useState, useMemo } from "react";
 import useSWR from "swr";
-import { LivePlayerMode, LiveStreamMetadata } from "@/types/live";
+import { LivePlayerMode } from "@/types/live";
+import useDeferredStreamMetadata from "./use-deferred-stream-metadata";
 
 export default function useCameraLiveMode(
   cameras: CameraConfig[],
@@ -11,9 +11,9 @@ export default function useCameraLiveMode(
 ) {
   const { data: config } = useSWR<FrigateConfig>("config");
 
-  // Get comma-separated list of restreamed stream names for SWR key
-  const restreamedStreamsKey = useMemo(() => {
-    if (!cameras || !config) return null;
+  // Compute which streams need metadata (restreamed streams only)
+  const restreamedStreamNames = useMemo(() => {
+    if (!cameras || !config) return [];
 
     const streamNames = new Set<string>();
     cameras.forEach((camera) => {
@@ -32,56 +32,13 @@ export default function useCameraLiveMode(
       }
     });
 
-    return streamNames.size > 0
-      ? Array.from(streamNames).sort().join(",")
-      : null;
+    return Array.from(streamNames);
   }, [cameras, config, activeStreams]);
 
-  const streamsFetcher = useCallback(async (key: string) => {
-    const streamNames = key.split(",");
-
-    const metadataPromises = streamNames.map(async (streamName) => {
-      try {
-        const response = await fetch(
-          `${baseUrl}api/go2rtc/streams/${streamName}`,
-          {
-            priority: "low",
-          },
-        );
-
-        if (response.ok) {
-          const data = await response.json();
-          return { streamName, data };
-        }
-        return { streamName, data: null };
-      } catch (error) {
-        // eslint-disable-next-line no-console
-        console.error(`Failed to fetch metadata for ${streamName}:`, error);
-        return { streamName, data: null };
-      }
-    });
-
-    const results = await Promise.allSettled(metadataPromises);
-
-    const metadata: { [key: string]: LiveStreamMetadata } = {};
-    results.forEach((result) => {
-      if (result.status === "fulfilled" && result.value.data) {
-        metadata[result.value.streamName] = result.value.data;
-      }
-    });
-
-    return metadata;
-  }, []);
-
-  const { data: allStreamMetadata = {} } = useSWR<{
-    [key: string]: LiveStreamMetadata;
-  }>(restreamedStreamsKey, streamsFetcher, {
-    revalidateOnFocus: false,
-    revalidateOnReconnect: false,
-    revalidateIfStale: false,
-    dedupingInterval: 60000,
-  });
+  // Fetch stream metadata with deferred loading (doesn't block initial render)
+  const streamMetadata = useDeferredStreamMetadata(restreamedStreamNames);
 
+  // Compute live mode states
   const [preferredLiveModes, setPreferredLiveModes] = useState<{
     [key: string]: LivePlayerMode;
   }>({});
@@ -122,10 +79,10 @@ export default function useCameraLiveMode(
         newPreferredLiveModes[camera.name] = isRestreamed ? "mse" : "jsmpeg";
       }
 
-      // check each stream for audio support
+      // Check each stream for audio support
       if (isRestreamed) {
         Object.values(camera.live.streams).forEach((streamName) => {
-          const metadata = allStreamMetadata?.[streamName];
+          const metadata = streamMetadata[streamName];
           newSupportsAudioOutputStates[streamName] = {
             supportsAudio: metadata
               ? metadata.producers.find(
@@ -150,7 +107,7 @@ export default function useCameraLiveMode(
     setPreferredLiveModes(newPreferredLiveModes);
     setIsRestreamedStates(newIsRestreamedStates);
     setSupportsAudioOutputStates(newSupportsAudioOutputStates);
-  }, [cameras, config, windowVisible, allStreamMetadata]);
+  }, [cameras, config, windowVisible, streamMetadata]);
 
   const resetPreferredLiveMode = useCallback(
     (cameraName: string) => {
@@ -180,5 +137,6 @@ export default function useCameraLiveMode(
     resetPreferredLiveMode,
     isRestreamedStates,
     supportsAudioOutputStates,
+    streamMetadata,
   };
 }
web/src/hooks/use-deferred-stream-metadata.ts (new file, 90 lines)
@@ -0,0 +1,90 @@
+import { baseUrl } from "@/api/baseUrl";
+import { useCallback, useEffect, useState, useMemo } from "react";
+import useSWR from "swr";
+import { LiveStreamMetadata } from "@/types/live";
+
+const FETCH_TIMEOUT_MS = 10000;
+const DEFER_DELAY_MS = 2000;
+
+/**
+ * Hook that fetches go2rtc stream metadata with deferred loading.
+ *
+ * Metadata fetching is delayed to prevent blocking initial page load
+ * and camera image requests.
+ *
+ * @param streamNames - Array of stream names to fetch metadata for
+ * @returns Object containing stream metadata keyed by stream name
+ */
+export default function useDeferredStreamMetadata(streamNames: string[]) {
+  const [fetchEnabled, setFetchEnabled] = useState(false);
+
+  useEffect(() => {
+    const timeoutId = setTimeout(() => {
+      setFetchEnabled(true);
+    }, DEFER_DELAY_MS);
+
+    return () => clearTimeout(timeoutId);
+  }, []);
+
+  const swrKey = useMemo(() => {
+    if (!fetchEnabled || streamNames.length === 0) return null;
+    // Use spread to avoid mutating the original array
+    return `deferred-streams:${[...streamNames].sort().join(",")}`;
+  }, [fetchEnabled, streamNames]);
+
+  const fetcher = useCallback(async (key: string) => {
+    // Extract stream names from key (remove prefix)
+    const names = key.replace("deferred-streams:", "").split(",");
+
+    const promises = names.map(async (streamName) => {
+      const controller = new AbortController();
+      const timeoutId = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
+
+      try {
+        const response = await fetch(
+          `${baseUrl}api/go2rtc/streams/${streamName}`,
+          {
+            priority: "low",
+            signal: controller.signal,
+          },
+        );
+        clearTimeout(timeoutId);
+
+        if (response.ok) {
+          const data = await response.json();
+          return { streamName, data };
+        }
+        return { streamName, data: null };
+      } catch (error) {
+        clearTimeout(timeoutId);
+        if ((error as Error).name !== "AbortError") {
+          // eslint-disable-next-line no-console
+          console.error(`Failed to fetch metadata for ${streamName}:`, error);
+        }
+        return { streamName, data: null };
+      }
+    });
+
+    const results = await Promise.allSettled(promises);
+
+    const metadata: { [key: string]: LiveStreamMetadata } = {};
+    results.forEach((result) => {
+      if (result.status === "fulfilled" && result.value.data) {
+        metadata[result.value.streamName] = result.value.data;
+      }
+    });
+
+    return metadata;
+  }, []);
+
+  const { data: metadata = {} } = useSWR<{
+    [key: string]: LiveStreamMetadata;
+  }>(swrKey, fetcher, {
+    revalidateOnFocus: false,
+    revalidateOnReconnect: false,
+    revalidateIfStale: false,
+    dedupingInterval: 60000,
+  });
+
+  return metadata;
+}
@@ -24,6 +24,7 @@ import "react-resizable/css/styles.css";
 import {
   AudioState,
   LivePlayerMode,
+  LiveStreamMetadata,
   StatsState,
   VolumeState,
 } from "@/types/live";
@@ -47,7 +48,6 @@
   TooltipContent,
 } from "@/components/ui/tooltip";
 import { Toaster } from "@/components/ui/sonner";
-import useCameraLiveMode from "@/hooks/use-camera-live-mode";
 import LiveContextMenu from "@/components/menu/LiveContextMenu";
 import { useStreamingSettings } from "@/context/streaming-settings-provider";
 import { useTranslation } from "react-i18next";
@@ -65,6 +65,16 @@ type DraggableGridLayoutProps = {
   setIsEditMode: React.Dispatch<React.SetStateAction<boolean>>;
   fullscreen: boolean;
   toggleFullscreen: () => void;
+  preferredLiveModes: { [key: string]: LivePlayerMode };
+  setPreferredLiveModes: React.Dispatch<
+    React.SetStateAction<{ [key: string]: LivePlayerMode }>
+  >;
+  resetPreferredLiveMode: (cameraName: string) => void;
+  isRestreamedStates: { [key: string]: boolean };
+  supportsAudioOutputStates: {
+    [key: string]: { supportsAudio: boolean; cameraName: string };
+  };
+  streamMetadata: { [key: string]: LiveStreamMetadata };
 };
 export default function DraggableGridLayout({
   cameras,
@@ -79,6 +89,12 @@ export default function DraggableGridLayout({
   setIsEditMode,
   fullscreen,
   toggleFullscreen,
+  preferredLiveModes,
+  setPreferredLiveModes,
+  resetPreferredLiveMode,
+  isRestreamedStates,
+  supportsAudioOutputStates,
+  streamMetadata,
 }: DraggableGridLayoutProps) {
   const { t } = useTranslation(["views/live"]);
   const { data: config } = useSWR<FrigateConfig>("config");
@@ -98,33 +114,6 @@ export default function DraggableGridLayout({
     }
   }, [allGroupsStreamingSettings, cameraGroup]);
 
-  const activeStreams = useMemo(() => {
-    const streams: { [cameraName: string]: string } = {};
-    cameras.forEach((camera) => {
-      const availableStreams = camera.live.streams || {};
-      const streamNameFromSettings =
-        currentGroupStreamingSettings?.[camera.name]?.streamName || "";
-      const streamExists =
-        streamNameFromSettings &&
-        Object.values(availableStreams).includes(streamNameFromSettings);
-
-      const streamName = streamExists
-        ? streamNameFromSettings
-        : Object.values(availableStreams)[0] || "";
-
-      streams[camera.name] = streamName;
-    });
-    return streams;
-  }, [cameras, currentGroupStreamingSettings]);
-
-  const {
-    preferredLiveModes,
-    setPreferredLiveModes,
-    resetPreferredLiveMode,
-    isRestreamedStates,
-    supportsAudioOutputStates,
-  } = useCameraLiveMode(cameras, windowVisible, activeStreams);
-
   // grid layout
 
   const ResponsiveGridLayout = useMemo(() => WidthProvider(Responsive), []);
@@ -624,6 +613,7 @@ export default function DraggableGridLayout({
                   resetPreferredLiveMode(camera.name)
                 }
                 config={config}
+                streamMetadata={streamMetadata}
               >
                 <LivePlayer
                   key={camera.name}
@@ -838,6 +828,7 @@ type GridLiveContextMenuProps = {
   unmuteAll: () => void;
   resetPreferredLiveMode: () => void;
   config?: FrigateConfig;
+  streamMetadata?: { [key: string]: LiveStreamMetadata };
 };
 
 const GridLiveContextMenu = React.forwardRef<
@@ -868,6 +859,7 @@ const GridLiveContextMenu = React.forwardRef<
     unmuteAll,
     resetPreferredLiveMode,
     config,
+    streamMetadata,
     ...props
   },
   ref,
@@ -899,6 +891,7 @@ const GridLiveContextMenu = React.forwardRef<
       unmuteAll={unmuteAll}
      resetPreferredLiveMode={resetPreferredLiveMode}
      config={config}
+      streamMetadata={streamMetadata}
    >
      {children}
    </LiveContextMenu>
@@ -265,6 +265,7 @@ export default function LiveDashboardView({
     resetPreferredLiveMode,
     isRestreamedStates,
     supportsAudioOutputStates,
+    streamMetadata,
   } = useCameraLiveMode(cameras, windowVisible, activeStreams);
 
   const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
@@ -650,6 +651,12 @@ export default function LiveDashboardView({
             setIsEditMode={setIsEditMode}
             fullscreen={fullscreen}
             toggleFullscreen={toggleFullscreen}
+            preferredLiveModes={preferredLiveModes}
+            setPreferredLiveModes={setPreferredLiveModes}
+            resetPreferredLiveMode={resetPreferredLiveMode}
+            isRestreamedStates={isRestreamedStates}
+            supportsAudioOutputStates={supportsAudioOutputStates}
+            streamMetadata={streamMetadata}
           />
         )}
       </>
@@ -478,33 +478,32 @@ export default function AuthenticationView({
                     <TableCell className="text-right">
                       <TooltipProvider>
                         <div className="flex items-center justify-end gap-2">
-                          {user.username !== "admin" &&
-                            user.username !== "viewer" && (
-                              <Tooltip>
-                                <TooltipTrigger asChild>
-                                  <Button
-                                    size="sm"
-                                    variant="outline"
-                                    className="h-8 px-2"
-                                    onClick={() => {
-                                      setSelectedUser(user.username);
-                                      setSelectedUserRole(
-                                        user.role || "viewer",
-                                      );
-                                      setShowRoleChange(true);
-                                    }}
-                                  >
-                                    <LuUserCog className="size-3.5" />
-                                    <span className="ml-1.5 hidden sm:inline-block">
-                                      {t("role.title", { ns: "common" })}
-                                    </span>
-                                  </Button>
-                                </TooltipTrigger>
-                                <TooltipContent>
-                                  <p>{t("users.table.changeRole")}</p>
-                                </TooltipContent>
-                              </Tooltip>
-                            )}
+                          {user.username !== "admin" && (
+                            <Tooltip>
+                              <TooltipTrigger asChild>
+                                <Button
+                                  size="sm"
+                                  variant="outline"
+                                  className="h-8 px-2"
+                                  onClick={() => {
+                                    setSelectedUser(user.username);
+                                    setSelectedUserRole(
+                                      user.role || "viewer",
+                                    );
+                                    setShowRoleChange(true);
+                                  }}
+                                >
+                                  <LuUserCog className="size-3.5" />
+                                  <span className="ml-1.5 hidden sm:inline-block">
+                                    {t("role.title", { ns: "common" })}
+                                  </span>
+                                </Button>
+                              </TooltipTrigger>
+                              <TooltipContent>
+                                <p>{t("users.table.changeRole")}</p>
+                              </TooltipContent>
+                            </Tooltip>
+                          )}
 
                           <Tooltip>
                             <TooltipTrigger asChild>