Mirror of https://github.com/blakeblackshear/frigate.git, synced 2025-12-16 10:06:42 +03:00

Compare commits
5 commits: 1d2e44533d ... ade3c1bdd3
| Author | SHA1 | Date |
|---|---|---|
| | ade3c1bdd3 | |
| | 28b0ad782a | |
| | 644c7fa6b4 | |
| | 88a8de0b1c | |
| | 0fabfa8ca8 | |
.github/copilot-instructions.md (vendored, new file, 2 lines added)

@@ -0,0 +1,2 @@
+Never write strings in the frontend directly, always write to and reference the relevant translations file.
+Always conform new and refactored code to the existing coding style in the project.
@@ -1,6 +1,6 @@
 # NVidia TensorRT Support (amd64 only)
 --extra-index-url 'https://pypi.nvidia.com'
-cython==3.0.*; platform_machine == 'x86_64'
+cython==3.2.*; platform_machine == 'x86_64'
 nvidia_cuda_cupti_cu12==12.5.82; platform_machine == 'x86_64'
 nvidia-cublas-cu12==12.5.3.*; platform_machine == 'x86_64'
 nvidia-cudnn-cu12==9.3.0.*; platform_machine == 'x86_64'
@@ -116,4 +116,4 @@ Along with individual review item summaries, Generative AI provides the ability
 
 Review reports can be requested via the [API](/integrations/api#review-summarization) by sending a POST request to `/api/review/summarize/start/{start_ts}/end/{end_ts}` with Unix timestamps.
 
-For Home Assistant users, there is a built-in service (`frigate.generate_review_summary`) that makes it easy to request review reports as part of automations or scripts. This allows you to automatically generate daily summaries, vacation reports, or custom time period reports based on your specific needs.
+For Home Assistant users, there is a built-in service (`frigate.review_summarize`) that makes it easy to request review reports as part of automations or scripts. This allows you to automatically generate daily summaries, vacation reports, or custom time period reports based on your specific needs.
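For orientation, a minimal sketch of calling the summarization endpoint documented above. The base URL is an assumption (adjust for your deployment), and any required authentication is omitted:

```python
import time

import requests

FRIGATE = "http://frigate.local:5000"  # assumed base URL, not from the diff

# Summarize the last 24 hours of review items (Unix timestamps).
end_ts = int(time.time())
start_ts = end_ts - 24 * 60 * 60

# POST per the docs: /api/review/summarize/start/{start_ts}/end/{end_ts}
resp = requests.post(f"{FRIGATE}/api/review/summarize/start/{start_ts}/end/{end_ts}")
resp.raise_for_status()
print(resp.text)
```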
@@ -28,7 +28,6 @@ To create a poly mask:
 5. Click the plus icon under the type of mask or zone you would like to create
 6. Click on the camera's latest image to create the points for a masked area. Click the first point again to close the polygon.
 7. When you've finished creating your mask, press Save.
-8. Restart Frigate to apply your changes.
 
 Your config file will be updated with the relative coordinates of the mask/zone:
 
@@ -1002,10 +1002,6 @@ ui:
   # full: 8:15:22 PM Mountain Standard Time
   # (default: shown below).
   time_style: medium
-  # Optional: Ability to manually override the date / time styling to use strftime format
-  # https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html
-  # possible values are shown above (default: not set)
-  strftime_fmt: "%Y/%m/%d %H:%M"
   # Optional: Set the unit system to either "imperial" or "metric" (default: metric)
   # Used in the UI and in MQTT topics
   unit_system: metric
@@ -837,7 +837,19 @@ async def recording_clip(
     dependencies=[Depends(require_camera_access)],
     description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
 )
-async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
+async def vod_ts(
+    camera_name: str,
+    start_ts: float,
+    end_ts: float,
+    force_discontinuity: bool = False,
+):
+    logger.debug(
+        "VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s",
+        camera_name,
+        start_ts,
+        end_ts,
+        force_discontinuity,
+    )
     recordings = (
         Recordings.select(
             Recordings.path,

@@ -862,6 +874,14 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
 
     recording: Recordings
     for recording in recordings:
+        logger.debug(
+            "VOD: processing recording: %s start=%s end=%s duration=%s",
+            recording.path,
+            recording.start_time,
+            recording.end_time,
+            recording.duration,
+        )
+
         clip = {"type": "source", "path": recording.path}
         duration = int(recording.duration * 1000)
 

@@ -870,6 +890,11 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
             inpoint = int((start_ts - recording.start_time) * 1000)
             clip["clipFrom"] = inpoint
             duration -= inpoint
+            logger.debug(
+                "VOD: applied clipFrom %sms to %s",
+                inpoint,
+                recording.path,
+            )
 
         # adjust end if recording.end_time is after end_ts
         if recording.end_time > end_ts:

@@ -877,12 +902,23 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
 
         if duration < min_duration_ms:
             # skip if the clip has no valid duration (too short to contain frames)
+            logger.debug(
+                "VOD: skipping recording %s - resulting duration %sms too short",
+                recording.path,
+                duration,
+            )
             continue
 
         if min_duration_ms <= duration < max_duration_ms:
             clip["keyFrameDurations"] = [duration]
             clips.append(clip)
             durations.append(duration)
+            logger.debug(
+                "VOD: added clip %s duration_ms=%s clipFrom=%s",
+                recording.path,
+                duration,
+                clip.get("clipFrom"),
+            )
         else:
             logger.warning(f"Recording clip is missing or empty: {recording.path}")
 

@@ -902,7 +938,7 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
     return JSONResponse(
         content={
             "cache": hour_ago.timestamp() > start_ts,
-            "discontinuity": False,
+            "discontinuity": force_discontinuity,
             "consistentSequenceMediaInfo": True,
             "durations": durations,
             "segment_duration": max(durations),

@@ -986,6 +1022,19 @@ async def vod_event(
     return vod_response
 
 
+@router.get(
+    "/vod/clip/{camera_name}/start/{start_ts}/end/{end_ts}",
+    dependencies=[Depends(require_camera_access)],
+    description="Returns an HLS playlist for a timestamp range with HLS discontinuity enabled. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
+async def vod_clip(
+    camera_name: str,
+    start_ts: float,
+    end_ts: float,
+):
+    return await vod_ts(camera_name, start_ts, end_ts, force_discontinuity=True)
+
+
 @router.get(
     "/events/{event_id}/snapshot.jpg",
     description="Returns a snapshot image for the specified object id. NOTE: The query params only take affect while the event is in-progress. Once the event has ended the snapshot configuration is used.",
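Taken together, these hunks thread a `force_discontinuity` flag through `vod_ts` and expose it via the new `/vod/clip/` route, which simply delegates with the flag forced on. A rough sketch of how a client might compare the two playlists; the base URL and camera name are placeholders, and it assumes the VOD packager translates the `discontinuity` flag into `#EXT-X-DISCONTINUITY` tags in the rendered playlist:

```python
import time

import requests

FRIGATE = "http://frigate.local:5000"  # assumed base URL
CAMERA = "front_door"                  # placeholder camera name

end_ts = time.time()
start_ts = end_ts - 60  # last 60 seconds of recordings

# /vod/ keeps discontinuity disabled; /vod/clip/ forces it on, which lets
# HLS players tolerate timestamp gaps between recording segments.
for route in (f"/vod/{CAMERA}", f"/vod/clip/{CAMERA}"):
    url = f"{FRIGATE}{route}/start/{start_ts}/end/{end_ts}/index.m3u8"
    playlist = requests.get(url)
    playlist.raise_for_status()
    tags = playlist.text.count("#EXT-X-DISCONTINUITY")
    print(f"{route}: {tags} discontinuity tag(s)")
```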
@@ -390,7 +390,20 @@ class WebPushClient(Communicator):
 
             message = payload["after"]["data"]["metadata"]["scene"]
         else:
-            title = f"{titlecase(', '.join(sorted_objects).replace('_', ' '))}{' was' if state == 'end' else ''} detected in {titlecase(', '.join(payload['after']['data']['zones']).replace('_', ' '))}"
+            zone_names = payload["after"]["data"]["zones"]
+            formatted_zone_names = []
+
+            for zone_name in zone_names:
+                if zone_name in self.config.cameras[camera].zones:
+                    formatted_zone_names.append(
+                        self.config.cameras[camera]
+                        .zones[zone_name]
+                        .get_formatted_name(zone_name)
+                    )
+                else:
+                    formatted_zone_names.append(titlecase(zone_name.replace("_", " ")))
+
+            title = f"{titlecase(', '.join(sorted_objects).replace('_', ' '))}{' was' if state == 'end' else ''} detected in {', '.join(formatted_zone_names)}"
             message = f"Detected on {camera_name}"
 
         if ended:
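The loop's intent is easier to see in isolation: prefer a zone's configured display name and fall back to prettifying the raw zone id. A condensed sketch of that fallback, with the config lookup simplified to a plain dict (hypothetical; the real code reads camera zone config via `get_formatted_name`):

```python
from titlecase import titlecase  # same helper the notification handler uses


def format_zone(zone_name: str, display_names: dict[str, str]) -> str:
    """Prefer a configured display name; otherwise prettify the raw zone id."""
    if zone_name in display_names:
        return display_names[zone_name]
    return titlecase(zone_name.replace("_", " "))


assert format_zone("front_porch", {}) == "Front Porch"
assert format_zone("front_porch", {"front_porch": "Porch Cam"}) == "Porch Cam"
```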
@@ -37,9 +37,6 @@ class UIConfig(FrigateBaseModel):
     time_style: DateTimeStyleEnum = Field(
         default=DateTimeStyleEnum.medium, title="Override UI timeStyle."
     )
-    strftime_fmt: Optional[str] = Field(
-        default=None, title="Override date and time format using strftime syntax."
-    )
     unit_system: UnitSystemEnum = Field(
         default=UnitSystemEnum.metric, title="The unit system to use for measurements."
     )
@@ -639,14 +639,14 @@ def write_classification_attempt(
     os.makedirs(folder, exist_ok=True)
     cv2.imwrite(file, frame)
 
-    files = sorted(
-        filter(lambda f: (f.endswith(".webp")), os.listdir(folder)),
-        key=lambda f: os.path.getctime(os.path.join(folder, f)),
-        reverse=True,
-    )
-
     # delete oldest face image if maximum is reached
-    if len(files) > max_files:
-        os.unlink(os.path.join(folder, files[-1]))
+    try:
+        files = sorted(
+            filter(lambda f: (f.endswith(".webp")), os.listdir(folder)),
+            key=lambda f: os.path.getctime(os.path.join(folder, f)),
+            reverse=True,
+        )
+
+        if len(files) > max_files:
+            os.unlink(os.path.join(folder, files[-1]))
+    except FileNotFoundError:
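The new try/except guards against the folder (or a file in it) disappearing between the write and the prune, e.g. when a cleanup job or the UI deletes attempts concurrently. The same pattern as a self-contained sketch, with hypothetical names:

```python
import os


def prune_oldest(folder: str, max_files: int, ext: str = ".webp") -> None:
    """Delete the oldest matching file once the folder holds more than max_files.

    Tolerates the folder or a file vanishing under concurrent cleanup.
    """
    try:
        files = sorted(
            (f for f in os.listdir(folder) if f.endswith(ext)),
            key=lambda f: os.path.getctime(os.path.join(folder, f)),
            reverse=True,  # newest first, so the oldest file ends up last
        )
        if len(files) > max_files:
            os.unlink(os.path.join(folder, files[-1]))
    except FileNotFoundError:
        pass  # folder or file was removed out from under us; nothing to prune
```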
@@ -13,9 +13,6 @@
     "time_style": {
       "label": "Override UI timeStyle."
     },
-    "strftime_fmt": {
-      "label": "Override date and time format using strftime syntax."
-    },
     "unit_system": {
       "label": "The unit system to use for measurements."
     }
@@ -1,5 +1,5 @@
 {
-  "documentTitle": "Classification Models",
+  "documentTitle": "Classification Models - Frigate",
   "details": {
     "scoreInfo": "Score represents the average classification confidence across all detections of this object."
   },

@@ -83,6 +83,7 @@
     "aria": "Select Recent Classifications"
   },
   "categories": "Classes",
+  "none": "None",
   "createCategory": {
     "new": "Create New Class"
   },
@@ -77,7 +77,7 @@
       "millisecondsToOffset": "Milliseconds to offset detect annotations by. <em>Default: 0</em>",
       "tips": "Lower the value if the video playback is ahead of the boxes and path points, and increase the value if the video playback is behind them. This value can be negative.",
       "toast": {
-        "success": "Annotation offset for {{camera}} has been saved to the config file. Restart Frigate to apply your changes."
+        "success": "Annotation offset for {{camera}} has been saved to the config file."
       }
     }
   },
@@ -534,7 +534,7 @@
       }
     },
     "toast": {
-      "success": "Zone ({{zoneName}}) has been saved. Restart Frigate to apply changes."
+      "success": "Zone ({{zoneName}}) has been saved."
     }
   },
   "motionMasks": {

@@ -558,8 +558,8 @@
     },
     "toast": {
       "success": {
-        "title": "{{polygonName}} has been saved. Restart Frigate to apply changes.",
-        "noName": "Motion Mask has been saved. Restart Frigate to apply changes."
+        "title": "{{polygonName}} has been saved.",
+        "noName": "Motion Mask has been saved."
       }
     }
   },

@@ -583,8 +583,8 @@
     },
     "toast": {
       "success": {
-        "title": "{{polygonName}} has been saved. Restart Frigate to apply changes.",
-        "noName": "Object Mask has been saved. Restart Frigate to apply changes."
+        "title": "{{polygonName}} has been saved.",
+        "noName": "Object Mask has been saved."
       }
     }
   }
@@ -131,7 +131,9 @@ export default function ClassificationSelectionDialog({
               className="flex cursor-pointer gap-2 smart-capitalize"
               onClick={() => onCategorizeImage(category)}
             >
-              {category.replaceAll("_", " ")}
+              {category === "none"
+                ? t("none")
+                : category.replaceAll("_", " ")}
             </SelectorItem>
           ))}
           <Separator />
@@ -446,7 +446,7 @@ export function TrackingDetails({
       (event.end_time ?? Date.now() / 1000) + annotationOffset / 1000;
     const startTime = eventStartRecord - REVIEW_PADDING;
     const endTime = eventEndRecord + REVIEW_PADDING;
-    const playlist = `${baseUrl}vod/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`;
+    const playlist = `${baseUrl}vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`;
 
     return {
       playlist,

@@ -559,7 +559,6 @@ export function TrackingDetails({
           isDetailMode={true}
          camera={event.camera}
          currentTimeOverride={currentTime}
-          enableGapControllerRecovery={true}
         />
         {isVideoLoading && (
           <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
@@ -180,7 +180,9 @@ export function ClassFilterContent({
         {allClasses.map((item) => (
           <FilterSwitch
             key={item}
-            label={item.replaceAll("_", " ")}
+            label={
+              item === "none" ? t("none") : item.replaceAll("_", " ")
+            }
             isChecked={classes?.includes(item) ?? false}
             onCheckedChange={(isChecked) => {
               if (isChecked) {
@@ -57,7 +57,6 @@ type HlsVideoPlayerProps = {
   isDetailMode?: boolean;
   camera?: string;
   currentTimeOverride?: number;
-  enableGapControllerRecovery?: boolean;
 };
 
 export default function HlsVideoPlayer({

@@ -82,7 +81,6 @@ export default function HlsVideoPlayer({
   isDetailMode = false,
   camera,
   currentTimeOverride,
-  enableGapControllerRecovery = false,
 }: HlsVideoPlayerProps) {
   const { t } = useTranslation("components/player");
   const { data: config } = useSWR<FrigateConfig>("config");

@@ -173,21 +171,12 @@ export default function HlsVideoPlayer({
     }
 
     // Base HLS configuration
-    const baseConfig: Partial<HlsConfig> = {
+    const hlsConfig: Partial<HlsConfig> = {
       maxBufferLength: 10,
       maxBufferSize: 20 * 1000 * 1000,
       startPosition: currentSource.startPosition,
     };
 
-    const hlsConfig = { ...baseConfig };
-
-    if (enableGapControllerRecovery) {
-      hlsConfig.highBufferWatchdogPeriod = 1; // Check for stalls every 1 second (default: 3)
-      hlsConfig.nudgeOffset = 0.2; // Nudge playhead forward 0.2s when stalled (default: 0.1)
-      hlsConfig.nudgeMaxRetry = 5; // Try up to 5 nudges before giving up (default: 3)
-      hlsConfig.maxBufferHole = 0.5; // Tolerate up to 0.5s gaps between fragments (default: 0.1)
-    }
-
     hlsRef.current = new Hls(hlsConfig);
     hlsRef.current.attachMedia(videoRef.current);
     hlsRef.current.loadSource(currentSource.playlist);

@@ -201,13 +190,7 @@ export default function HlsVideoPlayer({
         hlsRef.current.destroy();
       }
     };
-  }, [
-    videoRef,
-    hlsRef,
-    useHlsCompat,
-    currentSource,
-    enableGapControllerRecovery,
-  ]);
+  }, [videoRef, hlsRef, useHlsCompat, currentSource]);
 
   // state handling
 
@@ -371,7 +371,12 @@ export default function FaceLibrary() {
       {selectedFaces?.length > 0 ? (
         <div className="flex items-center justify-center gap-2">
           <div className="mx-1 flex w-48 items-center justify-center text-sm text-muted-foreground">
-            <div className="p-1">{`${selectedFaces.length} selected`}</div>
+            <div className="p-1">
+              {t("selected", {
+                ns: "views/event",
+                count: selectedFaces.length,
+              })}
+            </div>
             <div className="p-1">{"|"}</div>
             <div
               className="cursor-pointer p-2 text-primary hover:rounded-lg hover:bg-secondary"
@@ -6,7 +6,6 @@ export interface UiConfig {
   time_format?: "browser" | "12hour" | "24hour";
   date_style?: "full" | "long" | "medium" | "short";
   time_style?: "full" | "long" | "medium" | "short";
-  strftime_fmt?: string;
   dashboard: boolean;
   order: number;
   unit_system?: "metric" | "imperial";
@@ -84,6 +84,12 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
   const [page, setPage] = useState<string>("train");
   const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100);
 
+  // title
+
+  useEffect(() => {
+    document.title = `${model.name} - ${t("documentTitle")}`;
+  }, [model.name, t]);
+
   // model state
 
   const [wasTraining, setWasTraining] = useState(false);

@@ -416,7 +422,12 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
         )}
       >
         <div className="flex w-48 items-center justify-center text-sm text-muted-foreground">
-          <div className="p-1">{`${selectedImages.length} selected`}</div>
+          <div className="p-1">
+            {t("selected", {
+              ns: "views/event",
+              count: selectedImages.length,
+            })}
+          </div>
           <div className="p-1">{"|"}</div>
           <div
             className="cursor-pointer p-2 text-primary hover:rounded-lg hover:bg-secondary"

@@ -676,7 +687,7 @@ function LibrarySelector({
               className="flex-grow cursor-pointer capitalize"
               onClick={() => setPageToggle(id)}
             >
-              {id.replaceAll("_", " ")}
+              {id === "none" ? t("none") : id.replaceAll("_", " ")}
               <span className="ml-2 text-muted-foreground">
                 ({dataset?.[id].length})
               </span>