mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-05-08 06:25:27 +03:00
Compare commits
4 Commits
e8c034a6e5
...
d1b88b53d1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d1b88b53d1 | ||
|
|
a182385618 | ||
|
|
088e1ad7ef | ||
|
|
011ad8eda7 |
@ -32,11 +32,14 @@ RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
|
||||
FROM deps AS deps-prelim
|
||||
|
||||
COPY docker/rocm/debian-backports.sources /etc/apt/sources.list.d/debian-backports.sources
|
||||
RUN apt-get update && \
|
||||
# install_deps.sh upgraded libstdc++6 from trixie for Battlemage; the matching
|
||||
# -dev package must also come from trixie or apt refuses to satisfy it.
|
||||
RUN echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y libnuma1 && \
|
||||
apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers && \
|
||||
# Install C++ standard library headers for HIPRTC kernel compilation fallback
|
||||
apt-get install -qq -y libstdc++-12-dev && \
|
||||
apt-get install -qq -y -t trixie libstdc++-14-dev && \
|
||||
rm -f /etc/apt/sources.list.d/trixie.list && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /opt/frigate
|
||||
|
||||
@ -5,13 +5,15 @@ import logging
|
||||
import random
|
||||
import string
|
||||
import time
|
||||
import zipfile
|
||||
from collections import deque
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
from typing import Iterator, List, Optional
|
||||
|
||||
import psutil
|
||||
from fastapi import APIRouter, Depends, Query, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
from pathvalidate import sanitize_filepath
|
||||
from fastapi.responses import JSONResponse, StreamingResponse
|
||||
from pathvalidate import sanitize_filename, sanitize_filepath
|
||||
from peewee import DoesNotExist
|
||||
from playhouse.shortcuts import model_to_dict
|
||||
|
||||
@ -361,6 +363,136 @@ def get_export_case(case_id: str):
|
||||
)
|
||||
|
||||
|
||||
# Read/copy granularity when streaming export files into the zip archive.
_ZIP_STREAM_CHUNK_SIZE = 1024 * 1024  # 1 MiB
|
||||
|
||||
|
||||
class _StreamingZipBuffer:
|
||||
"""File-like sink for ZipFile that exposes written bytes via drain().
|
||||
|
||||
ZipFile writes synchronously into this buffer; the generator drains the
|
||||
queue between writes so StreamingResponse can yield bytes without
|
||||
materializing the whole archive in memory.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._queue: deque[bytes] = deque()
|
||||
self._offset = 0
|
||||
|
||||
def write(self, data: bytes) -> int:
|
||||
if data:
|
||||
self._queue.append(bytes(data))
|
||||
self._offset += len(data)
|
||||
return len(data)
|
||||
|
||||
def tell(self) -> int:
|
||||
return self._offset
|
||||
|
||||
def flush(self) -> None:
|
||||
pass
|
||||
|
||||
def drain(self) -> Iterator[bytes]:
|
||||
while self._queue:
|
||||
yield self._queue.popleft()
|
||||
|
||||
|
||||
def _unique_archive_name(export: Export, used: set[str]) -> str:
    """Return a unique "<name>.mp4" arcname for *export*, tracked via *used*.

    Prefers the sanitized export name; falls back to "<camera>_<unix ts>"
    when the export has no name (or it sanitizes to nothing). Collisions are
    resolved by appending "_1", "_2", ... The chosen name is added to *used*
    as a side effect.
    """
    stem = sanitize_filename(export.name) if export.name else None
    if not stem:
        stem = f"{export.camera}_{int(datetime.datetime.timestamp(export.date))}"

    name = f"{stem}.mp4"
    suffix = 1
    while name in used:
        name = f"{stem}_{suffix}.mp4"
        suffix += 1

    used.add(name)
    return name
|
||||
|
||||
|
||||
def _stream_case_archive(exports: List[Export]) -> Iterator[bytes]:
    """Yield bytes of a zip archive built from the given exports' mp4 files."""
    sink = _StreamingZipBuffer()
    taken: set[str] = set()

    # ZIP_STORED: mp4 is already compressed, recompressing wastes CPU for ~0% size win.
    archive = zipfile.ZipFile(
        sink,
        mode="w",
        compression=zipfile.ZIP_STORED,
        allowZip64=True,
    )
    with archive:
        for export in exports:
            video = Path(export.video_path)
            # Skip exports whose file has vanished instead of aborting
            # the whole download mid-stream.
            if not video.exists():
                continue

            arcname = _unique_archive_name(export, taken)

            with archive.open(arcname, mode="w", force_zip64=True) as entry:
                with video.open("rb") as src:
                    # Copy in fixed-size chunks, draining the sink after
                    # every write so memory stays flat.
                    while chunk := src.read(_ZIP_STREAM_CHUNK_SIZE):
                        entry.write(chunk)
                        yield from sink.drain()

            # Closing the entry writes its descriptor; flush that too.
            yield from sink.drain()

    # Closing the archive writes the central directory — the final drain
    # delivers it.
    yield from sink.drain()
|
||||
|
||||
|
||||
@router.get(
    "/cases/{case_id}/download",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Download export case as zip",
    description="Streams a zip archive containing every completed export's mp4 for the given case.",
)
def download_export_case(
    case_id: str,
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Stream every finished, camera-authorized export of a case as one zip."""
    try:
        case = ExportCase.get(ExportCase.id == case_id)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )

    # Only completed exports for cameras this user may see, oldest first.
    query = (
        Export.select()
        .where(
            Export.export_case == case_id,
            ~Export.in_progress,
            Export.camera << allowed_cameras,
        )
        .order_by(Export.date.asc())
    )
    exports = list(query)

    if not exports:
        return JSONResponse(
            content={"success": False, "message": "No exports available to download."},
            status_code=404,
        )

    # Prefer the sanitized case name for the download filename; fall back to
    # the opaque case id when the name is empty or sanitizes to nothing.
    download_stem = (sanitize_filename(case.name) if case.name else "") or case_id

    return StreamingResponse(
        _stream_case_archive(exports),
        media_type="application/zip",
        headers={
            "Content-Disposition": f'attachment; filename="{download_stem}.zip"',
        },
    )
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/cases/{case_id}",
|
||||
response_model=GenericResponse,
|
||||
|
||||
@ -20,6 +20,7 @@ class CameraConfigUpdateEnum(str, Enum):
|
||||
ffmpeg = "ffmpeg"
|
||||
live = "live"
|
||||
motion = "motion" # includes motion and motion masks
|
||||
mqtt = "mqtt"
|
||||
notifications = "notifications"
|
||||
objects = "objects"
|
||||
object_genai = "object_genai"
|
||||
@ -33,6 +34,7 @@ class CameraConfigUpdateEnum(str, Enum):
|
||||
lpr = "lpr"
|
||||
snapshots = "snapshots"
|
||||
timestamp_style = "timestamp_style"
|
||||
ui = "ui"
|
||||
zones = "zones"
|
||||
|
||||
|
||||
|
||||
@ -27,7 +27,7 @@ class ReviewMetadata(BaseModel):
|
||||
)
|
||||
title: str = Field(
|
||||
max_length=80,
|
||||
description="A short title characterizing what took place and where, under 10 words.",
|
||||
description="Under 10 words. Name the apparent purpose or outcome of the activity together with the location involved. Do not narrate or list the sequence of actions step by step.",
|
||||
)
|
||||
scene: str = Field(
|
||||
min_length=150,
|
||||
@ -36,7 +36,7 @@ class ReviewMetadata(BaseModel):
|
||||
)
|
||||
shortSummary: str = Field(
|
||||
min_length=70,
|
||||
max_length=100,
|
||||
max_length=120,
|
||||
description="A brief 2-sentence summary of the scene, suitable for notifications.",
|
||||
)
|
||||
confidence: float = Field(
|
||||
|
||||
@ -517,10 +517,16 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
try:
|
||||
event: Event = Event.get(Event.id == event_id)
|
||||
except DoesNotExist:
|
||||
for processor in self.post_processors:
|
||||
if isinstance(processor, ObjectDescriptionProcessor):
|
||||
processor.cleanup_event(event_id)
|
||||
continue
|
||||
|
||||
# Skip the event if not an object
|
||||
if event.data.get("type") != "object":
|
||||
for processor in self.post_processors:
|
||||
if isinstance(processor, ObjectDescriptionProcessor):
|
||||
processor.cleanup_event(event_id)
|
||||
continue
|
||||
|
||||
# Extract valid thumbnail
|
||||
|
||||
@ -205,6 +205,7 @@ class AudioEventMaintainer(threading.Thread):
|
||||
self.transcription_thread.start()
|
||||
|
||||
self.was_enabled = camera.enabled
|
||||
self.was_audio_enabled = camera.audio.enabled
|
||||
|
||||
def detect_audio(self, audio: np.ndarray) -> None:
|
||||
if not self.camera_config.audio.enabled or self.stop_event.is_set():
|
||||
@ -363,6 +364,17 @@ class AudioEventMaintainer(threading.Thread):
|
||||
time.sleep(0.1)
|
||||
continue
|
||||
|
||||
audio_enabled = self.camera_config.audio.enabled
|
||||
if audio_enabled != self.was_audio_enabled:
|
||||
if not audio_enabled:
|
||||
self.logger.debug(
|
||||
f"Disabling audio detections for {self.camera_config.name}, ending events"
|
||||
)
|
||||
self.requestor.send_data(
|
||||
EXPIRE_AUDIO_ACTIVITY, self.camera_config.name
|
||||
)
|
||||
self.was_audio_enabled = audio_enabled
|
||||
|
||||
self.read_audio()
|
||||
|
||||
if self.audio_listener:
|
||||
|
||||
@ -201,9 +201,10 @@ Each line represents a detection state, not necessarily unique individuals. The
|
||||
except json.JSONDecodeError as je:
|
||||
logger.error("Failed to parse review description JSON: %s", je)
|
||||
return None
|
||||
# observations is required on the model; fill an empty default
|
||||
# observations and confidence are required on the model; fill an empty default
|
||||
# if the response omitted it so attribute access stays safe.
|
||||
raw.setdefault("observations", [])
|
||||
raw.setdefault("confidence", 0.0)
|
||||
metadata = ReviewMetadata.model_construct(**raw)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
|
||||
@ -317,16 +317,16 @@ class CameraWatchdog(threading.Thread):
|
||||
if camera != self.config.name:
|
||||
continue
|
||||
|
||||
if topic.endswith(RecordingsDataTypeEnum.valid.value):
|
||||
self.logger.debug(
|
||||
f"Latest valid recording segment time on {camera}: {segment_time}"
|
||||
)
|
||||
self.latest_valid_segment_time = segment_time
|
||||
elif topic.endswith(RecordingsDataTypeEnum.invalid.value):
|
||||
if topic.endswith(RecordingsDataTypeEnum.invalid.value):
|
||||
self.logger.warning(
|
||||
f"Invalid recording segment detected for {camera} at {segment_time}"
|
||||
)
|
||||
self.latest_invalid_segment_time = segment_time
|
||||
elif topic.endswith(RecordingsDataTypeEnum.valid.value):
|
||||
self.logger.debug(
|
||||
f"Latest valid recording segment time on {camera}: {segment_time}"
|
||||
)
|
||||
self.latest_valid_segment_time = segment_time
|
||||
elif topic.endswith(RecordingsDataTypeEnum.latest.value):
|
||||
if segment_time is not None:
|
||||
self.latest_cache_segment_time = segment_time
|
||||
|
||||
@ -57,6 +57,7 @@ import { useTranslation } from "react-i18next";
|
||||
|
||||
import { IoMdArrowRoundBack } from "react-icons/io";
|
||||
import {
|
||||
LuDownload,
|
||||
LuFolderPlus,
|
||||
LuFolderX,
|
||||
LuPencil,
|
||||
@ -777,54 +778,76 @@ function Exports() {
|
||||
filters={["cameras"]}
|
||||
onUpdateFilter={setExportFilter}
|
||||
/>
|
||||
{isAdmin && (
|
||||
<div className="flex items-center gap-1 md:gap-2">
|
||||
<div className="flex items-center gap-1 md:gap-2">
|
||||
{(exportsByCase[selectedCase.id]?.length ?? 0) > 0 && (
|
||||
<Button
|
||||
asChild
|
||||
className="flex items-center gap-2 p-2"
|
||||
size="sm"
|
||||
aria-label={t("toolbar.addExport")}
|
||||
onClick={() => setCaseForAddExport(selectedCase)}
|
||||
aria-label={t("button.download", { ns: "common" })}
|
||||
>
|
||||
<LuPlus className="text-secondary-foreground" />
|
||||
{!isMobile && (
|
||||
<div className="text-primary">
|
||||
{t("toolbar.addExport")}
|
||||
</div>
|
||||
)}
|
||||
<a
|
||||
download
|
||||
href={`${baseUrl}api/cases/${selectedCase.id}/download`}
|
||||
>
|
||||
<LuDownload className="text-secondary-foreground" />
|
||||
{!isMobile && (
|
||||
<div className="text-primary">
|
||||
{t("button.download", { ns: "common" })}
|
||||
</div>
|
||||
)}
|
||||
</a>
|
||||
</Button>
|
||||
<Button
|
||||
className="flex items-center gap-2 p-2"
|
||||
size="sm"
|
||||
aria-label={t("toolbar.editCase")}
|
||||
onClick={() =>
|
||||
setCaseDialog({
|
||||
mode: "edit",
|
||||
exportCase: selectedCase,
|
||||
})
|
||||
}
|
||||
>
|
||||
<LuPencil className="text-secondary-foreground" />
|
||||
{!isMobile && (
|
||||
<div className="text-primary">
|
||||
{t("toolbar.editCase")}
|
||||
</div>
|
||||
)}
|
||||
</Button>
|
||||
<Button
|
||||
className="flex items-center gap-2 p-2"
|
||||
size="sm"
|
||||
aria-label={t("toolbar.deleteCase")}
|
||||
onClick={() => setCaseToDelete(selectedCase)}
|
||||
>
|
||||
<LuTrash2 className="text-secondary-foreground" />
|
||||
{!isMobile && (
|
||||
<div className="text-primary">
|
||||
{t("toolbar.deleteCase")}
|
||||
</div>
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
)}
|
||||
{isAdmin && (
|
||||
<>
|
||||
<Button
|
||||
className="flex items-center gap-2 p-2"
|
||||
size="sm"
|
||||
aria-label={t("toolbar.addExport")}
|
||||
onClick={() => setCaseForAddExport(selectedCase)}
|
||||
>
|
||||
<LuPlus className="text-secondary-foreground" />
|
||||
{!isMobile && (
|
||||
<div className="text-primary">
|
||||
{t("toolbar.addExport")}
|
||||
</div>
|
||||
)}
|
||||
</Button>
|
||||
<Button
|
||||
className="flex items-center gap-2 p-2"
|
||||
size="sm"
|
||||
aria-label={t("toolbar.editCase")}
|
||||
onClick={() =>
|
||||
setCaseDialog({
|
||||
mode: "edit",
|
||||
exportCase: selectedCase,
|
||||
})
|
||||
}
|
||||
>
|
||||
<LuPencil className="text-secondary-foreground" />
|
||||
{!isMobile && (
|
||||
<div className="text-primary">
|
||||
{t("toolbar.editCase")}
|
||||
</div>
|
||||
)}
|
||||
</Button>
|
||||
<Button
|
||||
className="flex items-center gap-2 p-2"
|
||||
size="sm"
|
||||
aria-label={t("toolbar.deleteCase")}
|
||||
onClick={() => setCaseToDelete(selectedCase)}
|
||||
>
|
||||
<LuTrash2 className="text-secondary-foreground" />
|
||||
{!isMobile && (
|
||||
<div className="text-primary">
|
||||
{t("toolbar.deleteCase")}
|
||||
</div>
|
||||
)}
|
||||
</Button>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
|
||||
Loading…
Reference in New Issue
Block a user