Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-01-31 16:25:26 +03:00)

Compare commits: 42652db30a ... cbb582c2d2 (3 commits)

Commits in this range:
- cbb582c2d2
- cfeb86646f
- 7ec1d5d2c6
@@ -510,6 +510,12 @@ record:
   # Optional: Number of minutes to wait between cleanup runs (default: shown below)
   # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
   expire_interval: 60
+  # Optional: Maximum size of recordings in MB or string format (e.g. 10GB). (default: shown below)
+  # This serves as a hard limit for the size of the recordings for this camera.
+  # If the total size of recordings exceeds this limit, the oldest recordings will be deleted
+  # until the total size is below the limit, regardless of retention settings.
+  # 0 means no limit.
+  max_size: 0
   # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below).
   sync_recordings: False
   # Optional: Continuous retention settings
@@ -1,10 +1,11 @@
 from enum import Enum
-from typing import Optional
+from typing import Optional, Union

-from pydantic import Field
+from pydantic import Field, field_validator

 from frigate.const import MAX_PRE_CAPTURE
 from frigate.review.types import SeverityEnum
+from frigate.util.size import parse_size_to_mb

 from ..base import FrigateBaseModel

@@ -81,6 +82,10 @@ class RecordConfig(FrigateBaseModel):
         default=60,
         title="Number of minutes to wait between cleanup runs.",
     )
+    max_size: Union[float, str] = Field(
+        default=0,
+        title="Maximum size of recordings in MB or string format (e.g. 10GB).",
+    )
     continuous: RecordRetainConfig = Field(
         default_factory=RecordRetainConfig,
         title="Continuous recording retention settings.",
@@ -104,6 +109,16 @@ class RecordConfig(FrigateBaseModel):
         default=None, title="Keep track of original state of recording."
     )

+    @field_validator("max_size", mode="before")
+    @classmethod
+    def parse_max_size(cls, v: Union[float, str], info: object) -> float:
+        if isinstance(v, str):
+            try:
+                return parse_size_to_mb(v)
+            except ValueError:
+                raise ValueError(f"Invalid size string: {v}")
+        return v
+
     @property
     def event_pre_capture(self) -> int:
         return max(
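For readers unfamiliar with pydantic's mode="before" validators, the sketch below shows the same pattern in isolation: the raw config value is intercepted before type validation, so a string such as "10GB" is converted to a float MB value while numeric input passes through untouched. DemoRecordConfig and the local parse helper are illustrative stand-ins, not Frigate's actual classes; the 1024-based conversion matches the frigate/util/size.py added later in this diff.

# Minimal sketch of a mode="before" field validator (hypothetical model).
from typing import Union

from pydantic import BaseModel, Field, field_validator


def parse_size_to_mb(size_str: str) -> float:
    # Simplified stand-in for frigate/util/size.py (1024-based units).
    size_str = size_str.strip().upper()
    if size_str.endswith("GB"):
        return float(size_str[:-2]) * 1024
    if size_str.endswith("MB"):
        return float(size_str[:-2])
    return float(size_str)  # bare numbers are treated as MB


class DemoRecordConfig(BaseModel):
    max_size: Union[float, str] = Field(default=0)

    @field_validator("max_size", mode="before")
    @classmethod
    def parse_max_size(cls, v: Union[float, str]) -> float:
        # Runs before normal validation, so strings can be normalized to MB.
        return parse_size_to_mb(v) if isinstance(v, str) else v


print(DemoRecordConfig(max_size="10GB").max_size)  # 10240.0
print(DemoRecordConfig(max_size=500).max_size)     # 500.0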
@@ -20,6 +20,17 @@ from frigate.util.time import get_tomorrow_at_time
 logger = logging.getLogger(__name__)


+def get_directory_size(directory: str) -> float:
+    """Get the size of a directory in MB."""
+    total_size = 0
+    for dirpath, dirnames, filenames in os.walk(directory):
+        for f in filenames:
+            fp = os.path.join(dirpath, f)
+            if not os.path.islink(fp):
+                total_size += os.path.getsize(fp)
+    return total_size / 1000000
+
+
 class RecordingCleanup(threading.Thread):
     """Cleanup existing recordings based on retention config."""

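The helper walks the directory tree, skips symlinks, and reports decimal MB (bytes / 1,000,000) rather than MiB. A usage sketch is below; the import path assumes this hunk belongs to frigate/record/cleanup.py (where RecordingCleanup lives), and the recordings path is hypothetical.

# Usage sketch only; module path and directory are assumptions.
from frigate.record.cleanup import get_directory_size

size_mb = get_directory_size("/media/frigate/recordings")
print(f"recordings use {size_mb:.1f} MB (decimal, bytes / 1,000,000)")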
@@ -120,6 +131,7 @@ class RecordingCleanup(threading.Thread):
                 Recordings.objects,
                 Recordings.motion,
                 Recordings.dBFS,
+                Recordings.segment_size,
             )
             .where(
                 (Recordings.camera == config.name)
@@ -206,6 +218,10 @@ class RecordingCleanup(threading.Thread):
                 Recordings.id << deleted_recordings_list[i : i + max_deletes]
             ).execute()

+        # Check if we need to enforce max_size
+        if config.record.max_size > 0:
+            self.enforce_max_size(config, deleted_recordings)
+
         previews: list[Previews] = (
             Previews.select(
                 Previews.id,
@@ -266,6 +282,52 @@ class RecordingCleanup(threading.Thread):
                 Previews.id << deleted_previews_list[i : i + max_deletes]
             ).execute()

+    def enforce_max_size(
+        self, config: CameraConfig, deleted_recordings: set[str]
+    ) -> None:
+        """Ensure that the camera recordings do not exceed the max size."""
+        # Get all recordings for this camera
+        recordings: Recordings = (
+            Recordings.select(
+                Recordings.id,
+                Recordings.path,
+                Recordings.segment_size,
+            )
+            .where(
+                (Recordings.camera == config.name)
+                & (Recordings.id.not_in(list(deleted_recordings)))
+            )
+            .order_by(Recordings.start_time)
+            .namedtuples()
+            .iterator()
+        )
+
+        total_size = 0
+        recordings_list = []
+        for recording in recordings:
+            recordings_list.append(recording)
+            total_size += recording.segment_size
+
+        # If the total size is less than the max size, we are good
+        if total_size <= config.record.max_size:
+            return
+
+        # Delete recordings until we are under the max size
+        recordings_to_delete = []
+        for recording in recordings_list:
+            total_size -= recording.segment_size
+            recordings_to_delete.append(recording.id)
+            Path(recording.path).unlink(missing_ok=True)
+            if total_size <= config.record.max_size:
+                break
+
+        # Delete from database
+        max_deletes = 100000
+        for i in range(0, len(recordings_to_delete), max_deletes):
+            Recordings.delete().where(
+                Recordings.id << recordings_to_delete[i : i + max_deletes]
+            ).execute()
+
     def expire_recordings(self) -> None:
         """Delete recordings based on retention config."""
         logger.debug("Start expire recordings.")
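Stripped of the database layer, the enforcement above is a simple oldest-first eviction: sum the segment sizes and, if the total exceeds the cap, drop segments in start-time order until it no longer does. A self-contained sketch of that logic (plain tuples instead of Recordings rows; names are illustrative only):

# Standalone sketch of the oldest-first eviction used by enforce_max_size.
# Segments are (id, size_mb) tuples already sorted by start time.
def pick_segments_to_delete(
    segments: list[tuple[str, float]], max_size_mb: float
) -> list[str]:
    total = sum(size for _, size in segments)
    if total <= max_size_mb:
        return []  # already under the cap

    to_delete = []
    for segment_id, size in segments:  # oldest first
        total -= size
        to_delete.append(segment_id)
        if total <= max_size_mb:
            break
    return to_delete


# Example: 3 x 40 MB of recordings against an 80 MB cap -> drop the oldest one.
print(pick_segments_to_delete([("a", 40.0), ("b", 40.0), ("c", 40.0)], 80.0))  # ['a']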
@@ -97,6 +97,7 @@ class RecordingMaintainer(threading.Thread):
         self.object_recordings_info: dict[str, list] = defaultdict(list)
         self.audio_recordings_info: dict[str, list] = defaultdict(list)
         self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
+        self.unexpected_cache_files_logged: bool = False

     async def move_files(self) -> None:
         cache_files = [
@@ -112,7 +113,14 @@ class RecordingMaintainer(threading.Thread):
         for cache in cache_files:
             cache_path = os.path.join(CACHE_DIR, cache)
             basename = os.path.splitext(cache)[0]
-            camera, date = basename.rsplit("@", maxsplit=1)
+            try:
+                camera, date = basename.rsplit("@", maxsplit=1)
+            except ValueError:
+                if not self.unexpected_cache_files_logged:
+                    logger.warning("Skipping unexpected files in cache")
+                    self.unexpected_cache_files_logged = True
+                continue
+
             start_time = datetime.datetime.strptime(
                 date, CACHE_SEGMENT_FORMAT
             ).astimezone(datetime.timezone.utc)
@@ -164,7 +172,13 @@ class RecordingMaintainer(threading.Thread):

             cache_path = os.path.join(CACHE_DIR, cache)
             basename = os.path.splitext(cache)[0]
-            camera, date = basename.rsplit("@", maxsplit=1)
+            try:
+                camera, date = basename.rsplit("@", maxsplit=1)
+            except ValueError:
+                if not self.unexpected_cache_files_logged:
+                    logger.warning("Skipping unexpected files in cache")
+                    self.unexpected_cache_files_logged = True
+                continue

             # important that start_time is utc because recordings are stored and compared in utc
             start_time = datetime.datetime.strptime(
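The guarded call fails exactly when a cache filename has no "@" separator: rsplit("@", maxsplit=1) then yields a single element and the tuple unpacking raises ValueError, which is what the new try/except (and the one-time warning flag) absorbs. A quick illustration with made-up filenames:

# Why the try/except is needed: unpacking fails for names without an "@".
good = "camera@20210101000000+0000"
bad = "bad_filename"

camera, date = good.rsplit("@", maxsplit=1)  # ("camera", "20210101000000+0000")

try:
    camera, date = bad.rsplit("@", maxsplit=1)
except ValueError as e:
    print(e)  # not enough values to unpack (expected 2, got 1)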
@@ -429,6 +429,29 @@ class TestConfig(unittest.TestCase):
         frigate_config = FrigateConfig(**config)
         assert "-rtsp_transport" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]

+    def test_record_max_size_validation(self):
+        config = {
+            "mqtt": {"host": "mqtt"},
+            "record": {"max_size": "10GB"},
+            "cameras": {
+                "back": {
+                    "ffmpeg": {
+                        "inputs": [
+                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+                        ]
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
+                }
+            },
+        }
+
+        frigate_config = FrigateConfig(**config)
+        assert frigate_config.record.max_size == 10240
+
     def test_ffmpeg_params_global(self):
         config = {
             "ffmpeg": {"input_args": "-re"},
frigate/test/test_maintainer.py (new file, 66 lines)
@@ -0,0 +1,66 @@
+import sys
+import unittest
+from unittest.mock import MagicMock, patch
+
+# Mock complex imports before importing maintainer
+sys.modules["frigate.comms.inter_process"] = MagicMock()
+sys.modules["frigate.comms.detections_updater"] = MagicMock()
+sys.modules["frigate.comms.recordings_updater"] = MagicMock()
+sys.modules["frigate.config.camera.updater"] = MagicMock()
+
+# Now import the class under test
+from frigate.config import FrigateConfig  # noqa: E402
+from frigate.record.maintainer import RecordingMaintainer  # noqa: E402
+
+
+class TestMaintainer(unittest.IsolatedAsyncioTestCase):
+    async def test_move_files_survives_bad_filename(self):
+        config = MagicMock(spec=FrigateConfig)
+        config.cameras = {}
+        stop_event = MagicMock()
+
+        maintainer = RecordingMaintainer(config, stop_event)
+
+        # We need to mock end_time_cache to avoid key errors if logic proceeds
+        maintainer.end_time_cache = {}
+
+        # Mock filesystem
+        # One bad file, one good file
+        files = ["bad_filename.mp4", "camera@20210101000000+0000.mp4"]
+
+        with patch("os.listdir", return_value=files):
+            with patch("os.path.isfile", return_value=True):
+                with patch(
+                    "frigate.record.maintainer.psutil.process_iter", return_value=[]
+                ):
+                    with patch("frigate.record.maintainer.logger.warning") as warn:
+                        # Mock validate_and_move_segment to avoid further logic
+                        maintainer.validate_and_move_segment = MagicMock()
+
+                        try:
+                            await maintainer.move_files()
+                        except ValueError as e:
+                            if "not enough values to unpack" in str(e):
+                                self.fail("move_files() crashed on bad filename!")
+                            raise e
+                        except Exception:
+                            # Ignore other errors (like DB connection) as we only care about the unpack crash
+                            pass
+
+                        # The bad filename is encountered in multiple loops, but should only warn once.
+                        matching = [
+                            c
+                            for c in warn.call_args_list
+                            if c.args
+                            and isinstance(c.args[0], str)
+                            and "Skipping unexpected files in cache" in c.args[0]
+                        ]
+                        self.assertEqual(
+                            1,
+                            len(matching),
+                            f"Expected a single warning for unexpected files, got {len(matching)}",
+                        )
+
+
+if __name__ == "__main__":
+    unittest.main()
@@ -31,3 +31,15 @@ class TestRecordRetention(unittest.TestCase):
         )
         assert not segment_info.should_discard_segment(RetainModeEnum.motion)
         assert segment_info.should_discard_segment(RetainModeEnum.active_objects)
+
+    def test_size_utility(self):
+        from frigate.util.size import parse_size_to_mb
+
+        assert parse_size_to_mb("10GB") == 10240
+        assert parse_size_to_mb("10MB") == 10
+        assert parse_size_to_mb("1024KB") == 1
+        assert parse_size_to_mb("1048576B") == 1
+        assert parse_size_to_mb("10") == 10
+
+        with self.assertRaises(ValueError):
+            parse_size_to_mb("invalid")
frigate/util/size.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+"""Utility for parsing size strings."""
+
+
+def parse_size_to_mb(size_str: str) -> float:
+    """Parse a size string to megabytes."""
+    size_str = size_str.strip().upper()
+    if size_str.endswith("TB"):
+        return float(size_str[:-2]) * 1024 * 1024
+    elif size_str.endswith("GB"):
+        return float(size_str[:-2]) * 1024
+    elif size_str.endswith("MB"):
+        return float(size_str[:-2])
+    elif size_str.endswith("KB"):
+        return float(size_str[:-2]) / 1024
+    elif size_str.endswith("B"):
+        return float(size_str[:-1]) / (1024 * 1024)
+    else:
+        try:
+            return float(size_str)
+        except ValueError:
+            raise ValueError(f"Invalid size string: {size_str}")
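One detail worth noting in parse_size_to_mb: the suffix checks run longest-first, so "10GB" is handled by the GB branch even though it also ends in "B"; only bare byte counts such as "1048576B" reach the final B branch, and unsuffixed input is treated as already being MB. A small check of that ordering (values follow from the 1024-based conversions above):

# Suffix precedence in parse_size_to_mb: TB/GB/MB/KB are matched before the
# generic trailing "B", so GB strings never hit the bytes branch.
from frigate.util.size import parse_size_to_mb

assert parse_size_to_mb("10GB") == 10 * 1024  # GB branch, not bytes
assert parse_size_to_mb("1048576B") == 1      # bytes branch
assert parse_size_to_mb("512") == 512         # no suffix -> already MB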