diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b7705dcf6..93b084a8d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -35,7 +35,7 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
       - name: Log in to the Container registry
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
diff --git a/.github/workflows/maintain_cache.yml b/.github/workflows/maintain_cache.yml
index a15c8c684..30cb3de80 100644
--- a/.github/workflows/maintain_cache.yml
+++ b/.github/workflows/maintain_cache.yml
@@ -28,7 +28,7 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
       - name: Log in to the Container registry
-        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
+        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md
index 0a1b230aa..d40d235b9 100644
--- a/docs/docs/configuration/index.md
+++ b/docs/docs/configuration/index.md
@@ -558,6 +558,14 @@ ui:
 
 # Optional: Telemetry configuration
 telemetry:
+  # Optional: Enabled network interfaces for bandwidth stats monitoring (default: shown below)
+  network_interfaces:
+    - eth
+    - enp
+    - eno
+    - ens
+    - wl
+    - lo
   # Optional: Enable the latest version outbound check (default: shown below)
   # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions
   version_check: True
diff --git a/frigate/app.py b/frigate/app.py
index 2db8728b2..840b80710 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -1,3 +1,4 @@
+import datetime
 import logging
 import multiprocessing as mp
 import os
@@ -167,6 +168,15 @@ class FrigateApp:
         self.timeline_queue: Queue = mp.Queue()
 
     def init_database(self) -> None:
+        def vacuum_db(db: SqliteExtDatabase) -> None:
+            db.execute_sql("VACUUM;")
+
+            try:
+                with open(f"{CONFIG_DIR}/.vacuum", "w") as f:
+                    f.write(str(datetime.datetime.now().timestamp()))
+            except PermissionError:
+                logger.error("Unable to write to /config to save DB state")
+
         # Migrate DB location
         old_db_path = DEFAULT_DB_PATH
         if not os.path.isfile(self.config.database.path) and os.path.isfile(
@@ -182,6 +192,24 @@ class FrigateApp:
         router = Router(migrate_db)
         router.run()
 
+        # check if vacuum needs to be run
+        if os.path.exists(f"{CONFIG_DIR}/.vacuum"):
+            with open(f"{CONFIG_DIR}/.vacuum") as f:
+                try:
+                    timestamp = int(f.readline())
+                except Exception:
+                    timestamp = 0
+
+            if (
+                timestamp
+                < (
+                    datetime.datetime.now() - datetime.timedelta(weeks=2)
+                ).timestamp()
+            ):
+                vacuum_db(migrate_db)
+        else:
+            vacuum_db(migrate_db)
+
         migrate_db.close()
 
     def init_go2rtc(self) -> None:
@@ -205,7 +233,15 @@ class FrigateApp:
     def bind_database(self) -> None:
         """Bind db to the main process."""
         # NOTE: all db accessing processes need to be created before the db can be bound to the main process
-        self.db = SqliteQueueDatabase(self.config.database.path)
+        self.db = SqliteQueueDatabase(
+            self.config.database.path,
+            pragmas={
+                "auto_vacuum": "FULL",  # Does not defragment database
+                "cache_size": -512 * 1000,  # 512MB of cache
+                "synchronous": "NORMAL",  # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
+            },
+            timeout=60,
+        )
         models = [Event, Recordings, Timeline]
         self.db.bind(models)
 
diff --git a/frigate/config.py b/frigate/config.py
index 5c2f27b5a..43a43be0d 100644
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -90,6 +90,10 @@ class UIConfig(FrigateBaseModel):
 
 
 class TelemetryConfig(FrigateBaseModel):
+    network_interfaces: List[str] = Field(
+        default=["eth", "enp", "eno", "ens", "wl", "lo"],
+        title="Enabled network interfaces for bandwidth calculation.",
+    )
     version_check: bool = Field(default=True, title="Enable latest version check.")
 
 
diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py
index ada45e6fa..43fb5f8dc 100644
--- a/frigate/events/cleanup.py
+++ b/frigate/events/cleanup.py
@@ -52,7 +52,7 @@ class EventCleanup(threading.Thread):
                 Event.camera.not_in(self.camera_keys),
                 Event.start_time < expire_after,
                 Event.label == event.label,
-                Event.retain_indefinitely is False,
+                Event.retain_indefinitely == False,
             )
             # delete the media from disk
             for event in expired_events:
@@ -72,7 +72,7 @@ class EventCleanup(threading.Thread):
                 Event.camera.not_in(self.camera_keys),
                 Event.start_time < expire_after,
                 Event.label == event.label,
-                Event.retain_indefinitely is False,
+                Event.retain_indefinitely == False,
             )
             update_query.execute()
 
@@ -101,7 +101,7 @@ class EventCleanup(threading.Thread):
                     Event.camera == name,
                     Event.start_time < expire_after,
                     Event.label == event.label,
-                    Event.retain_indefinitely is False,
+                    Event.retain_indefinitely == False,
                 )
                 # delete the grabbed clips from disk
                 for event in expired_events:
@@ -120,7 +120,7 @@ class EventCleanup(threading.Thread):
                     Event.camera == name,
                     Event.start_time < expire_after,
                     Event.label == event.label,
-                    Event.retain_indefinitely is False,
+                    Event.retain_indefinitely == False,
                 )
                 update_query.execute()
 
@@ -167,7 +167,7 @@ class EventCleanup(threading.Thread):
 
         # drop events from db where has_clip and has_snapshot are false
         delete_query = Event.delete().where(
-            Event.has_clip is False, Event.has_snapshot is False
+            Event.has_clip == False, Event.has_snapshot == False
         )
         delete_query.execute()
 
diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py
index 5459fea6c..8ae6aee07 100644
--- a/frigate/events/maintainer.py
+++ b/frigate/events/maintainer.py
@@ -61,7 +61,7 @@ class EventProcessor(threading.Thread):
     def run(self) -> None:
         # set an end_time on events without an end_time on startup
         Event.update(end_time=Event.start_time + 30).where(
-            Event.end_time is None
+            Event.end_time == None
         ).execute()
 
         while not self.stop_event.is_set():
@@ -95,7 +95,7 @@ class EventProcessor(threading.Thread):
 
         # set an end_time on events without an end_time before exiting
         Event.update(end_time=datetime.datetime.now().timestamp()).where(
-            Event.end_time is None
+            Event.end_time == None
         ).execute()
 
         logger.info("Exiting event processor...")
diff --git a/frigate/http.py b/frigate/http.py
index 7ee4c7f43..b4813c1f2 100644
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -591,7 +591,7 @@ def event_snapshot(id):
     event_complete = False
     jpg_bytes = None
     try:
-        event = Event.get(Event.id == id, Event.end_time is not None)
+        event = Event.get(Event.id == id, Event.end_time != None)
         event_complete = True
         if not event.has_snapshot:
             return "Snapshot not available", 404
@@ -643,7 +643,7 @@ def label_snapshot(camera_name, label):
         event_query = (
             Event.select()
             .where(Event.camera == camera_name)
-            .where(Event.has_snapshot is True)
+            .where(Event.has_snapshot == True)
             .order_by(Event.start_time.desc())
         )
     else:
@@ -651,7 +651,7 @@ def label_snapshot(camera_name, label):
             Event.select()
             .where(Event.camera == camera_name)
             .where(Event.label == label)
-            .where(Event.has_snapshot is True)
+            .where(Event.has_snapshot == True)
             .order_by(Event.start_time.desc())
         )
 
diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py
index dbe4cf207..75a1f9508 100644
--- a/frigate/record/cleanup.py
+++ b/frigate/record/cleanup.py
@@ -180,7 +180,9 @@ class RecordingCleanup(threading.Thread):
 
         # find all the recordings older than the oldest recording in the db
         try:
-            oldest_recording = Recordings.select().order_by(Recordings.start_time).get()
+            oldest_recording = (
+                Recordings.select().order_by(Recordings.start_time).limit(1).get()
+            )
 
             p = Path(oldest_recording.path)
             oldest_timestamp = p.stat().st_mtime - 1
diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py
index 079c4ffa9..cab7b669d 100644
--- a/frigate/record/maintainer.py
+++ b/frigate/record/maintainer.py
@@ -115,7 +115,7 @@ class RecordingMaintainer(threading.Thread):
                 Event.select()
                 .where(
                     Event.camera == camera,
-                    (Event.end_time is None)
+                    (Event.end_time == None)
                     | (Event.end_time >= recordings[0]["start_time"].timestamp()),
                     Event.has_clip,
                 )
diff --git a/frigate/record/record.py b/frigate/record/record.py
index ba927d126..ab6cd3450 100644
--- a/frigate/record/record.py
+++ b/frigate/record/record.py
@@ -37,7 +37,15 @@ def manage_recordings(
     setproctitle("frigate.recording_manager")
     listen()
 
-    db = SqliteQueueDatabase(config.database.path)
+    db = SqliteQueueDatabase(
+        config.database.path,
+        pragmas={
+            "auto_vacuum": "FULL",  # Does not defragment database
+            "cache_size": -512 * 1000,  # 512MB of cache
+            "synchronous": "NORMAL",  # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
+        },
+        timeout=60,
+    )
     models = [Event, Recordings, Timeline, RecordingsToDelete]
     db.bind(models)
 
@@ -48,5 +56,3 @@ def manage_recordings(
 
     cleanup = RecordingCleanup(config, stop_event)
     cleanup.start()
-
-    logger.info("recording_manager: exiting subprocess")
diff --git a/frigate/stats.py b/frigate/stats.py
index 3fd881029..dd8c4b06d 100644
--- a/frigate/stats.py
+++ b/frigate/stats.py
@@ -108,7 +108,7 @@ def get_processing_stats(
             [
                 asyncio.create_task(set_gpu_stats(config, stats, hwaccel_errors)),
                 asyncio.create_task(set_cpu_stats(stats)),
-                asyncio.create_task(set_bandwidth_stats(stats)),
+                asyncio.create_task(set_bandwidth_stats(config, stats)),
             ]
         )
 
@@ -126,9 +126,9 @@ async def set_cpu_stats(all_stats: dict[str, Any]) -> None:
     all_stats["cpu_usages"] = cpu_stats
 
 
-async def set_bandwidth_stats(all_stats: dict[str, Any]) -> None:
+async def set_bandwidth_stats(config: FrigateConfig, all_stats: dict[str, Any]) -> None:
     """Set bandwidth from nethogs."""
-    bandwidth_stats = get_bandwidth_stats()
+    bandwidth_stats = get_bandwidth_stats(config)
     if bandwidth_stats:
         all_stats["bandwidth_usages"] = bandwidth_stats
 
diff --git a/frigate/storage.py b/frigate/storage.py
index 6cdd54bdc..d2cab553a 100644
--- a/frigate/storage.py
+++ b/frigate/storage.py
@@ -36,9 +36,7 @@ class StorageMaintainer(threading.Thread):
             self.camera_storage_stats[camera] = {
                 "needs_refresh": (
                     Recordings.select(fn.COUNT(Recordings.id))
-                    .where(
-                        Recordings.camera == camera, Recordings.segment_size != 0
-                    )
+                    .where(Recordings.camera == camera, Recordings.segment_size > 0)
                     .scalar()
                     < 50
                 )
@@ -48,7 +46,7 @@ class StorageMaintainer(threading.Thread):
             try:
                 bandwidth = round(
                     Recordings.select(fn.AVG(bandwidth_equation))
-                    .where(Recordings.camera == camera, Recordings.segment_size != 0)
+                    .where(Recordings.camera == camera, Recordings.segment_size > 0)
                     .limit(100)
                     .scalar()
                     * 3600,
@@ -107,7 +105,7 @@ class StorageMaintainer(threading.Thread):
         retained_events: Event = (
             Event.select()
             .where(
-                Event.retain_indefinitely is True,
+                Event.retain_indefinitely == True,
                 Event.has_clip,
             )
             .order_by(Event.start_time.asc())
@@ -178,6 +176,7 @@ class StorageMaintainer(threading.Thread):
 
     def run(self):
         """Check every 5 minutes if storage needs to be cleaned up."""
+        self.calculate_camera_bandwidth()
         while not self.stop_event.wait(300):
             if not self.camera_storage_stats or True in [
                 r["needs_refresh"] for r in self.camera_storage_stats.values()
diff --git a/frigate/util.py b/frigate/util.py
index e624e877a..897aa8d21 100755
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -844,10 +844,27 @@ def get_cpu_stats() -> dict[str, dict]:
     return usages
 
 
-def get_bandwidth_stats() -> dict[str, dict]:
+def get_physical_interfaces(interfaces) -> list:
+    with open("/proc/net/dev", "r") as file:
+        lines = file.readlines()
+
+    physical_interfaces = []
+    for line in lines:
+        if ":" in line:
+            interface = line.split(":")[0].strip()
+            for int in interfaces:
+                if interface.startswith(int):
+                    physical_interfaces.append(interface)
+
+    return physical_interfaces
+
+
+def get_bandwidth_stats(config) -> dict[str, dict]:
     """Get bandwidth usages for each ffmpeg process id"""
     usages = {}
-    top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"]
+    top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
+        config.telemetry.network_interfaces
+    )
 
     p = sp.run(
         top_command,
diff --git a/migrations/017_update_indexes.py b/migrations/017_update_indexes.py
new file mode 100644
index 000000000..8aa53f8ee
--- /dev/null
+++ b/migrations/017_update_indexes.py
@@ -0,0 +1,35 @@
+"""Peewee migrations -- 017_update_indexes.py.
+
+Some examples (model - class or model name)::
+
+    > Model = migrator.orm['model_name']            # Return model in current state by name
+
+    > migrator.sql(sql)                             # Run custom SQL
+    > migrator.python(func, *args, **kwargs)        # Run python code
+    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
+    > migrator.remove_model(model, cascade=True)    # Remove a model
+    > migrator.add_fields(model, **fields)          # Add fields to a model
+    > migrator.change_fields(model, **fields)       # Change fields
+    > migrator.remove_fields(model, *field_names, cascade=True)
+    > migrator.rename_field(model, old_field_name, new_field_name)
+    > migrator.rename_table(model, new_table_name)
+    > migrator.add_index(model, *col_names, unique=False)
+    > migrator.drop_index(model, *col_names)
+    > migrator.add_not_null(model, *field_names)
+    > migrator.drop_not_null(model, *field_names)
+    > migrator.add_default(model, field_name, default)
+
+"""
+import peewee as pw
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+    migrator.sql(
+        'CREATE INDEX "recordings_camera_segment_size" ON "recordings" ("camera", "segment_size")'
+    )
+
+
+def rollback(migrator, database, fake=False, **kwargs):
+    pass
diff --git a/pyproject.toml b/pyproject.toml
index e3ef3faf5..3123a3242 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,4 +2,4 @@
 profile = "black"
 
 [tool.ruff]
-ignore = ["E501"]
\ No newline at end of file
+ignore = ["E501","E711","E712"]
\ No newline at end of file
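
Note on the pragmas passed to SqliteQueueDatabase in frigate/app.py and frigate/record/record.py: a negative cache_size is interpreted by SQLite as a size in KiB, so -512 * 1000 requests roughly 512 MB of page cache, and synchronous=NORMAL is the documented safe level when the database is in WAL journal mode (which the inline comments in the diff assume). The sketch below is not Frigate code; it applies the same pragmas through the stdlib sqlite3 module against a hypothetical path, purely to inspect the values SQLite reports back.

import sqlite3

# Standalone sketch: apply the same pragmas the diff configures on playhouse's
# SqliteQueueDatabase and print what SQLite reports as the effective settings.
conn = sqlite3.connect("/tmp/frigate_pragma_check.db", timeout=60)
conn.execute("PRAGMA journal_mode = WAL")    # the diff's comments assume WAL journaling
conn.execute("PRAGMA auto_vacuum = FULL")    # only takes effect on a new or vacuumed file
conn.execute("PRAGMA cache_size = -512000")  # negative value = size in KiB, ~512 MB
conn.execute("PRAGMA synchronous = NORMAL")  # safe with WAL per the SQLite docs

for pragma in ("journal_mode", "auto_vacuum", "cache_size", "synchronous"):
    print(pragma, "=", conn.execute(f"PRAGMA {pragma}").fetchone()[0])
conn.close()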
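
Note on the is/== changes and the new E711/E712 entries in pyproject.toml: inside a peewee .where() call, an expression like Event.end_time is None is evaluated by Python immediately and yields a plain bool, so no NULL check ever reaches the SQL. Peewee overloads == on model fields to build a deferred SQL expression instead (== None compiles to an IS comparison against NULL), which is why the comparisons were switched and the matching ruff rules silenced. A minimal sketch with a stand-in model, not Frigate's actual Event class:

import peewee as pw

db = pw.SqliteDatabase(":memory:")


class Event(pw.Model):
    # stand-in model for illustration only
    end_time = pw.DateTimeField(null=True)

    class Meta:
        database = db


# "is" runs in Python and compares object identity -> a plain False
print(Event.end_time is None)  # False

# "==" is overloaded by peewee and returns a query expression object
print(type(Event.end_time == None))  # <class 'peewee.Expression'>

# the expression compiles to an IS comparison against NULL in the generated SQL
print(Event.select().where(Event.end_time == None).sql())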
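
Note on migrations/017_update_indexes.py: the new composite index lines up with the storage maintainer's filters on (camera, segment_size), such as the segment_size > 0 conditions in frigate/storage.py. A quick way to sanity-check that the index is usable is EXPLAIN QUERY PLAN against a simplified schema; the table definition below is trimmed for illustration and is not the full Recordings model.

import sqlite3

conn = sqlite3.connect(":memory:")
# trimmed schema, just enough to exercise the new index
conn.execute(
    "CREATE TABLE recordings (id TEXT PRIMARY KEY, camera TEXT, segment_size REAL)"
)
conn.execute(
    'CREATE INDEX "recordings_camera_segment_size" '
    'ON "recordings" ("camera", "segment_size")'
)

plan = conn.execute(
    "EXPLAIN QUERY PLAN "
    "SELECT AVG(segment_size) FROM recordings WHERE camera = ? AND segment_size > 0",
    ("front",),
).fetchall()

# expect a SEARCH ... USING (COVERING) INDEX recordings_camera_segment_size row
for row in plan:
    print(row)
conn.close()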
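
Note on get_physical_interfaces() in frigate/util.py: interface names read from /proc/net/dev that start with a configured prefix are appended to the nethogs command line (nethogs accepts device names as trailing arguments), so bandwidth stats only cover the listed interface families. A self-contained sketch of the same prefix filter against a made-up /proc/net/dev snippet; the real helper reads the host file directly and keeps its own naming.

# Sample /proc/net/dev content, made up for illustration.
SAMPLE_PROC_NET_DEV = """\
Inter-|   Receive                    |  Transmit
 face |bytes    packets errs drop    |bytes    packets errs drop
    lo:  104013     876    0    0     104013     876    0    0
  eth0: 9645214    8117    0    0    1544325    7244    0    0
docker0:  512331    1021    0    0     664502    1130    0    0
"""

DEFAULT_PREFIXES = ["eth", "enp", "eno", "ens", "wl", "lo"]


def match_interfaces(proc_net_dev: str, prefixes: list[str]) -> list[str]:
    """Return interface names that start with one of the configured prefixes."""
    names = []
    for line in proc_net_dev.splitlines():
        if ":" in line:
            name = line.split(":")[0].strip()
            if any(name.startswith(prefix) for prefix in prefixes):
                names.append(name)
    return names


# docker0 matches no default prefix, so it is left out of the nethogs invocation
devices = match_interfaces(SAMPLE_PROC_NET_DEV, DEFAULT_PREFIXES)
print(["nethogs", "-t", "-v0", "-c5", "-d1"] + devices)  # ends with 'lo', 'eth0'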