Merge branch 'dev' of github.com:blakeblackshear/frigate into measure-and-publish-dbfs

This commit is contained in:
JP Verdejo 2023-07-06 09:32:45 -05:00
commit abfcd68d5e
38 changed files with 985 additions and 799 deletions

View File

@ -14,6 +14,11 @@ curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
sudo mkdir -p /media/frigate sudo mkdir -p /media/frigate
sudo chown -R "$(id -u):$(id -g)" /media/frigate sudo chown -R "$(id -u):$(id -g)" /media/frigate
# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
# s6 service file. For dev, where frigate is started from an interactive
# shell, we define it in .bashrc instead.
echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
make version make version
cd web cd web

View File

@ -44,6 +44,7 @@ function migrate_db_path() {
echo "[INFO] Preparing Frigate..." echo "[INFO] Preparing Frigate..."
migrate_db_path migrate_db_path
export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
echo "[INFO] Starting Frigate..." echo "[INFO] Starting Frigate..."

View File

@ -43,6 +43,8 @@ function get_ip_and_port_from_supervisor() {
export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}" export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
} }
export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
echo "[INFO] Preparing go2rtc config..." echo "[INFO] Preparing go2rtc config..."

View File

@ -7,7 +7,7 @@ import sys
import yaml import yaml
sys.path.insert(0, "/opt/frigate") sys.path.insert(0, "/opt/frigate")
from frigate.const import BIRDSEYE_PIPE, BTBN_PATH # noqa: E402 from frigate.const import BIRDSEYE_PIPE # noqa: E402
from frigate.ffmpeg_presets import ( # noqa: E402 from frigate.ffmpeg_presets import ( # noqa: E402
parse_preset_hardware_acceleration_encode, parse_preset_hardware_acceleration_encode,
) )
@ -71,7 +71,7 @@ elif go2rtc_config["rtsp"].get("default_query") is None:
go2rtc_config["rtsp"]["default_query"] = "mp4" go2rtc_config["rtsp"]["default_query"] = "mp4"
# need to replace ffmpeg command when using ffmpeg4 # need to replace ffmpeg command when using ffmpeg4
if not os.path.exists(BTBN_PATH): if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
if go2rtc_config.get("ffmpeg") is None: if go2rtc_config.get("ffmpeg") is None:
go2rtc_config["ffmpeg"] = { go2rtc_config["ffmpeg"] = {
"rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"

View File

@ -467,10 +467,11 @@ cameras:
# Required: the path to the stream # Required: the path to the stream
# NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {} # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {}
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
# Required: list of roles for this stream. valid values are: detect,record,rtmp # Required: list of roles for this stream. valid values are: audio,detect,record,rtmp
# NOTICE: In addition to assigning the record and rtmp roles, # NOTICE: In addition to assigning the audio, record, and rtmp roles,
# they must also be enabled in the camera config. # they must also be enabled in the camera config.
roles: roles:
- audio
- detect - detect
- record - record
- rtmp - rtmp

View File

@ -109,11 +109,19 @@ Same data available at `/api/stats` published at a configurable interval.
### `frigate/<camera_name>/detect/set` ### `frigate/<camera_name>/detect/set`
Topic to turn detection for a camera on and off. Expected values are `ON` and `OFF`. Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/detect/state` ### `frigate/<camera_name>/detect/state`
Topic with current state of detection for a camera. Published values are `ON` and `OFF`. Topic with current state of object detection for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/audio/set`
Topic to turn audio detection for a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/audio/state`
Topic with current state of audio detection for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/recordings/set` ### `frigate/<camera_name>/recordings/set`
@ -176,7 +184,7 @@ Topic to send PTZ commands to camera.
| Command | Description | | Command | Description |
| ---------------------- | ----------------------------------------------------------------------------------------- | | ---------------------- | ----------------------------------------------------------------------------------------- |
| `preset-<preset_name>` | send command to move to preset with name `<preset_name>` | | `preset_<preset_name>` | send command to move to preset with name `<preset_name>` |
| `MOVE_<dir>` | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT] | | `MOVE_<dir>` | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT] |
| `ZOOM_<dir>` | send command to continuously zoom `<dir>`, possible values are [IN, OUT] | | `ZOOM_<dir>` | send command to continuously zoom `<dir>`, possible values are [IN, OUT] |
| `STOP` | send command to stop moving | | `STOP` | send command to stop moving |

View File

@ -10,6 +10,7 @@ from multiprocessing.synchronize import Event as MpEvent
from types import FrameType from types import FrameType
from typing import Optional from typing import Optional
import faster_fifo as ff
import psutil import psutil
from faster_fifo import Queue from faster_fifo import Queue
from peewee_migrate import Router from peewee_migrate import Router
@ -47,6 +48,7 @@ from frigate.stats import StatsEmitter, stats_init
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor from frigate.timeline import TimelineProcessor
from frigate.types import CameraMetricsTypes, FeatureMetricsTypes from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
from frigate.util.builtin import LimitedQueue as LQueue
from frigate.version import VERSION from frigate.version import VERSION
from frigate.video import capture_camera, track_camera from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog from frigate.watchdog import FrigateWatchdog
@ -57,11 +59,11 @@ logger = logging.getLogger(__name__)
class FrigateApp: class FrigateApp:
def __init__(self) -> None: def __init__(self) -> None:
self.stop_event: MpEvent = mp.Event() self.stop_event: MpEvent = mp.Event()
self.detection_queue: Queue = mp.Queue() self.detection_queue: Queue = ff.Queue()
self.detectors: dict[str, ObjectDetectProcess] = {} self.detectors: dict[str, ObjectDetectProcess] = {}
self.detection_out_events: dict[str, MpEvent] = {} self.detection_out_events: dict[str, MpEvent] = {}
self.detection_shms: list[mp.shared_memory.SharedMemory] = [] self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue() self.log_queue: Queue = ff.Queue()
self.plus_api = PlusApi() self.plus_api = PlusApi()
self.camera_metrics: dict[str, CameraMetricsTypes] = {} self.camera_metrics: dict[str, CameraMetricsTypes] = {}
self.feature_metrics: dict[str, FeatureMetricsTypes] = {} self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
@ -157,7 +159,7 @@ class FrigateApp:
"ffmpeg_pid": mp.Value("i", 0), # type: ignore[typeddict-item] "ffmpeg_pid": mp.Value("i", 0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799 # issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards # from mypy 0.981 onwards
"frame_queue": mp.Queue(maxsize=2), "frame_queue": LQueue(maxsize=2),
"capture_process": None, "capture_process": None,
"process": None, "process": None,
} }
@ -189,22 +191,22 @@ class FrigateApp:
def init_queues(self) -> None: def init_queues(self) -> None:
# Queues for clip processing # Queues for clip processing
self.event_queue: Queue = mp.Queue() self.event_queue: Queue = ff.Queue()
self.event_processed_queue: Queue = mp.Queue() self.event_processed_queue: Queue = ff.Queue()
self.video_output_queue: Queue = mp.Queue( self.video_output_queue: Queue = LQueue(
maxsize=len(self.config.cameras.keys()) * 2 maxsize=len(self.config.cameras.keys()) * 2
) )
# Queue for cameras to push tracked objects to # Queue for cameras to push tracked objects to
self.detected_frames_queue: Queue = mp.Queue( self.detected_frames_queue: Queue = LQueue(
maxsize=len(self.config.cameras.keys()) * 2 maxsize=len(self.config.cameras.keys()) * 2
) )
# Queue for recordings info # Queue for recordings info
self.recordings_info_queue: Queue = mp.Queue() self.recordings_info_queue: Queue = ff.Queue()
# Queue for timeline events # Queue for timeline events
self.timeline_queue: Queue = mp.Queue() self.timeline_queue: Queue = ff.Queue()
# Queue for inter process communication # Queue for inter process communication
self.inter_process_queue: Queue = mp.Queue() self.inter_process_queue: Queue = mp.Queue()
@ -452,6 +454,7 @@ class FrigateApp:
) )
audio_process.daemon = True audio_process.daemon = True
audio_process.start() audio_process.start()
self.processes["audioDetector"] = audio_process.pid or 0
logger.info(f"Audio process started: {audio_process.pid}") logger.info(f"Audio process started: {audio_process.pid}")
def start_timeline_processor(self) -> None: def start_timeline_processor(self) -> None:

View File

@ -7,7 +7,7 @@ from typing import Any, Callable
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.ptz import OnvifCommandEnum, OnvifController from frigate.ptz import OnvifCommandEnum, OnvifController
from frigate.types import CameraMetricsTypes, FeatureMetricsTypes from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
from frigate.util import restart_frigate from frigate.util.services import restart_frigate
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -255,7 +255,7 @@ class Dispatcher:
try: try:
if "preset" in payload.lower(): if "preset" in payload.lower():
command = OnvifCommandEnum.preset command = OnvifCommandEnum.preset
param = payload.lower().split("-")[1] param = payload.lower()[payload.index("_") + 1 :]
else: else:
command = OnvifCommandEnum[payload.lower()] command = OnvifCommandEnum[payload.lower()]
param = "" param = ""

View File

@ -149,6 +149,7 @@ class MqttClient(Communicator): # type: ignore[misc]
"recordings", "recordings",
"snapshots", "snapshots",
"detect", "detect",
"audio",
"motion", "motion",
"improve_contrast", "improve_contrast",
"motion_threshold", "motion_threshold",

View File

@ -22,13 +22,13 @@ from frigate.ffmpeg_presets import (
parse_preset_output_rtmp, parse_preset_output_rtmp,
) )
from frigate.plus import PlusApi from frigate.plus import PlusApi
from frigate.util import ( from frigate.util.builtin import (
create_mask,
deep_merge, deep_merge,
escape_special_characters, escape_special_characters,
get_ffmpeg_arg_list, get_ffmpeg_arg_list,
load_config_with_no_duplicates, load_config_with_no_duplicates,
) )
from frigate.util.image import create_mask
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@ -11,7 +11,6 @@ YAML_EXT = (".yaml", ".yml")
FRIGATE_LOCALHOST = "http://127.0.0.1:5000" FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video" PLUS_API_HOST = "https://api.frigate.video"
BTBN_PATH = "/usr/lib/btbn-ffmpeg"
# Attributes # Attributes

View File

@ -11,7 +11,7 @@ from pydantic import BaseModel, Extra, Field
from pydantic.fields import PrivateAttr from pydantic.fields import PrivateAttr
from frigate.plus import PlusApi from frigate.plus import PlusApi
from frigate.util import load_labels from frigate.util.builtin import load_labels
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@ -27,7 +27,8 @@ from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe from frigate.log import LogPipe
from frigate.object_detection import load_labels from frigate.object_detection import load_labels
from frigate.types import FeatureMetricsTypes from frigate.types import FeatureMetricsTypes
from frigate.util import get_ffmpeg_arg_list, listen from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.services import listen
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
try: try:
@ -211,7 +212,7 @@ class AudioEventMaintainer(threading.Thread):
else: else:
resp = requests.post( resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create", f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create",
json={"duration": None}, json={"duration": None, "source_type": "audio"},
) )
if resp.status_code == 200: if resp.status_code == 200:
@ -226,18 +227,26 @@ class AudioEventMaintainer(threading.Thread):
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()
for detection in self.detections.values(): for detection in self.detections.values():
if not detection:
continue
if ( if (
now - detection.get("last_detection", now) now - detection.get("last_detection", now)
> self.config.audio.max_not_heard > self.config.audio.max_not_heard
): ):
self.detections[detection["label"]] = None resp = requests.put(
requests.put(
f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end", f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
json={ json={
"end_time": detection["last_detection"] "end_time": detection["last_detection"]
+ self.config.record.events.post_capture + self.config.record.events.post_capture
}, },
) )
if resp.status_code == 200:
self.detections[detection["label"]] = None
else:
logger.warn(
f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
)
def restart_audio_pipe(self) -> None: def restart_audio_pipe(self) -> None:
try: try:

View File

@ -14,7 +14,7 @@ from faster_fifo import Queue
from frigate.config import CameraConfig, FrigateConfig from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum from frigate.events.maintainer import EventTypeEnum
from frigate.util import draw_box_with_label from frigate.util.image import draw_box_with_label
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -29,6 +29,7 @@ class ExternalEventProcessor:
self, self,
camera: str, camera: str,
label: str, label: str,
source_type: str,
sub_label: Optional[str], sub_label: Optional[str],
duration: Optional[int], duration: Optional[int],
include_recording: bool, include_recording: bool,
@ -56,11 +57,16 @@ class ExternalEventProcessor:
"label": label, "label": label,
"sub_label": sub_label, "sub_label": sub_label,
"camera": camera, "camera": camera,
"start_time": now, "start_time": now - camera_config.record.events.pre_capture,
"end_time": now + duration if duration is not None else None, "end_time": now
+ duration
+ camera_config.record.events.post_capture
if duration is not None
else None,
"thumbnail": thumbnail, "thumbnail": thumbnail,
"has_clip": camera_config.record.enabled and include_recording, "has_clip": camera_config.record.enabled and include_recording,
"has_snapshot": True, "has_snapshot": True,
"type": source_type,
}, },
) )
) )

View File

@ -11,7 +11,7 @@ from faster_fifo import Queue
from frigate.config import EventsConfig, FrigateConfig from frigate.config import EventsConfig, FrigateConfig
from frigate.models import Event from frigate.models import Event
from frigate.types import CameraMetricsTypes from frigate.types import CameraMetricsTypes
from frigate.util import to_relative_box from frigate.util.builtin import to_relative_box
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -193,6 +193,7 @@ class EventProcessor(threading.Thread):
"score": score, "score": score,
"top_score": event_data["top_score"], "top_score": event_data["top_score"],
"attributes": attributes, "attributes": attributes,
"type": "object",
}, },
} }
@ -216,8 +217,8 @@ class EventProcessor(threading.Thread):
del self.events_in_process[event_data["id"]] del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera)) self.event_processed_queue.put((event_data["id"], camera))
def handle_external_detection(self, type: str, event_data: Event) -> None: def handle_external_detection(self, event_type: str, event_data: Event) -> None:
if type == "new": if event_type == "new":
event = { event = {
Event.id: event_data["id"], Event.id: event_data["id"],
Event.label: event_data["label"], Event.label: event_data["label"],
@ -229,16 +230,16 @@ class EventProcessor(threading.Thread):
Event.has_clip: event_data["has_clip"], Event.has_clip: event_data["has_clip"],
Event.has_snapshot: event_data["has_snapshot"], Event.has_snapshot: event_data["has_snapshot"],
Event.zones: [], Event.zones: [],
Event.data: {}, Event.data: {"type": event_data["type"]},
} }
Event.insert(event).execute() Event.insert(event).execute()
elif type == "end": elif event_type == "end":
event = { event = {
Event.id: event_data["id"], Event.id: event_data["id"],
Event.end_time: event_data["end_time"], Event.end_time: event_data["end_time"],
} }
try: try:
Event.update(event).execute() Event.update(event).where(Event.id == event_data["id"]).execute()
except Exception: except Exception:
logger.warning(f"Failed to update manual event: {event_data['id']}") logger.warning(f"Failed to update manual event: {event_data['id']}")

View File

@ -5,8 +5,7 @@ import os
from enum import Enum from enum import Enum
from typing import Any from typing import Any
from frigate.const import BTBN_PATH from frigate.util.services import vainfo_hwaccel
from frigate.util import vainfo_hwaccel
from frigate.version import VERSION from frigate.version import VERSION
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -43,7 +42,11 @@ class LibvaGpuSelector:
return "" return ""
TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout" TIMEOUT_PARAM = (
"-timeout"
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
else "-stimeout"
)
_gpu_selector = LibvaGpuSelector() _gpu_selector = LibvaGpuSelector()
_user_agent_args = [ _user_agent_args = [
@ -107,14 +110,14 @@ PRESETS_HW_ACCEL_DECODE = {
} }
PRESETS_HW_ACCEL_SCALE = { PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-32-h264": "-r {0} -s {1}x{2}", "preset-rpi-32-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h264": "-r {0} -s {1}x{2}", "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p", "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"default": "-r {0} -s {1}x{2}", "default": "-r {0} -vf fps={0},scale={1}:{2}",
} }
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = { PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {

View File

@ -38,13 +38,8 @@ from frigate.ptz import OnvifController
from frigate.record.export import PlaybackFactorEnum, RecordingExporter from frigate.record.export import PlaybackFactorEnum, RecordingExporter
from frigate.stats import stats_snapshot from frigate.stats import stats_snapshot
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
from frigate.util import ( from frigate.util.builtin import clean_camera_user_pass, get_tz_modifiers
clean_camera_user_pass, from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel
ffprobe_stream,
get_tz_modifiers,
restart_frigate,
vainfo_hwaccel,
)
from frigate.version import VERSION from frigate.version import VERSION
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -884,6 +879,7 @@ def create_event(camera_name, label):
event_id = current_app.external_processor.create_manual_event( event_id = current_app.external_processor.create_manual_event(
camera_name, camera_name,
label, label,
json.get("source_type", "api"),
json.get("sub_label", None), json.get("sub_label", None),
json.get("duration", 30), json.get("duration", 30),
json.get("include_recording", True), json.get("include_recording", True),

View File

@ -13,7 +13,7 @@ from typing import Deque, Optional
from faster_fifo import Queue from faster_fifo import Queue
from setproctitle import setproctitle from setproctitle import setproctitle
from frigate.util import clean_camera_user_pass from frigate.util.builtin import clean_camera_user_pass
def listener_configurer() -> None: def listener_configurer() -> None:

View File

@ -7,12 +7,15 @@ import signal
import threading import threading
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
import faster_fifo as ff
import numpy as np import numpy as np
from setproctitle import setproctitle from setproctitle import setproctitle
from frigate.detectors import create_detector from frigate.detectors import create_detector
from frigate.detectors.detector_config import InputTensorEnum from frigate.detectors.detector_config import InputTensorEnum
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.services import listen
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -72,7 +75,7 @@ class LocalObjectDetector(ObjectDetector):
def run_detector( def run_detector(
name: str, name: str,
detection_queue: mp.Queue, detection_queue: ff.Queue,
out_events: dict[str, mp.Event], out_events: dict[str, mp.Event],
avg_speed, avg_speed,
start, start,

View File

@ -22,7 +22,7 @@ from frigate.config import (
) )
from frigate.const import CLIPS_DIR from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum from frigate.events.maintainer import EventTypeEnum
from frigate.util import ( from frigate.util.image import (
SharedMemoryFrameManager, SharedMemoryFrameManager,
area, area,
calculate_region, calculate_region,

View File

@ -24,11 +24,70 @@ from ws4py.websocket import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv_crop from frigate.util.image import (
SharedMemoryFrameManager,
copy_yuv_to_position,
get_yuv_crop,
)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def get_standard_aspect_ratio(width, height) -> tuple[int, int]:
"""Ensure that only standard aspect ratios are used."""
known_aspects = [
(16, 9),
(9, 16),
(32, 9),
(12, 9),
(9, 12),
] # aspects are scaled to have common relative size
known_aspects_ratios = list(
map(lambda aspect: aspect[0] / aspect[1], known_aspects)
)
closest = min(
known_aspects_ratios,
key=lambda x: abs(x - (width / height)),
)
return known_aspects[known_aspects_ratios.index(closest)]
class Canvas:
def __init__(self, canvas_width: int, canvas_height: int) -> None:
gcd = math.gcd(canvas_width, canvas_height)
self.aspect = get_standard_aspect_ratio(
(canvas_width / gcd), (canvas_height / gcd)
)
self.width = canvas_width
self.height = (self.width * self.aspect[1]) / self.aspect[0]
self.coefficient_cache: dict[int, int] = {}
self.aspect_cache: dict[str, tuple[int, int]] = {}
def get_aspect(self, coefficient: int) -> tuple[int, int]:
return (self.aspect[0] * coefficient, self.aspect[1] * coefficient)
def get_coefficient(self, camera_count: int) -> int:
return self.coefficient_cache.get(camera_count, 2)
def set_coefficient(self, camera_count: int, coefficient: int) -> None:
self.coefficient_cache[camera_count] = coefficient
def get_camera_aspect(
self, cam_name: str, camera_width: int, camera_height: int
) -> tuple[int, int]:
cached = self.aspect_cache.get(cam_name)
if cached:
return cached
gcd = math.gcd(camera_width, camera_height)
camera_aspect = get_standard_aspect_ratio(
camera_width / gcd, camera_height / gcd
)
self.aspect_cache[cam_name] = camera_aspect
return camera_aspect
class FFMpegConverter: class FFMpegConverter:
def __init__( def __init__(
self, self,
@ -170,6 +229,7 @@ class BirdsEyeFrameManager:
self.frame_shape = (height, width) self.frame_shape = (height, width)
self.yuv_shape = (height * 3 // 2, width) self.yuv_shape = (height * 3 // 2, width)
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8) self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
self.canvas = Canvas(width, height)
self.stop_event = stop_event self.stop_event = stop_event
# initialize the frame as black and with the Frigate logo # initialize the frame as black and with the Frigate logo
@ -276,119 +336,6 @@ class BirdsEyeFrameManager:
def update_frame(self): def update_frame(self):
"""Update to a new frame for birdseye.""" """Update to a new frame for birdseye."""
def calculate_layout(
canvas, cameras_to_add: list[str], coefficient
) -> tuple[any]:
"""Calculate the optimal layout for 2+ cameras."""
camera_layout: list[list[any]] = []
camera_layout.append([])
canvas_gcd = math.gcd(canvas[0], canvas[1])
canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient
canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient
starting_x = 0
x = starting_x
y = 0
y_i = 0
max_y = 0
for camera in cameras_to_add:
camera_dims = self.cameras[camera]["dimensions"].copy()
camera_gcd = math.gcd(camera_dims[0], camera_dims[1])
camera_aspect_x = camera_dims[0] / camera_gcd
camera_aspect_y = camera_dims[1] / camera_gcd
if round(camera_aspect_x / camera_aspect_y, 1) == 1.8:
# account for slightly off 16:9 cameras
camera_aspect_x = 16
camera_aspect_y = 9
elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3:
# make 4:3 cameras the same relative size as 16:9
camera_aspect_x = 12
camera_aspect_y = 9
if camera_dims[1] > camera_dims[0]:
portrait = True
else:
portrait = False
if (x + camera_aspect_x) <= canvas_aspect_x:
# insert if camera can fit on current row
camera_layout[y_i].append(
(
camera,
(
camera_aspect_x,
camera_aspect_y,
),
)
)
if portrait:
starting_x = camera_aspect_x
else:
max_y = max(
max_y,
camera_aspect_y,
)
x += camera_aspect_x
else:
# move on to the next row and insert
y += max_y
y_i += 1
camera_layout.append([])
x = starting_x
if x + camera_aspect_x > canvas_aspect_x:
return None
camera_layout[y_i].append(
(
camera,
(camera_aspect_x, camera_aspect_y),
)
)
x += camera_aspect_x
if y + max_y > canvas_aspect_y:
return None
row_height = int(canvas_height / coefficient)
final_camera_layout = []
starting_x = 0
y = 0
for row in camera_layout:
final_row = []
x = starting_x
for cameras in row:
camera_dims = self.cameras[cameras[0]]["dimensions"].copy()
if camera_dims[1] > camera_dims[0]:
scaled_height = int(row_height * coefficient)
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
)
starting_x = scaled_width
else:
scaled_height = row_height
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
)
if (
x + scaled_width > canvas_width
or y + scaled_height > canvas_height
):
return None
final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
x += scaled_width
y += row_height
final_camera_layout.append(final_row)
return final_camera_layout
# determine how many cameras are tracking objects within the last 30 seconds # determine how many cameras are tracking objects within the last 30 seconds
active_cameras = set( active_cameras = set(
[ [
@ -411,10 +358,8 @@ class BirdsEyeFrameManager:
self.clear_frame() self.clear_frame()
return True return True
# check if we need to reset the layout because there are new cameras to add # check if we need to reset the layout because there is a different number of cameras
reset_layout = ( reset_layout = len(self.active_cameras) - len(active_cameras) != 0
True if len(active_cameras.difference(self.active_cameras)) > 0 else False
)
# reset the layout if it needs to be different # reset the layout if it needs to be different
if reset_layout: if reset_layout:
@ -433,16 +378,15 @@ class BirdsEyeFrameManager:
), ),
) )
canvas_width = self.config.birdseye.width
canvas_height = self.config.birdseye.height
if len(active_cameras) == 1: if len(active_cameras) == 1:
# show single camera as fullscreen # show single camera as fullscreen
camera = active_cameras_to_add[0] camera = active_cameras_to_add[0]
camera_dims = self.cameras[camera]["dimensions"].copy() camera_dims = self.cameras[camera]["dimensions"].copy()
scaled_width = int(canvas_height * camera_dims[0] / camera_dims[1]) scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
coefficient = ( coefficient = (
1 if scaled_width <= canvas_width else canvas_width / scaled_width 1
if scaled_width <= self.canvas.width
else self.canvas.width / scaled_width
) )
self.camera_layout = [ self.camera_layout = [
[ [
@ -452,14 +396,14 @@ class BirdsEyeFrameManager:
0, 0,
0, 0,
int(scaled_width * coefficient), int(scaled_width * coefficient),
int(canvas_height * coefficient), int(self.canvas.height * coefficient),
), ),
) )
] ]
] ]
else: else:
# calculate optimal layout # calculate optimal layout
coefficient = 2 coefficient = self.canvas.get_coefficient(len(active_cameras))
calculating = True calculating = True
# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
@ -467,8 +411,7 @@ class BirdsEyeFrameManager:
if self.stop_event.is_set(): if self.stop_event.is_set():
return return
layout_candidate = calculate_layout( layout_candidate = self.calculate_layout(
(canvas_width, canvas_height),
active_cameras_to_add, active_cameras_to_add,
coefficient, coefficient,
) )
@ -482,6 +425,7 @@ class BirdsEyeFrameManager:
return return
calculating = False calculating = False
self.canvas.set_coefficient(len(active_cameras), coefficient)
self.camera_layout = layout_candidate self.camera_layout = layout_candidate
@ -493,6 +437,125 @@ class BirdsEyeFrameManager:
return True return True
def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]:
    """Calculate the optimal layout for 2+ cameras.

    Works in two passes:
      1. Place cameras row-by-row in "aspect units" provided by
         self.canvas.get_camera_aspect (exact unit semantics live in the
         canvas helper — not visible here).
      2. map_layout converts the abstract rows into concrete pixel
         rectangles and, if there is slack, retries once with a larger
         row height to fill more of the canvas.

    Returns a list of rows of (camera, (x, y, width, height)) tuples, or
    None when the cameras cannot fit at this coefficient.
    """

    def map_layout(row_height: int):
        """Map the calculated layout.

        Converts the aspect-ratio rows built below into pixel
        coordinates for a given row height. Returns
        (max_row_width, total_height, layout) or (0, 0, None) when the
        layout overflows the canvas.
        """
        candidate_layout = []
        starting_x = 0
        x = 0
        max_width = 0
        y = 0

        for row in camera_layout:
            final_row = []
            max_width = max(max_width, x)
            x = starting_x
            for cameras in row:
                camera_dims = self.cameras[cameras[0]]["dimensions"].copy()
                camera_aspect = cameras[1]

                if camera_dims[1] > camera_dims[0]:
                    # portrait cameras span two rows
                    scaled_height = int(row_height * 2)
                    scaled_width = int(scaled_height * camera_aspect)
                    # subsequent rows start to the right of the portrait camera
                    starting_x = scaled_width
                else:
                    scaled_height = row_height
                    scaled_width = int(scaled_height * camera_aspect)

                # layout is too large
                if (
                    x + scaled_width > self.canvas.width
                    or y + scaled_height > self.canvas.height
                ):
                    return 0, 0, None

                final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
                x += scaled_width

            y += row_height
            candidate_layout.append(final_row)

        return max_width, y, candidate_layout

    canvas_aspect_x, canvas_aspect_y = self.canvas.get_aspect(coefficient)
    camera_layout: list[list[any]] = []
    camera_layout.append([])
    starting_x = 0
    x = starting_x
    y = 0
    y_i = 0  # index of the row currently being filled
    max_y = 0  # tallest landscape camera seen on the current row
    for camera in cameras_to_add:
        camera_dims = self.cameras[camera]["dimensions"].copy()
        camera_aspect_x, camera_aspect_y = self.canvas.get_camera_aspect(
            camera, camera_dims[0], camera_dims[1]
        )

        if camera_dims[1] > camera_dims[0]:
            portrait = True
        else:
            portrait = False

        if (x + camera_aspect_x) <= canvas_aspect_x:
            # insert if camera can fit on current row
            camera_layout[y_i].append(
                (
                    camera,
                    camera_aspect_x / camera_aspect_y,
                )
            )

            if portrait:
                # portrait cameras occupy the left edge of following rows
                starting_x = camera_aspect_x
            else:
                max_y = max(
                    max_y,
                    camera_aspect_y,
                )

            x += camera_aspect_x
        else:
            # move on to the next row and insert
            y += max_y
            y_i += 1
            camera_layout.append([])
            x = starting_x

            # camera is wider than a whole fresh row: no layout possible
            if x + camera_aspect_x > canvas_aspect_x:
                return None

            camera_layout[y_i].append(
                (
                    camera,
                    camera_aspect_x / camera_aspect_y,
                )
            )
            x += camera_aspect_x

    # total height (in aspect units) exceeds the canvas: give up
    if y + max_y > canvas_aspect_y:
        return None

    row_height = int(self.canvas.height / coefficient)
    total_width, total_height, standard_candidate_layout = map_layout(row_height)

    # layout can't be optimized more
    if total_width / self.canvas.width >= 0.99:
        return standard_candidate_layout

    # grow the row height by the smaller of the unused width/height fractions
    scale_up_percent = min(
        1 - (total_width / self.canvas.width),
        1 - (total_height / self.canvas.height),
    )
    row_height = int(row_height * (1 + round(scale_up_percent, 1)))

    _, _, scaled_layout = map_layout(row_height)

    # fall back to the unscaled layout if the scaled one overflowed
    if scaled_layout:
        return scaled_layout
    else:
        return standard_candidate_layout
def update(self, camera, object_count, motion_count, frame_time, frame) -> bool: def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
# don't process if birdseye is disabled for this camera # don't process if birdseye is disabled for this camera
camera_config = self.config.cameras[camera].birdseye camera_config = self.config.cameras[camera].birdseye

View File

@ -3,7 +3,6 @@
import asyncio import asyncio
import datetime import datetime
import logging import logging
import multiprocessing as mp
import os import os
import queue import queue
import random import random
@ -15,13 +14,15 @@ from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path from pathlib import Path
from typing import Any, Tuple from typing import Any, Tuple
import faster_fifo as ff
import psutil import psutil
from frigate.config import FrigateConfig, RetainModeEnum from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings from frigate.models import Event, Recordings
from frigate.types import FeatureMetricsTypes from frigate.types import FeatureMetricsTypes
from frigate.util import area, get_video_properties from frigate.util.image import area
from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -30,7 +31,7 @@ class RecordingMaintainer(threading.Thread):
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
recordings_info_queue: mp.Queue, recordings_info_queue: ff.Queue,
process_info: dict[str, FeatureMetricsTypes], process_info: dict[str, FeatureMetricsTypes],
stop_event: MpEvent, stop_event: MpEvent,
): ):

View File

@ -7,6 +7,7 @@ import threading
from types import FrameType from types import FrameType
from typing import Optional from typing import Optional
import faster_fifo as ff
from playhouse.sqliteq import SqliteQueueDatabase from playhouse.sqliteq import SqliteQueueDatabase
from setproctitle import setproctitle from setproctitle import setproctitle
@ -15,14 +16,14 @@ from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
from frigate.record.cleanup import RecordingCleanup from frigate.record.cleanup import RecordingCleanup
from frigate.record.maintainer import RecordingMaintainer from frigate.record.maintainer import RecordingMaintainer
from frigate.types import FeatureMetricsTypes from frigate.types import FeatureMetricsTypes
from frigate.util import listen from frigate.util.services import listen
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def manage_recordings( def manage_recordings(
config: FrigateConfig, config: FrigateConfig,
recordings_info_queue: mp.Queue, recordings_info_queue: ff.Queue,
process_info: dict[str, FeatureMetricsTypes], process_info: dict[str, FeatureMetricsTypes],
) -> None: ) -> None:
stop_event = mp.Event() stop_event = mp.Event()

View File

@ -17,7 +17,7 @@ from frigate.config import FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR from frigate.const import CACHE_DIR, CLIPS_DIR, DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR
from frigate.object_detection import ObjectDetectProcess from frigate.object_detection import ObjectDetectProcess
from frigate.types import CameraMetricsTypes, StatsTrackingTypes from frigate.types import CameraMetricsTypes, StatsTrackingTypes
from frigate.util import ( from frigate.util.services import (
get_amd_gpu_stats, get_amd_gpu_stats,
get_bandwidth_stats, get_bandwidth_stats,
get_cpu_stats, get_cpu_stats,

View File

@ -2,7 +2,7 @@
import unittest import unittest
from frigate.util import clean_camera_user_pass, escape_special_characters from frigate.util.builtin import clean_camera_user_pass, escape_special_characters
class TestUserPassCleanup(unittest.TestCase): class TestUserPassCleanup(unittest.TestCase):

View File

@ -9,7 +9,7 @@ from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import MODEL_CACHE_DIR from frigate.const import MODEL_CACHE_DIR
from frigate.detectors import DetectorTypeEnum from frigate.detectors import DetectorTypeEnum
from frigate.plus import PlusApi from frigate.plus import PlusApi
from frigate.util import deep_merge, load_config_with_no_duplicates from frigate.util.builtin import deep_merge, load_config_with_no_duplicates
class TestConfig(unittest.TestCase): class TestConfig(unittest.TestCase):

View File

@ -3,7 +3,7 @@ from unittest import TestCase, main
import cv2 import cv2
import numpy as np import numpy as np
from frigate.util import copy_yuv_to_position, get_yuv_crop from frigate.util.image import copy_yuv_to_position, get_yuv_crop
class TestCopyYuvToPosition(TestCase): class TestCopyYuvToPosition(TestCase):

View File

@ -1,7 +1,7 @@
import unittest import unittest
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
from frigate.util import get_amd_gpu_stats, get_intel_gpu_stats from frigate.util.services import get_amd_gpu_stats, get_intel_gpu_stats
class TestGpuStats(unittest.TestCase): class TestGpuStats(unittest.TestCase):

View File

@ -5,7 +5,7 @@ import numpy as np
from norfair.drawing.color import Palette from norfair.drawing.color import Palette
from norfair.drawing.drawer import Drawer from norfair.drawing.drawer import Drawer
from frigate.util import intersection from frigate.util.image import intersection
from frigate.video import ( from frigate.video import (
get_cluster_boundary, get_cluster_boundary,
get_cluster_candidates, get_cluster_candidates,

View File

@ -3,7 +3,7 @@ from unittest import TestCase, main
import cv2 import cv2
import numpy as np import numpy as np
from frigate.util import yuv_region_2_rgb from frigate.util.image import yuv_region_2_rgb
class TestYuvRegion2RGB(TestCase): class TestYuvRegion2RGB(TestCase):

View File

@ -10,7 +10,7 @@ from faster_fifo import Queue
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.events.maintainer import EventTypeEnum from frigate.events.maintainer import EventTypeEnum
from frigate.models import Timeline from frigate.models import Timeline
from frigate.util import to_relative_box from frigate.util.builtin import to_relative_box
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@ -7,7 +7,7 @@ from norfair.drawing.drawer import Drawer
from frigate.config import DetectConfig from frigate.config import DetectConfig
from frigate.track import ObjectTracker from frigate.track import ObjectTracker
from frigate.util import intersection_over_union from frigate.util.image import intersection_over_union
# Normalizes distance from estimate relative to object size # Normalizes distance from estimate relative to object size

226
frigate/util/builtin.py Normal file
View File

@ -0,0 +1,226 @@
"""Utilities for builtin types manipulation."""
import copy
import ctypes
import datetime
import logging
import multiprocessing
import re
import shlex
import time
import urllib.parse
from collections import Counter
from collections.abc import Mapping
from queue import Empty, Full
from typing import Any, Tuple
import pytz
import yaml
from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT
from faster_fifo import Queue as FFQueue
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
logger = logging.getLogger(__name__)
class EventsPerSecond:
    """Tracks an approximate events-per-second rate over a sliding window.

    Callers invoke update() once per event and eps() to read the current
    rate over the last `last_n_seconds` seconds.
    """

    def __init__(self, max_events=1000, last_n_seconds=10):
        self._start = None
        self._max_events = max_events
        self._last_n_seconds = last_n_seconds
        self._timestamps = []

    def start(self):
        """Mark the beginning of the measurement window."""
        self._start = datetime.datetime.now().timestamp()

    def update(self):
        """Record one event occurrence at the current time."""
        now = datetime.datetime.now().timestamp()
        if self._start is None:
            self._start = now
        self._timestamps.append(now)
        # trim the backlog once it overshoots max_events by 100 entries
        overshoot = len(self._timestamps) - (self._max_events + 100)
        if overshoot > 0:
            self._timestamps = self._timestamps[(1 - self._max_events) :]
        self.expire_timestamps(now)

    def eps(self):
        """Return the approximate events per second over the window."""
        now = datetime.datetime.now().timestamp()
        if self._start is None:
            self._start = now
        # drop events that have aged out of the window
        self.expire_timestamps(now)
        elapsed = min(now - self._start, self._last_n_seconds)
        # avoid divide by zero
        return len(self._timestamps) / (elapsed or 1)

    def expire_timestamps(self, now):
        """Remove timestamps older than the sliding window."""
        cutoff = now - self._last_n_seconds
        while self._timestamps and self._timestamps[0] < cutoff:
            self._timestamps.pop(0)
class LimitedQueue(FFQueue):
    """faster_fifo Queue with an approximate item-count cap.

    faster_fifo bounds its queue by bytes, not by item count; this subclass
    layers a best-effort maxsize on top using a shared counter.

    NOTE(review): the counter is a multiprocessing.RawValue with no lock, so
    increments/decrements are not atomic across processes — the cap is
    approximate under contention; confirm that is acceptable for callers.
    """

    def __init__(
        self,
        maxsize=0,
        max_size_bytes=DEFAULT_CIRCULAR_BUFFER_SIZE,
        loads=None,
        dumps=None,
    ):
        super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps)
        # 0 (or negative) means "no item-count limit"
        self.maxsize = maxsize
        self.size = multiprocessing.RawValue(
            ctypes.c_int, 0
        )  # Add a counter for the number of items in the queue

    def put(self, x, block=True, timeout=DEFAULT_TIMEOUT):
        """Enqueue x, honoring the approximate maxsize cap.

        Raises queue.Full when the cap is reached and block is False, or
        when the timeout elapses while waiting for space.
        """
        if self.maxsize > 0 and self.size.value >= self.maxsize:
            if not block:
                raise Full
            start_time = time.time()
            # poll until space frees up or the timeout expires
            while self.size.value >= self.maxsize:
                remaining = timeout - (time.time() - start_time)
                if remaining <= 0.0:
                    raise Full
                time.sleep(min(remaining, 0.1))
        self.size.value += 1
        return super().put(x, block=block, timeout=timeout)

    def get(self, block=True, timeout=DEFAULT_TIMEOUT):
        """Dequeue one item; raises queue.Empty when none is available."""
        if self.size.value <= 0 and not block:
            raise Empty
        item = super().get(block=block, timeout=timeout)
        # decrement only after a successful get so a timeout/Empty from the
        # underlying queue doesn't skew the counter downward
        self.size.value -= 1
        return item

    def qsize(self):
        # bug fix: previously returned the RawValue wrapper itself, so
        # comparisons like qsize() == 0 (empty/full below) were always False
        return self.size.value

    def empty(self):
        return self.qsize() == 0

    def full(self):
        return self.qsize() == self.maxsize
def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
    """Recursively merge dct2 into a deep copy of dct1.

    :param dct1: First dict to merge (takes precedence unless override)
    :param dct2: Second dict to merge
    :param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=False)
    :param merge_lists: concatenate lists present under the same key instead of keeping dct1's
    :return: The merged dictionary
    """
    merged = copy.deepcopy(dct1)
    for key, incoming in dct2.items():
        if key not in merged:
            # key only in dct2: copy it over
            merged[key] = copy.deepcopy(incoming)
            continue
        existing = merged[key]
        if isinstance(existing, dict) and isinstance(incoming, Mapping):
            # nested mappings merge recursively
            merged[key] = deep_merge(existing, incoming, override)
        elif isinstance(existing, list) and isinstance(incoming, list):
            if merge_lists:
                merged[key] = existing + incoming
        elif override:
            merged[key] = copy.deepcopy(incoming)
    return merged
def load_config_with_no_duplicates(raw_config) -> dict:
    """Get config ensuring duplicate keys are not allowed.

    Standard YAML loading silently keeps the last value for a repeated
    mapping key; this loader raises ValueError instead so users are told
    about the conflicting config entry.
    """

    # https://stackoverflow.com/a/71751051
    class PreserveDuplicatesLoader(yaml.loader.Loader):
        pass

    def map_constructor(loader, node, deep=False):
        # construct keys and values separately so repeated keys survive
        # long enough to be counted
        keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
        vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
        key_count = Counter(keys)
        data = {}
        for key, val in zip(keys, vals):
            if key_count[key] > 1:
                raise ValueError(
                    f"Config input {key} is defined multiple times for the same field, this is not allowed."
                )
            else:
                data[key] = val
        return data

    # route every YAML mapping through the duplicate-checking constructor
    PreserveDuplicatesLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
    )
    return yaml.load(raw_config, PreserveDuplicatesLoader)
def clean_camera_user_pass(line: str) -> str:
    """Removes user and password from line.

    Masks RTSP credentials as ://*:*@ and HTTP-style query credentials
    as user=*&password=*.
    """
    if "rtsp://" not in line:
        return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
    return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
def escape_special_characters(path: str) -> str:
    """Cleans reserved characters to encodings for ffmpeg.

    URL-encodes the password portion of an rtsp://user:pass@ URL; paths
    without credentials are returned unchanged.
    """
    try:
        # strip the leading "://" and trailing "@" from the credential match
        credentials = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1]
        password = credentials[(credentials.index(":") + 1) :]
        return path.replace(password, urllib.parse.quote_plus(password))
    except AttributeError:
        # path does not have user:pass
        return path
def get_ffmpeg_arg_list(arg: Any) -> list:
    """Use arg if list or convert to list format.

    Strings are tokenized with shell-style splitting; lists pass through.
    """
    if isinstance(arg, list):
        return arg
    return shlex.split(arg)
def load_labels(path, encoding="utf-8"):
    """Loads labels from file (with or without index numbers).

    Args:
        path: path to label file.
        encoding: label file encoding.
    Returns:
        Dictionary mapping indices to labels; indices 0-90 default to
        "unknown" so sparse label files still index cleanly. An empty
        file yields an empty dict.
    """
    with open(path, "r", encoding=encoding) as f:
        lines = f.readlines()

    if not lines:
        return {}

    labels = {index: "unknown" for index in range(91)}

    # a leading numeric token means the file carries explicit indices
    if lines[0].split(" ", maxsplit=1)[0].isdigit():
        for line in lines:
            index, label = line.split(" ", maxsplit=1)
            labels[int(index)] = label.strip()
    else:
        for index, line in enumerate(lines):
            labels[index] = line.strip()
    return labels
def get_tz_modifiers(tz_name: str) -> Tuple[str, str]:
    """Build "{h} hour" / "{m} minute" offset strings for a timezone.

    The offset is the named zone's current UTC offset, split into whole
    hours and leftover minutes (both share the offset's sign).
    """
    utc_offset_seconds = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    hours_offset = int(utc_offset_seconds / 60 / 60)
    minutes_offset = int(utc_offset_seconds / 60 - hours_offset * 60)
    return f"{hours_offset} hour", f"{minutes_offset} minute"
def to_relative_box(
    width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[int, int, int, int]:
    """Convert an absolute (x1, y1, x2, y2) box to relative (x, y, w, h)."""
    x1, y1, x2, y2 = box
    return (
        x1 / width,  # x
        y1 / height,  # y
        (x2 - x1) / width,  # w
        (y2 - y1) / height,  # h
    )

602
frigate/util.py → frigate/util/image.py Executable file → Normal file
View File

@ -1,83 +1,17 @@
import copy """Utilities for creating and manipulating image frames."""
import datetime import datetime
import json
import logging import logging
import os
import re
import shlex
import signal
import subprocess as sp
import traceback
import urllib.parse
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections import Counter
from collections.abc import Mapping
from multiprocessing import shared_memory from multiprocessing import shared_memory
from typing import Any, AnyStr, Optional, Tuple from typing import AnyStr, Optional
import cv2 import cv2
import numpy as np import numpy as np
import psutil
import py3nvml.py3nvml as nvml
import pytz
import yaml
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
"""
:param dct1: First dict to merge
:param dct2: Second dict to merge
:param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=True)
:return: The merge dictionary
"""
merged = copy.deepcopy(dct1)
for k, v2 in dct2.items():
if k in merged:
v1 = merged[k]
if isinstance(v1, dict) and isinstance(v2, Mapping):
merged[k] = deep_merge(v1, v2, override)
elif isinstance(v1, list) and isinstance(v2, list):
if merge_lists:
merged[k] = v1 + v2
else:
if override:
merged[k] = copy.deepcopy(v2)
else:
merged[k] = copy.deepcopy(v2)
return merged
def load_config_with_no_duplicates(raw_config) -> dict:
"""Get config ensuring duplicate keys are not allowed."""
# https://stackoverflow.com/a/71751051
class PreserveDuplicatesLoader(yaml.loader.Loader):
pass
def map_constructor(loader, node, deep=False):
keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
key_count = Counter(keys)
data = {}
for key, val in zip(keys, vals):
if key_count[key] > 1:
raise ValueError(
f"Config input {key} is defined multiple times for the same field, this is not allowed."
)
else:
data[key] = val
return data
PreserveDuplicatesLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
)
return yaml.load(raw_config, PreserveDuplicatesLoader)
def draw_timestamp( def draw_timestamp(
frame, frame,
timestamp, timestamp,
@ -639,432 +573,6 @@ def clipped(obj, frame_shape):
return False return False
def restart_frigate():
proc = psutil.Process(1)
# if this is running via s6, sigterm pid 1
if proc.name() == "s6-svscan":
proc.terminate()
# otherwise, just try and exit frigate
else:
os.kill(os.getpid(), signal.SIGTERM)
class EventsPerSecond:
def __init__(self, max_events=1000, last_n_seconds=10):
self._start = None
self._max_events = max_events
self._last_n_seconds = last_n_seconds
self._timestamps = []
def start(self):
self._start = datetime.datetime.now().timestamp()
def update(self):
now = datetime.datetime.now().timestamp()
if self._start is None:
self._start = now
self._timestamps.append(now)
# truncate the list when it goes 100 over the max_size
if len(self._timestamps) > self._max_events + 100:
self._timestamps = self._timestamps[(1 - self._max_events) :]
self.expire_timestamps(now)
def eps(self):
now = datetime.datetime.now().timestamp()
if self._start is None:
self._start = now
# compute the (approximate) events in the last n seconds
self.expire_timestamps(now)
seconds = min(now - self._start, self._last_n_seconds)
# avoid divide by zero
if seconds == 0:
seconds = 1
return len(self._timestamps) / seconds
# remove aged out timestamps
def expire_timestamps(self, now):
threshold = now - self._last_n_seconds
while self._timestamps and self._timestamps[0] < threshold:
del self._timestamps[0]
def print_stack(sig, frame):
traceback.print_stack(frame)
def listen():
signal.signal(signal.SIGUSR1, print_stack)
def create_mask(frame_shape, mask):
mask_img = np.zeros(frame_shape, np.uint8)
mask_img[:] = 255
if isinstance(mask, list):
for m in mask:
add_mask(m, mask_img)
elif isinstance(mask, str):
add_mask(mask, mask_img)
return mask_img
def add_mask(mask, mask_img):
points = mask.split(",")
contour = np.array(
[[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
)
cv2.fillPoly(mask_img, pts=[contour], color=(0))
def load_labels(path, encoding="utf-8"):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, "r", encoding=encoding) as f:
labels = {index: "unknown" for index in range(91)}
lines = f.readlines()
if not lines:
return {}
if lines[0].split(" ", maxsplit=1)[0].isdigit():
pairs = [line.split(" ", maxsplit=1) for line in lines]
labels.update({int(index): label.strip() for index, label in pairs})
else:
labels.update({index: line.strip() for index, line in enumerate(lines)})
return labels
def clean_camera_user_pass(line: str) -> str:
"""Removes user and password from line."""
if "rtsp://" in line:
return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
else:
return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
def escape_special_characters(path: str) -> str:
"""Cleans reserved characters to encodings for ffmpeg."""
try:
found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1]
pw = found[(found.index(":") + 1) :]
return path.replace(pw, urllib.parse.quote_plus(pw))
except AttributeError:
# path does not have user:pass
return path
def get_cgroups_version() -> str:
"""Determine what version of cgroups is enabled."""
cgroup_path = "/sys/fs/cgroup"
if not os.path.ismount(cgroup_path):
logger.debug(f"{cgroup_path} is not a mount point.")
return "unknown"
try:
with open("/proc/mounts", "r") as f:
mounts = f.readlines()
for mount in mounts:
mount_info = mount.split()
if mount_info[1] == cgroup_path:
fs_type = mount_info[2]
if fs_type == "cgroup2fs" or fs_type == "cgroup2":
return "cgroup2"
elif fs_type == "tmpfs":
return "cgroup"
else:
logger.debug(
f"Could not determine cgroups version: unhandled filesystem {fs_type}"
)
break
except Exception as e:
logger.debug(f"Could not determine cgroups version: {e}")
return "unknown"
def get_docker_memlimit_bytes() -> int:
"""Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""
# check running a supported cgroups version
if get_cgroups_version() == "cgroup2":
memlimit_path = "/sys/fs/cgroup/memory.max"
try:
with open(memlimit_path, "r") as f:
value = f.read().strip()
if value.isnumeric():
return int(value)
elif value.lower() == "max":
return -1
except Exception as e:
logger.debug(f"Unable to get docker memlimit: {e}")
return -1
def get_cpu_stats() -> dict[str, dict]:
"""Get cpu usages for each process id"""
usages = {}
docker_memlimit = get_docker_memlimit_bytes() / 1024
total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024
for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
pid = process.info["pid"]
try:
cpu_percent = process.info["cpu_percent"]
cmdline = process.info["cmdline"]
with open(f"/proc/{pid}/stat", "r") as f:
stats = f.readline().split()
utime = int(stats[13])
stime = int(stats[14])
starttime = int(stats[21])
with open("/proc/uptime") as f:
system_uptime_sec = int(float(f.read().split()[0]))
clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
process_utime_sec = utime // clk_tck
process_stime_sec = stime // clk_tck
process_starttime_sec = starttime // clk_tck
process_elapsed_sec = system_uptime_sec - process_starttime_sec
process_usage_sec = process_utime_sec + process_stime_sec
cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
with open(f"/proc/{pid}/statm", "r") as f:
mem_stats = f.readline().split()
mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024
if docker_memlimit > 0:
mem_pct = round((mem_res / docker_memlimit) * 100, 1)
else:
mem_pct = round((mem_res / total_mem) * 100, 1)
usages[pid] = {
"cpu": str(cpu_percent),
"cpu_average": str(round(cpu_average_usage, 2)),
"mem": f"{mem_pct}",
"cmdline": " ".join(cmdline),
}
except Exception:
continue
return usages
def get_physical_interfaces(interfaces) -> list:
with open("/proc/net/dev", "r") as file:
lines = file.readlines()
physical_interfaces = []
for line in lines:
if ":" in line:
interface = line.split(":")[0].strip()
for int in interfaces:
if interface.startswith(int):
physical_interfaces.append(interface)
return physical_interfaces
def get_bandwidth_stats(config) -> dict[str, dict]:
"""Get bandwidth usages for each ffmpeg process id"""
usages = {}
top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
config.telemetry.network_interfaces
)
p = sp.run(
top_command,
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
return usages
else:
lines = p.stdout.split("\n")
for line in lines:
stats = list(filter(lambda a: a != "", line.strip().split("\t")))
try:
if re.search(
r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0]
):
process = stats[0].split("/")
usages[process[len(process) - 2]] = {
"bandwidth": round(float(stats[1]) + float(stats[2]), 1),
}
except (IndexError, ValueError):
continue
return usages
def get_amd_gpu_stats() -> dict[str, str]:
"""Get stats using radeontop."""
radeontop_command = ["radeontop", "-d", "-", "-l", "1"]
p = sp.run(
radeontop_command,
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
return None
else:
usages = p.stdout.split(",")
results: dict[str, str] = {}
for hw in usages:
if "gpu" in hw:
results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
elif "vram" in hw:
results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
return results
def get_intel_gpu_stats() -> dict[str, str]:
"""Get stats using intel_gpu_top."""
intel_gpu_top_command = [
"timeout",
"0.5s",
"intel_gpu_top",
"-J",
"-o",
"-",
"-s",
"1",
]
p = sp.run(
intel_gpu_top_command,
encoding="ascii",
capture_output=True,
)
# timeout has a non-zero returncode when timeout is reached
if p.returncode != 124:
logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
return None
else:
reading = "".join(p.stdout.split())
results: dict[str, str] = {}
# render is used for qsv
render = []
for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[14:])
single = packet.get("busy", 0.0)
render.append(float(single))
if render:
render_avg = sum(render) / len(render)
else:
render_avg = 1
# video is used for vaapi
video = []
for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[10:])
single = packet.get("busy", 0.0)
video.append(float(single))
if video:
video_avg = sum(video) / len(video)
else:
video_avg = 1
results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
results["mem"] = "-%"
return results
def try_get_info(f, h, default="N/A"):
try:
v = f(h)
except nvml.NVMLError_NotSupported:
v = default
return v
def get_nvidia_gpu_stats() -> dict[int, dict]:
results = {}
try:
nvml.nvmlInit()
deviceCount = nvml.nvmlDeviceGetCount()
for i in range(deviceCount):
handle = nvml.nvmlDeviceGetHandleByIndex(i)
meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
if util != "N/A":
gpu_util = util.gpu
else:
gpu_util = 0
if meminfo != "N/A":
gpu_mem_util = meminfo.used / meminfo.total * 100
else:
gpu_mem_util = -1
results[i] = {
"name": nvml.nvmlDeviceGetName(handle),
"gpu": gpu_util,
"mem": gpu_mem_util,
}
except Exception:
pass
finally:
return results
def ffprobe_stream(path: str) -> sp.CompletedProcess:
"""Run ffprobe on stream."""
clean_path = escape_special_characters(path)
ffprobe_cmd = [
"ffprobe",
"-timeout",
"1000000",
"-print_format",
"json",
"-show_entries",
"stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate",
"-loglevel",
"quiet",
clean_path,
]
return sp.run(ffprobe_cmd, capture_output=True)
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
"""Run vainfo."""
ffprobe_cmd = (
["vainfo"]
if not device_name
else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
)
return sp.run(ffprobe_cmd, capture_output=True)
def get_ffmpeg_arg_list(arg: Any) -> list:
"""Use arg if list or convert to list format."""
return arg if isinstance(arg, list) else shlex.split(arg)
class FrameManager(ABC): class FrameManager(ABC):
@abstractmethod @abstractmethod
def create(self, name, size) -> AnyStr: def create(self, name, size) -> AnyStr:
@ -1132,89 +640,23 @@ class SharedMemoryFrameManager(FrameManager):
del self.shm_store[name] del self.shm_store[name]
def get_tz_modifiers(tz_name: str) -> Tuple[str, str]: def create_mask(frame_shape, mask):
seconds_offset = ( mask_img = np.zeros(frame_shape, np.uint8)
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds() mask_img[:] = 255
if isinstance(mask, list):
for m in mask:
add_mask(m, mask_img)
elif isinstance(mask, str):
add_mask(mask, mask_img)
return mask_img
def add_mask(mask, mask_img):
points = mask.split(",")
contour = np.array(
[[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
) )
hours_offset = int(seconds_offset / 60 / 60) cv2.fillPoly(mask_img, pts=[contour], color=(0))
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
hour_modifier = f"{hours_offset} hour"
minute_modifier = f"{minutes_offset} minute"
return hour_modifier, minute_modifier
def to_relative_box(
width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[int, int, int, int]:
return (
box[0] / width, # x
box[1] / height, # y
(box[2] - box[0]) / width, # w
(box[3] - box[1]) / height, # h
)
def get_video_properties(url, get_duration=False):
def calculate_duration(video: Optional[any]) -> float:
duration = None
if video is not None:
# Get the frames per second (fps) of the video stream
fps = video.get(cv2.CAP_PROP_FPS)
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
if fps and total_frames:
duration = total_frames / fps
# if cv2 failed need to use ffprobe
if duration is None:
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{url}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0 and p.stdout.decode():
duration = float(p.stdout.decode().strip())
else:
duration = -1
return duration
width = height = 0
try:
# Open the video stream
video = cv2.VideoCapture(url)
# Check if the video stream was opened successfully
if not video.isOpened():
video = None
except Exception:
video = None
result = {}
if get_duration:
result["duration"] = calculate_duration(video)
if video is not None:
# Get the width of frames in the video stream
width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
# Get the height of frames in the video stream
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Release the video stream
video.release()
result["width"] = round(width)
result["height"] = round(height)
return result

403
frigate/util/services.py Normal file
View File

@ -0,0 +1,403 @@
"""Utilities for services."""
import json
import logging
import os
import re
import signal
import subprocess as sp
import traceback
from typing import Optional
import cv2
import psutil
import py3nvml.py3nvml as nvml
from frigate.util.builtin import escape_special_characters
logger = logging.getLogger(__name__)
def restart_frigate():
    """Ask the supervisor to restart Frigate, or terminate this process."""
    proc = psutil.Process(1)
    # if this is running via s6, sigterm pid 1
    if proc.name() == "s6-svscan":
        # s6 is expected to bring the service back up after termination
        proc.terminate()
    # otherwise, just try and exit frigate
    else:
        os.kill(os.getpid(), signal.SIGTERM)
def print_stack(sig, frame):
    """Signal handler: dump the current Python stack for debugging."""
    traceback.print_stack(frame)


def listen():
    """Register SIGUSR1 so a running process can be asked for a stack trace."""
    signal.signal(signal.SIGUSR1, print_stack)
def get_cgroups_version() -> str:
    """Determine what version of cgroups is enabled.

    Returns "cgroup2", "cgroup", or "unknown" when the filesystem type
    mounted at /sys/fs/cgroup cannot be classified.
    """
    cgroup_path = "/sys/fs/cgroup"

    if not os.path.ismount(cgroup_path):
        logger.debug(f"{cgroup_path} is not a mount point.")
        return "unknown"

    try:
        with open("/proc/mounts", "r") as f:
            for mount_line in f.readlines():
                fields = mount_line.split()
                if fields[1] != cgroup_path:
                    continue
                fs_type = fields[2]
                if fs_type in ("cgroup2fs", "cgroup2"):
                    return "cgroup2"
                if fs_type == "tmpfs":
                    # cgroup v1 mounts a tmpfs at the cgroup root
                    return "cgroup"
                logger.debug(
                    f"Could not determine cgroups version: unhandled filesystem {fs_type}"
                )
                break
    except Exception as e:
        logger.debug(f"Could not determine cgroups version: {e}")

    return "unknown"
def get_docker_memlimit_bytes() -> int:
    """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""
    # only cgroups v2 exposes the limit at this fixed path
    if get_cgroups_version() != "cgroup2":
        return -1

    try:
        with open("/sys/fs/cgroup/memory.max", "r") as f:
            raw_limit = f.read().strip()
    except Exception as e:
        logger.debug(f"Unable to get docker memlimit: {e}")
        return -1

    if raw_limit.isnumeric():
        return int(raw_limit)

    # "max" means unlimited; anything else is treated as no limit detected
    return -1
def get_cpu_stats() -> dict[str, dict]:
    """Get cpu usages for each process id.

    Returns:
        dict keyed by pid with "cpu" (instant %), "cpu_average" (lifetime %),
        "mem" (% of docker limit or total memory) and "cmdline".
    """
    stats_by_pid = {}
    docker_memlimit = get_docker_memlimit_bytes() / 1024
    total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024
    clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])

    for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
        pid = process.info["pid"]
        try:
            cpu_percent = process.info["cpu_percent"]
            cmdline = process.info["cmdline"]

            # user/system cpu time and start time (in clock ticks) from proc
            with open(f"/proc/{pid}/stat", "r") as f:
                proc_stat = f.readline().split()
            utime_sec = int(proc_stat[13]) // clk_tck
            stime_sec = int(proc_stat[14]) // clk_tck
            start_sec = int(proc_stat[21]) // clk_tck

            with open("/proc/uptime") as f:
                uptime_sec = int(float(f.read().split()[0]))

            # average cpu usage over the whole lifetime of the process
            elapsed_sec = uptime_sec - start_sec
            cpu_average_usage = (utime_sec + stime_sec) * 100 // elapsed_sec

            # resident memory (kB) from /proc/<pid>/statm
            with open(f"/proc/{pid}/statm", "r") as f:
                mem_stats = f.readline().split()
            mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024

            if docker_memlimit > 0:
                mem_pct = round((mem_res / docker_memlimit) * 100, 1)
            else:
                mem_pct = round((mem_res / total_mem) * 100, 1)

            stats_by_pid[pid] = {
                "cpu": str(cpu_percent),
                "cpu_average": str(round(cpu_average_usage, 2)),
                "mem": f"{mem_pct}",
                "cmdline": " ".join(cmdline),
            }
        except Exception:
            # process may have exited between listing and reading /proc
            continue

    return stats_by_pid
def get_physical_interfaces(interfaces) -> list:
    """Return interface names from /proc/net/dev that match a configured prefix.

    Args:
        interfaces: iterable of interface name prefixes (e.g. ["eth", "wlan"]).

    Returns:
        list of matching interface names, each at most once.
    """
    with open("/proc/net/dev", "r") as file:
        lines = file.readlines()

    physical_interfaces = []
    for line in lines:
        if ":" not in line:
            continue

        interface = line.split(":")[0].strip()
        # renamed loop variable from `int`, which shadowed the builtin
        for prefix in interfaces:
            if interface.startswith(prefix):
                physical_interfaces.append(interface)
                # stop after the first match so overlapping prefixes
                # don't append the same interface twice
                break

    return physical_interfaces
def get_bandwidth_stats(config) -> dict[str, dict]:
    """Get bandwidth usages for each ffmpeg process id."""
    usages = {}
    nethogs_cmd = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
        config.telemetry.network_interfaces
    )

    result = sp.run(
        nethogs_cmd,
        encoding="ascii",
        capture_output=True,
    )

    if result.returncode != 0:
        return usages

    for line in result.stdout.split("\n"):
        fields = [field for field in line.strip().split("\t") if field != ""]
        try:
            # only keep lines for processes we care about (ffmpeg/go2rtc/detectors)
            if re.search(
                r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", fields[0]
            ):
                path_parts = fields[0].split("/")
                # second-to-last path component is the pid
                usages[path_parts[-2]] = {
                    "bandwidth": round(float(fields[1]) + float(fields[2]), 1),
                }
        except (IndexError, ValueError):
            continue

    return usages
def get_amd_gpu_stats() -> Optional[dict[str, str]]:
    """Get stats using radeontop.

    Returns:
        dict with "gpu" and "mem" percentage strings, or None when radeontop
        fails (the original annotation claimed dict[str, str] but returned None
        on error).
    """
    radeontop_command = ["radeontop", "-d", "-", "-l", "1"]

    p = sp.run(
        radeontop_command,
        encoding="ascii",
        capture_output=True,
    )

    if p.returncode != 0:
        logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
        return None

    results: dict[str, str] = {}

    # radeontop output is a comma separated list, e.g. "... gpu 5.00%, vram 10.00% ..."
    for hw in p.stdout.split(","):
        if "gpu" in hw:
            results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
        elif "vram" in hw:
            results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"

    return results
def get_intel_gpu_stats() -> Optional[dict[str, str]]:
    """Get stats using intel_gpu_top.

    Returns:
        dict with "gpu" (average of render + video engine busy %) and a "-%"
        placeholder for "mem", or None when polling fails (the original
        annotation claimed dict[str, str] but returned None on error).
    """
    intel_gpu_top_command = [
        "timeout",
        "0.5s",
        "intel_gpu_top",
        "-J",
        "-o",
        "-",
        "-s",
        "1",
    ]

    p = sp.run(
        intel_gpu_top_command,
        encoding="ascii",
        capture_output=True,
    )

    # timeout has a non-zero returncode (124) when timeout is reached, which is
    # the expected way for intel_gpu_top to be stopped here
    if p.returncode != 124:
        logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
        return None

    # strip all whitespace so the JSON fragments match the regexes below
    reading = "".join(p.stdout.split())
    results: dict[str, str] = {}

    # render is used for qsv
    render = []
    for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
        packet = json.loads(result[14:])
        render.append(float(packet.get("busy", 0.0)))

    render_avg = sum(render) / len(render) if render else 1

    # video is used for vaapi
    # NOTE: made this a raw string; the original '"Video/\d"...' literal used
    # the invalid escape sequence \d (SyntaxWarning on modern Python)
    video = []
    for result in re.findall(r'"Video/\d":{[a-z":\d.,%]+}', reading):
        packet = json.loads(result[10:])
        video.append(float(packet.get("busy", 0.0)))

    video_avg = sum(video) / len(video) if video else 1

    results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
    results["mem"] = "-%"
    return results
def try_get_info(f, h, default="N/A"):
    """Call nvml getter `f` with handle `h`, returning `default` when the
    query is not supported by the device."""
    try:
        return f(h)
    except nvml.NVMLError_NotSupported:
        return default
def get_nvidia_gpu_stats() -> dict[int, dict]:
    """Get utilization stats for each nvidia GPU via NVML.

    Returns:
        dict keyed by device index with "name", "gpu" (% utilization, 0 when
        unsupported) and "mem" (% used, -1 when unsupported). Empty dict when
        NVML is unavailable.
    """
    results = {}
    try:
        nvml.nvmlInit()
        deviceCount = nvml.nvmlDeviceGetCount()
        for i in range(deviceCount):
            handle = nvml.nvmlDeviceGetHandleByIndex(i)
            meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
            util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)

            gpu_util = util.gpu if util != "N/A" else 0

            if meminfo != "N/A":
                gpu_mem_util = meminfo.used / meminfo.total * 100
            else:
                gpu_mem_util = -1

            results[i] = {
                "name": nvml.nvmlDeviceGetName(handle),
                "gpu": gpu_util,
                "mem": gpu_mem_util,
            }
    except Exception:
        # NVML missing / no nvidia GPU: return whatever was collected so far
        pass

    # NOTE: the original used `finally: return results`, which silently
    # swallows any in-flight exception (ruff B012); the except clause above
    # already handles errors, so a plain return is equivalent and safer.
    return results
def ffprobe_stream(path: str) -> sp.CompletedProcess:
    """Run ffprobe on stream.

    Returns the CompletedProcess; stdout holds JSON stream metadata when the
    probe succeeded (caller checks returncode).
    """
    probe_cmd = [
        "ffprobe",
        "-timeout",
        "1000000",
        "-print_format",
        "json",
        "-show_entries",
        "stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate",
        "-loglevel",
        "quiet",
        # escape characters that would otherwise break the stream url
        escape_special_characters(path),
    ]
    return sp.run(probe_cmd, capture_output=True)
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
    """Run vainfo, optionally against a specific DRM render device."""
    if device_name:
        vainfo_cmd = [
            "vainfo",
            "--display",
            "drm",
            "--device",
            f"/dev/dri/{device_name}",
        ]
    else:
        vainfo_cmd = ["vainfo"]

    return sp.run(vainfo_cmd, capture_output=True)
def get_video_properties(url, get_duration=False) -> dict:
    """Probe a video with OpenCV, falling back to ffprobe for duration.

    Args:
        url: path or stream url readable by cv2.VideoCapture / ffprobe.
        get_duration: when True, include a "duration" key (seconds, -1 on failure).

    Returns:
        dict with "width" and "height" (both 0 when the video could not be
        opened) and optionally "duration".
    """

    def calculate_duration(video) -> float:
        # `video` is a cv2.VideoCapture or None. The original annotated this
        # parameter as Optional[any], but `any` is the builtin function, not a
        # type, so the annotation was dropped.
        duration = None

        if video is not None:
            # Get the frames per second (fps) of the video stream
            fps = video.get(cv2.CAP_PROP_FPS)
            total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

            if fps and total_frames:
                duration = total_frames / fps

        # if cv2 failed need to use ffprobe
        if duration is None:
            ffprobe_cmd = [
                "ffprobe",
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                f"{url}",
            ]
            p = sp.run(ffprobe_cmd, capture_output=True)

            if p.returncode == 0 and p.stdout.decode():
                duration = float(p.stdout.decode().strip())
            else:
                duration = -1

        return duration

    width = height = 0

    try:
        # Open the video stream
        video = cv2.VideoCapture(url)

        # Check if the video stream was opened successfully
        if not video.isOpened():
            # release the handle before discarding it (the original dropped the
            # reference without releasing, leaking the capture resource)
            video.release()
            video = None
    except Exception:
        video = None

    result = {}

    if get_duration:
        result["duration"] = calculate_duration(video)

    if video is not None:
        # Get the width of frames in the video stream
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)

        # Get the height of frames in the video stream
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)

        # Release the video stream
        video.release()

    result["width"] = round(width)
    result["height"] = round(height)
    return result

View File

@ -11,10 +11,11 @@ import time
from collections import defaultdict from collections import defaultdict
import cv2 import cv2
import faster_fifo as ff
import numpy as np import numpy as np
from setproctitle import setproctitle from setproctitle import setproctitle
from frigate.config import CameraConfig, DetectConfig from frigate.config import CameraConfig, DetectConfig, ModelConfig
from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
from frigate.detectors.detector_config import PixelFormatEnum from frigate.detectors.detector_config import PixelFormatEnum
from frigate.log import LogPipe from frigate.log import LogPipe
@ -23,8 +24,8 @@ from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.object_detection import RemoteObjectDetector from frigate.object_detection import RemoteObjectDetector
from frigate.track import ObjectTracker from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker from frigate.track.norfair_tracker import NorfairTracker
from frigate.util import ( from frigate.util.builtin import EventsPerSecond
EventsPerSecond, from frigate.util.image import (
FrameManager, FrameManager,
SharedMemoryFrameManager, SharedMemoryFrameManager,
area, area,
@ -32,11 +33,11 @@ from frigate.util import (
draw_box_with_label, draw_box_with_label,
intersection, intersection,
intersection_over_union, intersection_over_union,
listen,
yuv_region_2_bgr, yuv_region_2_bgr,
yuv_region_2_rgb, yuv_region_2_rgb,
yuv_region_2_yuv, yuv_region_2_yuv,
) )
from frigate.util.services import listen
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -95,7 +96,17 @@ def filtered(obj, objects_to_track, object_filters):
return False return False
def create_tensor_input(frame, model_config, region): def get_min_region_size(model_config: ModelConfig) -> int:
"""Get the min region size and ensure it is divisible by 4."""
half = int(max(model_config.height, model_config.width) / 2)
if half % 4 == 0:
return half
return int((half + 3) / 4) * 4
def create_tensor_input(frame, model_config: ModelConfig, region):
if model_config.input_pixel_format == PixelFormatEnum.rgb: if model_config.input_pixel_format == PixelFormatEnum.rgb:
cropped_frame = yuv_region_2_rgb(frame, region) cropped_frame = yuv_region_2_rgb(frame, region)
elif model_config.input_pixel_format == PixelFormatEnum.bgr: elif model_config.input_pixel_format == PixelFormatEnum.bgr:
@ -195,17 +206,16 @@ def capture_frames(
frame_rate.update() frame_rate.update()
# if the queue is full, skip this frame # don't lock the queue to check, just try since it should rarely be full
if frame_queue.full(): try:
# add to the queue
frame_queue.put(current_frame.value, False)
# close the frame
frame_manager.close(frame_name)
except queue.Full:
# if the queue is full, skip this frame
skipped_eps.update() skipped_eps.update()
frame_manager.delete(frame_name) frame_manager.delete(frame_name)
continue
# close the frame
frame_manager.close(frame_name)
# add to the queue
frame_queue.put(current_frame.value)
class CameraWatchdog(threading.Thread): class CameraWatchdog(threading.Thread):
@ -717,15 +727,15 @@ def get_consolidated_object_detections(detected_object_groups):
def process_frames( def process_frames(
camera_name: str, camera_name: str,
frame_queue: mp.Queue, frame_queue: ff.Queue,
frame_shape, frame_shape,
model_config, model_config: ModelConfig,
detect_config: DetectConfig, detect_config: DetectConfig,
frame_manager: FrameManager, frame_manager: FrameManager,
motion_detector: MotionDetector, motion_detector: MotionDetector,
object_detector: RemoteObjectDetector, object_detector: RemoteObjectDetector,
object_tracker: ObjectTracker, object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue, detected_objects_queue: ff.Queue,
process_info: dict, process_info: dict,
objects_to_track: list[str], objects_to_track: list[str],
object_filters, object_filters,
@ -743,16 +753,18 @@ def process_frames(
startup_scan_counter = 0 startup_scan_counter = 0
region_min_size = int(max(model_config.height, model_config.width) / 2) region_min_size = get_min_region_size(model_config)
while not stop_event.is_set(): while not stop_event.is_set():
if exit_on_empty and frame_queue.empty():
logger.info("Exiting track_objects...")
break
try: try:
frame_time = frame_queue.get(True, 1) if exit_on_empty:
frame_time = frame_queue.get(False)
else:
frame_time = frame_queue.get(True, 1)
except queue.Empty: except queue.Empty:
if exit_on_empty:
logger.info("Exiting track_objects...")
break
continue continue
current_frame_time.value = frame_time current_frame_time.value = frame_time

View File

@ -5,7 +5,7 @@ import time
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from frigate.object_detection import ObjectDetectProcess from frigate.object_detection import ObjectDetectProcess
from frigate.util import restart_frigate from frigate.util.services import restart_frigate
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@ -334,7 +334,7 @@ export default function System() {
<ActivityIndicator /> <ActivityIndicator />
) : ( ) : (
<div data-testid="cameras" className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4"> <div data-testid="cameras" className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4">
{cameraNames.map((camera) => ( {cameraNames.map((camera) => ( config.cameras[camera]["enabled"] && (
<div key={camera} className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow"> <div key={camera} className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow">
<div className="capitalize text-lg flex justify-between p-4"> <div className="capitalize text-lg flex justify-between p-4">
<Link href={`/cameras/${camera}`}>{camera.replaceAll('_', ' ')}</Link> <Link href={`/cameras/${camera}`}>{camera.replaceAll('_', ' ')}</Link>
@ -406,7 +406,7 @@ export default function System() {
</Tbody> </Tbody>
</Table> </Table>
</div> </div>
</div> </div> )
))} ))}
</div> </div>
)} )}