diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh
index 9c5dec5bb..1a1832f3b 100755
--- a/.devcontainer/post_create.sh
+++ b/.devcontainer/post_create.sh
@@ -14,6 +14,11 @@ curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
 sudo mkdir -p /media/frigate
 sudo chown -R "$(id -u):$(id -g)" /media/frigate

+# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
+# s6 service file. For dev, where frigate is started from an interactive
+# shell, we define it in .bashrc instead.
+echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
+
 make version

 cd web
diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
index 0a835550e..f2cc40fcf 100755
--- a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
+++ b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
@@ -44,6 +44,7 @@ function migrate_db_path() {
 echo "[INFO] Preparing Frigate..."
 migrate_db_path
+export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')

 echo "[INFO] Starting Frigate..."
diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py
index 1397adee8..0531b173d 100644
--- a/docker/rootfs/usr/local/go2rtc/create_config.py
+++ b/docker/rootfs/usr/local/go2rtc/create_config.py
@@ -7,7 +7,7 @@ import sys
 import yaml

 sys.path.insert(0, "/opt/frigate")
-from frigate.const import BIRDSEYE_PIPE, BTBN_PATH  # noqa: E402
+from frigate.const import BIRDSEYE_PIPE  # noqa: E402
 from frigate.ffmpeg_presets import (  # noqa: E402
     parse_preset_hardware_acceleration_encode,
 )
@@ -71,7 +71,7 @@ elif go2rtc_config["rtsp"].get("default_query") is None:
     go2rtc_config["rtsp"]["default_query"] = "mp4"

 # need to replace ffmpeg command when using ffmpeg4
-if not os.path.exists(BTBN_PATH):
+if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
     if go2rtc_config.get("ffmpeg") is None:
         go2rtc_config["ffmpeg"] = {
             "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
diff --git a/frigate/app.py b/frigate/app.py
index ccfbd4696..4d4aa5dd4 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -437,6 +437,7 @@ class FrigateApp:
         )
         audio_process.daemon = True
         audio_process.start()
+        self.processes["audioDetector"] = audio_process.pid or 0
         logger.info(f"Audio process started: {audio_process.pid}")

     def start_timeline_processor(self) -> None:
diff --git a/frigate/const.py b/frigate/const.py
index 20e2b0daa..b6b0e44bd 100644
--- a/frigate/const.py
+++ b/frigate/const.py
@@ -11,7 +11,6 @@ YAML_EXT = (".yaml", ".yml")
 FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
 PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
-BTBN_PATH = "/usr/lib/btbn-ffmpeg"

 # Attributes
diff --git a/frigate/events/audio.py b/frigate/events/audio.py
index f2ed1130e..de0f07e0c 100644
--- a/frigate/events/audio.py
+++ b/frigate/events/audio.py
@@ -188,7 +188,7 @@ class AudioEventMaintainer(threading.Thread):
         else:
             resp = requests.post(
                 f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create",
-                json={"duration": None},
+                json={"duration": None, "source_type": "audio"},
             )

             if resp.status_code == 200:
@@ -210,14 +210,19 @@ class AudioEventMaintainer(threading.Thread):
                 now - detection.get("last_detection", now)
                 > self.config.audio.max_not_heard
             ):
-                requests.put(
+                resp = requests.put(
                     f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
                     json={
                         "end_time": detection["last_detection"]
                         + self.config.record.events.post_capture
                     },
                 )
-                self.detections[detection["label"]] = None
+
+                if resp.status_code == 200:
+                    self.detections[detection["label"]] = None
+                else:
+                    logger.warn(
+                        f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
+                    )

     def restart_audio_pipe(self) -> None:
         try:
diff --git a/frigate/events/external.py b/frigate/events/external.py
index c10a3db09..a801e6d24 100644
--- a/frigate/events/external.py
+++ b/frigate/events/external.py
@@ -29,6 +29,7 @@ class ExternalEventProcessor:
         self,
         camera: str,
         label: str,
+        source_type: str,
         sub_label: Optional[str],
         duration: Optional[int],
         include_recording: bool,
@@ -56,11 +57,16 @@ class ExternalEventProcessor:
                     "label": label,
                     "sub_label": sub_label,
                     "camera": camera,
-                    "start_time": now,
-                    "end_time": now + duration if duration is not None else None,
+                    "start_time": now - camera_config.record.events.pre_capture,
+                    "end_time": now
+                    + duration
+                    + camera_config.record.events.post_capture
+                    if duration is not None
+                    else None,
                     "thumbnail": thumbnail,
                     "has_clip": camera_config.record.enabled and include_recording,
                     "has_snapshot": True,
+                    "type": source_type,
                 },
             )
         )
diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py
index 68e2ec6ad..9640128e1 100644
--- a/frigate/events/maintainer.py
+++ b/frigate/events/maintainer.py
@@ -193,6 +193,7 @@ class EventProcessor(threading.Thread):
                 "score": score,
                 "top_score": event_data["top_score"],
                 "attributes": attributes,
+                "type": "object",
             },
         }

@@ -216,8 +217,8 @@ class EventProcessor(threading.Thread):
             del self.events_in_process[event_data["id"]]
             self.event_processed_queue.put((event_data["id"], camera))

-    def handle_external_detection(self, type: str, event_data: Event) -> None:
-        if type == "new":
+    def handle_external_detection(self, event_type: str, event_data: Event) -> None:
+        if event_type == "new":
             event = {
                 Event.id: event_data["id"],
                 Event.label: event_data["label"],
@@ -229,10 +230,10 @@ class EventProcessor(threading.Thread):
                 Event.has_clip: event_data["has_clip"],
                 Event.has_snapshot: event_data["has_snapshot"],
                 Event.zones: [],
-                Event.data: {},
+                Event.data: {"type": event_data["type"]},
             }
             Event.insert(event).execute()
-        elif type == "end":
+        elif event_type == "end":
             event = {
                 Event.id: event_data["id"],
                 Event.end_time: event_data["end_time"],
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", - "default": "-r {0} -s {1}x{2}", + "default": "-r {0} -vf fps={0},scale={1}:{2}", } PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = { diff --git a/frigate/http.py b/frigate/http.py index aad433c9c..fe6dc54ef 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -879,6 +879,7 @@ def create_event(camera_name, label): event_id = current_app.external_processor.create_manual_event( camera_name, label, + json.get("source_type", "api"), json.get("sub_label", None), json.get("duration", 30), json.get("include_recording", True), diff --git a/frigate/output.py b/frigate/output.py index e21632d7d..80f084edb 100644 --- a/frigate/output.py +++ b/frigate/output.py @@ -33,6 +33,61 @@ from frigate.util.image import ( logger = logging.getLogger(__name__) +def get_standard_aspect_ratio(width, height) -> tuple[int, int]: + """Ensure that only standard aspect ratios are used.""" + known_aspects = [ + (16, 9), + (9, 16), + (32, 9), + (12, 9), + (9, 12), + ] # aspects are scaled to have common relative size + known_aspects_ratios = list( + map(lambda aspect: aspect[0] / aspect[1], known_aspects) + ) + closest = min( + known_aspects_ratios, + key=lambda x: abs(x - (width / height)), + ) + return known_aspects[known_aspects_ratios.index(closest)] + + +class Canvas: + def __init__(self, canvas_width: int, canvas_height: int) -> None: + gcd = math.gcd(canvas_width, canvas_height) + self.aspect = get_standard_aspect_ratio( + (canvas_width / gcd), (canvas_height / gcd) + ) + self.width = canvas_width + self.height = (self.width * self.aspect[1]) / self.aspect[0] + self.coefficient_cache: dict[int, int] = {} + self.aspect_cache: dict[str, tuple[int, int]] = {} + + def get_aspect(self, coefficient: int) -> tuple[int, int]: + return (self.aspect[0] * coefficient, self.aspect[1] * coefficient) + + def get_coefficient(self, camera_count: int) -> int: + return self.coefficient_cache.get(camera_count, 2) + + def set_coefficient(self, camera_count: int, coefficient: int) -> None: + self.coefficient_cache[camera_count] = coefficient + + def get_camera_aspect( + self, cam_name: str, camera_width: int, camera_height: int + ) -> tuple[int, int]: + cached = self.aspect_cache.get(cam_name) + + if cached: + return cached + + gcd = math.gcd(camera_width, camera_height) + camera_aspect = get_standard_aspect_ratio( + camera_width / gcd, camera_height / gcd + ) + self.aspect_cache[cam_name] = camera_aspect + return camera_aspect + + class FFMpegConverter: def __init__( self, @@ -174,6 +229,7 @@ class BirdsEyeFrameManager: self.frame_shape = (height, width) self.yuv_shape = (height * 3 // 2, width) self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8) + self.canvas = Canvas(width, height) self.stop_event = stop_event # initialize the frame as black and with the Frigate logo @@ -322,16 +378,15 @@ class BirdsEyeFrameManager: ), ) - canvas_width = self.config.birdseye.width - canvas_height = self.config.birdseye.height - if len(active_cameras) == 1: # show single camera as fullscreen camera = active_cameras_to_add[0] camera_dims = self.cameras[camera]["dimensions"].copy() - scaled_width = int(canvas_height * camera_dims[0] / camera_dims[1]) + scaled_width = int(self.canvas.height * camera_dims[0] / 
camera_dims[1]) coefficient = ( - 1 if scaled_width <= canvas_width else canvas_width / scaled_width + 1 + if scaled_width <= self.canvas.width + else self.canvas.width / scaled_width ) self.camera_layout = [ [ @@ -341,14 +396,14 @@ class BirdsEyeFrameManager: 0, 0, int(scaled_width * coefficient), - int(canvas_height * coefficient), + int(self.canvas.height * coefficient), ), ) ] ] else: # calculate optimal layout - coefficient = 2 + coefficient = self.canvas.get_coefficient(len(active_cameras)) calculating = True # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas @@ -357,7 +412,6 @@ class BirdsEyeFrameManager: return layout_candidate = self.calculate_layout( - (canvas_width, canvas_height), active_cameras_to_add, coefficient, ) @@ -371,6 +425,7 @@ class BirdsEyeFrameManager: return calculating = False + self.canvas.set_coefficient(len(active_cameras), coefficient) self.camera_layout = layout_candidate @@ -382,9 +437,7 @@ class BirdsEyeFrameManager: return True - def calculate_layout( - self, canvas, cameras_to_add: list[str], coefficient - ) -> tuple[any]: + def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]: """Calculate the optimal layout for 2+ cameras.""" def map_layout(row_height: int): @@ -401,23 +454,20 @@ class BirdsEyeFrameManager: x = starting_x for cameras in row: camera_dims = self.cameras[cameras[0]]["dimensions"].copy() + camera_aspect = cameras[1] if camera_dims[1] > camera_dims[0]: scaled_height = int(row_height * 2) - scaled_width = int( - scaled_height * camera_dims[0] / camera_dims[1] - ) + scaled_width = int(scaled_height * camera_aspect) starting_x = scaled_width else: scaled_height = row_height - scaled_width = int( - scaled_height * camera_dims[0] / camera_dims[1] - ) + scaled_width = int(scaled_height * camera_aspect) # layout is too large if ( - x + scaled_width > canvas_width - or y + scaled_height > canvas_height + x + scaled_width > self.canvas.width + or y + scaled_height > self.canvas.height ): return 0, 0, None @@ -429,13 +479,9 @@ class BirdsEyeFrameManager: return max_width, y, candidate_layout - canvas_width = canvas[0] - canvas_height = canvas[1] + canvas_aspect_x, canvas_aspect_y = self.canvas.get_aspect(coefficient) camera_layout: list[list[any]] = [] camera_layout.append([]) - canvas_gcd = math.gcd(canvas[0], canvas[1]) - canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient - canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient starting_x = 0 x = starting_x y = 0 @@ -443,18 +489,9 @@ class BirdsEyeFrameManager: max_y = 0 for camera in cameras_to_add: camera_dims = self.cameras[camera]["dimensions"].copy() - camera_gcd = math.gcd(camera_dims[0], camera_dims[1]) - camera_aspect_x = camera_dims[0] / camera_gcd - camera_aspect_y = camera_dims[1] / camera_gcd - - if round(camera_aspect_x / camera_aspect_y, 1) == 1.8: - # account for slightly off 16:9 cameras - camera_aspect_x = 16 - camera_aspect_y = 9 - elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3: - # make 4:3 cameras the same relative size as 16:9 - camera_aspect_x = 12 - camera_aspect_y = 9 + camera_aspect_x, camera_aspect_y = self.canvas.get_camera_aspect( + camera, camera_dims[0], camera_dims[1] + ) if camera_dims[1] > camera_dims[0]: portrait = True @@ -466,10 +503,7 @@ class BirdsEyeFrameManager: camera_layout[y_i].append( ( camera, - ( - camera_aspect_x, - camera_aspect_y, - ), + camera_aspect_x / camera_aspect_y, ) ) @@ -495,7 +529,7 @@ class BirdsEyeFrameManager: camera_layout[y_i].append( ( camera, - 
(camera_aspect_x, camera_aspect_y), + camera_aspect_x / camera_aspect_y, ) ) x += camera_aspect_x @@ -503,15 +537,16 @@ class BirdsEyeFrameManager: if y + max_y > canvas_aspect_y: return None - row_height = int(canvas_height / coefficient) + row_height = int(self.canvas.height / coefficient) total_width, total_height, standard_candidate_layout = map_layout(row_height) # layout can't be optimized more - if total_width / canvas_width >= 0.99: + if total_width / self.canvas.width >= 0.99: return standard_candidate_layout scale_up_percent = min( - 1 - (total_width / canvas_width), 1 - (total_height / canvas_height) + 1 - (total_width / self.canvas.width), + 1 - (total_height / self.canvas.height), ) row_height = int(row_height * (1 + round(scale_up_percent, 1))) _, _, scaled_layout = map_layout(row_height) diff --git a/frigate/video.py b/frigate/video.py index 66779944c..06a6ac6a6 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -14,7 +14,7 @@ import cv2 import numpy as np from setproctitle import setproctitle -from frigate.config import CameraConfig, DetectConfig +from frigate.config import CameraConfig, DetectConfig, ModelConfig from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR from frigate.detectors.detector_config import PixelFormatEnum from frigate.log import LogPipe @@ -95,7 +95,17 @@ def filtered(obj, objects_to_track, object_filters): return False -def create_tensor_input(frame, model_config, region): +def get_min_region_size(model_config: ModelConfig) -> int: + """Get the min region size and ensure it is divisible by 4.""" + half = int(max(model_config.height, model_config.width) / 2) + + if half % 4 == 0: + return half + + return int((half + 3) / 4) * 4 + + +def create_tensor_input(frame, model_config: ModelConfig, region): if model_config.input_pixel_format == PixelFormatEnum.rgb: cropped_frame = yuv_region_2_rgb(frame, region) elif model_config.input_pixel_format == PixelFormatEnum.bgr: @@ -719,7 +729,7 @@ def process_frames( camera_name: str, frame_queue: mp.Queue, frame_shape, - model_config, + model_config: ModelConfig, detect_config: DetectConfig, frame_manager: FrameManager, motion_detector: MotionDetector, @@ -743,7 +753,7 @@ def process_frames( startup_scan_counter = 0 - region_min_size = int(max(model_config.height, model_config.width) / 2) + region_min_size = get_min_region_size(model_config) while not stop_event.is_set(): if exit_on_empty and frame_queue.empty(): diff --git a/web/src/routes/System.jsx b/web/src/routes/System.jsx index 82c8d619d..8f4f1436f 100644 --- a/web/src/routes/System.jsx +++ b/web/src/routes/System.jsx @@ -334,7 +334,7 @@ export default function System() { ) : (
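A quick sanity check of get_min_region_size from frigate/video.py above (illustrative sketch only; the standalone function simply mirrors the arithmetic in the patch so the rounding is easy to verify):

def min_region_size(model_height: int, model_width: int) -> int:
    # mirrors get_min_region_size: half the larger model dimension, rounded up to a multiple of 4
    half = int(max(model_height, model_width) / 2)

    if half % 4 == 0:
        return half

    return int((half + 3) / 4) * 4


assert min_region_size(320, 320) == 160  # 160 is already a multiple of 4
assert min_region_size(334, 334) == 168  # 167 rounds up to 168
assert min_region_size(300, 300) == 152  # 150 rounds up to 152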
diff --git a/web/src/routes/System.jsx b/web/src/routes/System.jsx
index 82c8d619d..8f4f1436f 100644
--- a/web/src/routes/System.jsx
+++ b/web/src/routes/System.jsx
@@ -334,7 +334,7 @@ export default function System() {
           ) : (
-            {cameraNames.map((camera) => (
+            {cameraNames.map((camera) => (
+              config.cameras[camera]["enabled"] && (
                 {camera.replaceAll('_', ' ')}
@@ -406,7 +406,7 @@ export default function System() {
-
+
              )
           ))}
         )}
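With the source_type plumbing above, manually created events can be told apart from object and audio events. A hedged usage sketch against the /api/events/<camera>/<label>/create endpoint shown in frigate/http.py; the host, camera name ("front_door"), and label ("package") are assumptions for illustration:

import requests

resp = requests.post(
    "http://127.0.0.1:5000/api/events/front_door/package/create",
    json={
        "source_type": "api",   # defaults to "api" server-side; audio events send "audio"
        "sub_label": None,
        "duration": 30,         # seconds; None leaves the event open until the /end endpoint is called
        "include_recording": True,
    },
)
print(resp.status_code, resp.text)

Per the change in frigate/events/external.py, the stored event is also padded by the camera's record.events.pre_capture and post_capture, so the clip starts slightly before the request and ends slightly after the duration elapses.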
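The birdseye refactor snaps every camera (and the canvas itself) to a small set of standard aspect ratios so layout rows are composed from comparable relative sizes. A sketch of that snapping, mirroring get_standard_aspect_ratio from frigate/output.py; the sample resolutions are illustrative only:

def get_standard_aspect_ratio(width, height):
    # snap an arbitrary ratio to the closest "standard" aspect used by the layout
    known_aspects = [(16, 9), (9, 16), (32, 9), (12, 9), (9, 12)]
    ratios = [w / h for w, h in known_aspects]
    closest = min(ratios, key=lambda r: abs(r - width / height))
    return known_aspects[ratios.index(closest)]


assert get_standard_aspect_ratio(1920, 1080) == (16, 9)  # exact 16:9
assert get_standard_aspect_ratio(640, 480) == (12, 9)    # 4:3, scaled to the same relative size as 16:9
assert get_standard_aspect_ratio(2560, 720) == (32, 9)   # panoramic / dual-lens style stream
assert get_standard_aspect_ratio(1080, 1920) == (9, 16)  # portrait

Cached per camera name in Canvas.get_camera_aspect, this replaces the old round(...) == 1.8 / == 1.3 special cases, and Canvas.get_coefficient/set_coefficient additionally remember the scaling coefficient that last worked for a given number of active cameras, so the layout loop no longer has to restart from 2 on every frame update.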