diff --git a/frigate/app.py b/frigate/app.py index 2755b0a95..d54a9abe2 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -45,7 +45,7 @@ from frigate.record.record import manage_recordings from frigate.stats import StatsEmitter, stats_init from frigate.storage import StorageMaintainer from frigate.timeline import TimelineProcessor -from frigate.types import CameraMetricsTypes, RecordMetricsTypes +from frigate.types import CameraMetricsTypes, FeatureMetricsTypes from frigate.version import VERSION from frigate.video import capture_camera, track_camera from frigate.watchdog import FrigateWatchdog @@ -63,7 +63,7 @@ class FrigateApp: self.log_queue: Queue = mp.Queue() self.plus_api = PlusApi() self.camera_metrics: dict[str, CameraMetricsTypes] = {} - self.record_metrics: dict[str, RecordMetricsTypes] = {} + self.feature_metrics: dict[str, FeatureMetricsTypes] = {} self.processes: dict[str, int] = {} def set_environment_vars(self) -> None: @@ -105,25 +105,19 @@ class FrigateApp: user_config = FrigateConfig.parse_file(config_file) self.config = user_config.runtime_config(self.plus_api) - for camera_name in self.config.cameras.keys(): + for camera_name, camera_config in self.config.cameras.items(): # create camera_metrics self.camera_metrics[camera_name] = { "camera_fps": mp.Value("d", 0.0), "skipped_fps": mp.Value("d", 0.0), "process_fps": mp.Value("d", 0.0), - "detection_enabled": mp.Value( - "i", self.config.cameras[camera_name].detect.enabled - ), + "detection_enabled": mp.Value("i", camera_config.detect.enabled), "motion_enabled": mp.Value("i", True), "improve_contrast_enabled": mp.Value( - "i", self.config.cameras[camera_name].motion.improve_contrast - ), - "motion_threshold": mp.Value( - "i", self.config.cameras[camera_name].motion.threshold - ), - "motion_contour_area": mp.Value( - "i", self.config.cameras[camera_name].motion.contour_area + "i", camera_config.motion.improve_contrast ), + "motion_threshold": mp.Value("i", camera_config.motion.threshold), + 
"motion_contour_area": mp.Value("i", camera_config.motion.contour_area), "detection_fps": mp.Value("d", 0.0), "detection_frame": mp.Value("d", 0.0), "read_start": mp.Value("d", 0.0), @@ -132,10 +126,9 @@ class FrigateApp: "capture_process": None, "process": None, } - self.record_metrics[camera_name] = { - "record_enabled": mp.Value( - "i", self.config.cameras[camera_name].record.enabled - ) + self.feature_metrics[camera_name] = { + "audio_enabled": mp.Value("i", camera_config.audio.enabled), + "record_enabled": mp.Value("i", camera_config.record.enabled), } def set_log_levels(self) -> None: @@ -223,7 +216,7 @@ class FrigateApp: recording_process = mp.Process( target=manage_recordings, name="recording_manager", - args=(self.config, self.recordings_info_queue, self.record_metrics), + args=(self.config, self.recordings_info_queue, self.feature_metrics), ) recording_process.daemon = True self.recording_process = recording_process @@ -282,7 +275,7 @@ class FrigateApp: self.config, self.onvif_controller, self.camera_metrics, - self.record_metrics, + self.feature_metrics, comms, ) @@ -394,7 +387,9 @@ class FrigateApp: def start_audio_processors(self) -> None: if len([c for c in self.config.cameras.values() if c.audio.enabled]) > 0: audio_process = mp.Process( - target=listen_to_audio, name=f"audio_capture", args=(self.config,) + target=listen_to_audio, + name=f"audio_capture", + args=(self.config, self.feature_metrics), ) audio_process.daemon = True audio_process.start() diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index b7e9e8858..1c9105ce8 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -6,7 +6,7 @@ from typing import Any, Callable from frigate.config import FrigateConfig from frigate.ptz import OnvifCommandEnum, OnvifController -from frigate.types import CameraMetricsTypes, RecordMetricsTypes +from frigate.types import CameraMetricsTypes, FeatureMetricsTypes from frigate.util import restart_frigate logger = 
logging.getLogger(__name__) @@ -39,19 +39,20 @@ class Dispatcher: config: FrigateConfig, onvif: OnvifController, camera_metrics: dict[str, CameraMetricsTypes], - record_metrics: dict[str, RecordMetricsTypes], + feature_metrics: dict[str, FeatureMetricsTypes], communicators: list[Communicator], ) -> None: self.config = config self.onvif = onvif self.camera_metrics = camera_metrics - self.record_metrics = record_metrics + self.feature_metrics = feature_metrics self.comms = communicators for comm in self.comms: comm.subscribe(self._receive) self._camera_settings_handlers: dict[str, Callable] = { + "audio": self._on_audio_command, "detect": self._on_detect_command, "improve_contrast": self._on_motion_improve_contrast_command, "motion": self._on_motion_command, @@ -186,6 +187,29 @@ class Dispatcher: motion_settings.threshold = payload # type: ignore[union-attr] self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True) + def _on_audio_command(self, camera_name: str, payload: str) -> None: + """Callback for audio topic.""" + audio_settings = self.config.cameras[camera_name].audio + + if payload == "ON": + if not self.config.cameras[camera_name].audio.enabled_in_config: + logger.error( + "Audio detection must be enabled in the config to be turned on via MQTT." 
+ ) + return + + if not audio_settings.enabled: + logger.info(f"Turning on audio detection for {camera_name}") + audio_settings.enabled = True + self.feature_metrics[camera_name]["audio_enabled"].value = True + elif payload == "OFF": + if self.feature_metrics[camera_name]["audio_enabled"].value: + logger.info(f"Turning off audio detection for {camera_name}") + audio_settings.enabled = False + self.feature_metrics[camera_name]["audio_enabled"].value = False + + self.publish(f"{camera_name}/audio/state", payload, retain=True) + def _on_recordings_command(self, camera_name: str, payload: str) -> None: """Callback for recordings topic.""" record_settings = self.config.cameras[camera_name].record @@ -200,12 +224,12 @@ class Dispatcher: if not record_settings.enabled: logger.info(f"Turning on recordings for {camera_name}") record_settings.enabled = True - self.record_metrics[camera_name]["record_enabled"].value = True + self.feature_metrics[camera_name]["record_enabled"].value = True elif payload == "OFF": - if self.record_metrics[camera_name]["record_enabled"].value: + if self.feature_metrics[camera_name]["record_enabled"].value: logger.info(f"Turning off recordings for {camera_name}") record_settings.enabled = False - self.record_metrics[camera_name]["record_enabled"].value = False + self.feature_metrics[camera_name]["record_enabled"].value = False self.publish(f"{camera_name}/recordings/state", payload, retain=True) diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 8e9c711ae..6c917c344 100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -23,10 +23,10 @@ from frigate.const import ( AUDIO_SAMPLE_RATE, CACHE_DIR, ) -from frigate.events.maintainer import EventTypeEnum from frigate.ffmpeg_presets import parse_preset_input from frigate.log import LogPipe from frigate.object_detection import load_labels +from frigate.types import FeatureMetricsTypes from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg from frigate.util import 
get_ffmpeg_arg_list, listen @@ -42,16 +42,16 @@ FFMPEG_COMMAND = ( ) -def listen_to_audio(config: FrigateConfig) -> None: +def listen_to_audio( + config: FrigateConfig, + process_info: dict[str, FeatureMetricsTypes], +) -> None: stop_event = mp.Event() audio_threads: list[threading.Thread] = [] def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: stop_event.set() - for thread in audio_threads: - thread.join() - signal.signal(signal.SIGTERM, receiveSignal) signal.signal(signal.SIGINT, receiveSignal) @@ -60,14 +60,23 @@ def listen_to_audio(config: FrigateConfig) -> None: listen() for camera in config.cameras.values(): - if camera.enabled and camera.audio.enabled: - audio = AudioEventMaintainer(camera, stop_event) + if camera.enabled and camera.audio.enabled_in_config: + audio = AudioEventMaintainer(camera, process_info, stop_event) audio_threads.append(audio) audio.start() + while not stop_event.is_set(): + stop_event.wait() + + for thread in audio_threads: + thread.join() + + logger.info("Exiting audio detector...") + class AudioTfl: - def __init__(self): + def __init__(self, stop_event: mp.Event): + self.stop_event = stop_event self.labels = load_labels("/audio-labelmap.txt") self.interpreter = Interpreter( model_path="/cpu_audio_model.tflite", @@ -110,6 +119,9 @@ class AudioTfl: def detect(self, tensor_input, threshold=0.8): detections = [] + if self.stop_event.is_set(): + return detections + raw_detections = self._detect_raw(tensor_input) for d in raw_detections: @@ -122,13 +134,19 @@ class AudioTfl: class AudioEventMaintainer(threading.Thread): - def __init__(self, camera: CameraConfig, stop_event: mp.Event) -> None: + def __init__( + self, + camera: CameraConfig, + feature_metrics: dict[str, FeatureMetricsTypes], + stop_event: mp.Event, + ) -> None: threading.Thread.__init__(self) self.name = f"{camera.name}_audio_event_processor" self.config = camera - self.detections: dict[dict[str, any]] = {} + self.feature_metrics = feature_metrics + 
self.detections: dict[dict[str, any]] = {} self.stop_event = stop_event - self.detector = AudioTfl() + self.detector = AudioTfl(stop_event) self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),) self.chunk_size = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE * 2)) self.pipe = f"{CACHE_DIR}/{self.config.name}-audio" @@ -147,6 +165,11 @@ class AudioEventMaintainer(threading.Thread): self.audio_listener = None def detect_audio(self, audio) -> None: + if not self.feature_metrics[self.config.name]["audio_enabled"].value: + return + + logger.debug("Running audio inference") + waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32) model_detections = self.detector.detect(waveform) @@ -181,7 +204,10 @@ class AudioEventMaintainer(threading.Thread): now = datetime.datetime.now().timestamp() for detection in self.detections.values(): - if now - detection["last_detection"] > self.config.audio.max_not_heard: + if ( + now - detection.get("last_detection", now) + > self.config.audio.max_not_heard + ): self.detections[detection["label"]] = None requests.put( f"http://127.0.0.1/api/events/{detection['event_id']}/end", diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index 3ed6540d0..8e40fc6e7 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -20,7 +20,7 @@ import psutil from frigate.config import FrigateConfig, RetainModeEnum from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR from frigate.models import Event, Recordings -from frigate.types import RecordMetricsTypes +from frigate.types import FeatureMetricsTypes from frigate.util import area, get_video_properties logger = logging.getLogger(__name__) @@ -31,7 +31,7 @@ class RecordingMaintainer(threading.Thread): def __init__( self, config: FrigateConfig, recordings_info_queue: mp.Queue, - process_info: dict[str, RecordMetricsTypes], + process_info: dict[str, FeatureMetricsTypes], stop_event: MpEvent, ): threading.Thread.__init__(self) diff --git 
a/frigate/record/record.py b/frigate/record/record.py index ab6cd3450..530adc031 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -14,7 +14,7 @@ from frigate.config import FrigateConfig from frigate.models import Event, Recordings, RecordingsToDelete, Timeline from frigate.record.cleanup import RecordingCleanup from frigate.record.maintainer import RecordingMaintainer -from frigate.types import RecordMetricsTypes +from frigate.types import FeatureMetricsTypes from frigate.util import listen logger = logging.getLogger(__name__) @@ -23,7 +23,7 @@ logger = logging.getLogger(__name__) def manage_recordings( config: FrigateConfig, recordings_info_queue: mp.Queue, - process_info: dict[str, RecordMetricsTypes], + process_info: dict[str, FeatureMetricsTypes], ) -> None: stop_event = mp.Event() diff --git a/frigate/types.py b/frigate/types.py index 8c3e54654..af02f4c86 100644 --- a/frigate/types.py +++ b/frigate/types.py @@ -24,7 +24,8 @@ class CameraMetricsTypes(TypedDict): skipped_fps: Synchronized -class RecordMetricsTypes(TypedDict): +class FeatureMetricsTypes(TypedDict): + audio_enabled: Synchronized record_enabled: Synchronized diff --git a/web/src/icons/Audio.jsx b/web/src/icons/Audio.jsx index 68944f70d..cec783854 100644 --- a/web/src/icons/Audio.jsx +++ b/web/src/icons/Audio.jsx @@ -7,7 +7,7 @@ export function Snapshot({ className = 'h-6 w-6', stroke = 'currentColor', onCli xmlns="http://www.w3.org/2000/svg" className={className} fill="none" - viewBox="0 0 24 24" + viewBox="0 0 32 32" stroke={stroke} onClick={onClick} > @@ -29,7 +29,6 @@ export function Snapshot({ className = 'h-6 w-6', stroke = 'currentColor', onCli stroke-width="2" d="M9.28 8.082A3.006 3.006 0 0113 11h2a4.979 4.979 0 00-1.884-3.911 5.041 5.041 0 00-4.281-.957 4.95 4.95 0 00-3.703 3.703 5.032 5.032 0 002.304 5.458A3.078 3.078 0 019 17.924V20h2v-2.077a5.06 5.06 0 00-2.537-4.346 3.002 3.002 0 01.817-5.494z" /> - ); } diff --git a/web/src/routes/Cameras.jsx 
b/web/src/routes/Cameras.jsx index 35c90f644..2298b992e 100644 --- a/web/src/routes/Cameras.jsx +++ b/web/src/routes/Cameras.jsx @@ -94,8 +94,8 @@ function Camera({ name, config }) { sendAudio(audioValue === 'ON' ? 'OFF' : 'ON', true); }, } - : {}, - ], + : null, + ].filter((button) => button != null), [config, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots] );