Add ability to toggle audio with mqtt

Nick Mowen 2023-06-19 16:19:55 -06:00
parent 8198165fa7
commit 85cc348751
8 changed files with 92 additions and 47 deletions
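With this change the dispatcher accepts an "audio" camera command, so audio detection can be toggled per camera over MQTT the same way recordings already are, and the current state is published back on the retained .../audio/state topic. A minimal usage sketch, assuming the usual frigate/<camera_name>/audio/set command topic, a broker on localhost, and the third-party paho-mqtt client (all assumptions, not shown in this diff):

# Hypothetical example: toggle audio detection for one camera over MQTT.
import paho.mqtt.client as mqtt

CAMERA = "front_door"  # hypothetical camera name

client = mqtt.Client()
client.connect("localhost", 1883)

# Ask Frigate to enable audio detection; the dispatcher answers on
# frigate/<camera>/audio/state with a retained "ON"/"OFF" payload.
client.publish(f"frigate/{CAMERA}/audio/set", "ON")

# ...and disable it again.
client.publish(f"frigate/{CAMERA}/audio/set", "OFF")

client.disconnect()

Note that, per the handler added below, the "ON" command is rejected with an error if audio detection is not enabled for that camera in the config.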

View File

@@ -45,7 +45,7 @@ from frigate.record.record import manage_recordings
 from frigate.stats import StatsEmitter, stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
-from frigate.types import CameraMetricsTypes, RecordMetricsTypes
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera
 from frigate.watchdog import FrigateWatchdog
@@ -63,7 +63,7 @@ class FrigateApp:
         self.log_queue: Queue = mp.Queue()
         self.plus_api = PlusApi()
         self.camera_metrics: dict[str, CameraMetricsTypes] = {}
-        self.record_metrics: dict[str, RecordMetricsTypes] = {}
+        self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
         self.processes: dict[str, int] = {}

     def set_environment_vars(self) -> None:
@@ -105,25 +105,19 @@ class FrigateApp:
         user_config = FrigateConfig.parse_file(config_file)
         self.config = user_config.runtime_config(self.plus_api)

-        for camera_name in self.config.cameras.keys():
+        for camera_name, camera_config in self.config.cameras.items():
             # create camera_metrics
             self.camera_metrics[camera_name] = {
                 "camera_fps": mp.Value("d", 0.0),
                 "skipped_fps": mp.Value("d", 0.0),
                 "process_fps": mp.Value("d", 0.0),
-                "detection_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].detect.enabled
-                ),
+                "detection_enabled": mp.Value("i", camera_config.detect.enabled),
                 "motion_enabled": mp.Value("i", True),
                 "improve_contrast_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.improve_contrast
+                    "i", camera_config.motion.improve_contrast
                 ),
-                "motion_threshold": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.threshold
-                ),
-                "motion_contour_area": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.contour_area
-                ),
+                "motion_threshold": mp.Value("i", camera_config.motion.threshold),
+                "motion_contour_area": mp.Value("i", camera_config.motion.contour_area),
                 "detection_fps": mp.Value("d", 0.0),
                 "detection_frame": mp.Value("d", 0.0),
                 "read_start": mp.Value("d", 0.0),
@@ -132,10 +126,9 @@ class FrigateApp:
                 "capture_process": None,
                 "process": None,
             }
-            self.record_metrics[camera_name] = {
-                "record_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].record.enabled
-                )
+            self.feature_metrics[camera_name] = {
+                "audio_enabled": mp.Value("i", camera_config.audio.enabled),
+                "record_enabled": mp.Value("i", camera_config.record.enabled),
             }

     def set_log_levels(self) -> None:
@@ -223,7 +216,7 @@ class FrigateApp:
         recording_process = mp.Process(
             target=manage_recordings,
             name="recording_manager",
-            args=(self.config, self.recordings_info_queue, self.record_metrics),
+            args=(self.config, self.recordings_info_queue, self.feature_metrics),
         )
         recording_process.daemon = True
         self.recording_process = recording_process
@@ -282,7 +275,7 @@ class FrigateApp:
             self.config,
             self.onvif_controller,
             self.camera_metrics,
-            self.record_metrics,
+            self.feature_metrics,
             comms,
         )

@@ -394,7 +387,9 @@ class FrigateApp:
     def start_audio_processors(self) -> None:
         if len([c for c in self.config.cameras.values() if c.audio.enabled]) > 0:
             audio_process = mp.Process(
-                target=listen_to_audio, name=f"audio_capture", args=(self.config,)
+                target=listen_to_audio,
+                name=f"audio_capture",
+                args=(self.config, self.feature_metrics),
             )
             audio_process.daemon = True
             audio_process.start()

View File

@@ -6,7 +6,7 @@ from typing import Any, Callable

 from frigate.config import FrigateConfig
 from frigate.ptz import OnvifCommandEnum, OnvifController
-from frigate.types import CameraMetricsTypes, RecordMetricsTypes
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
 from frigate.util import restart_frigate

 logger = logging.getLogger(__name__)
@@ -39,19 +39,20 @@ class Dispatcher:
         config: FrigateConfig,
         onvif: OnvifController,
         camera_metrics: dict[str, CameraMetricsTypes],
-        record_metrics: dict[str, RecordMetricsTypes],
+        feature_metrics: dict[str, FeatureMetricsTypes],
         communicators: list[Communicator],
     ) -> None:
         self.config = config
         self.onvif = onvif
         self.camera_metrics = camera_metrics
-        self.record_metrics = record_metrics
+        self.feature_metrics = feature_metrics
         self.comms = communicators

         for comm in self.comms:
             comm.subscribe(self._receive)

         self._camera_settings_handlers: dict[str, Callable] = {
+            "audio": self._on_audio_command,
             "detect": self._on_detect_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
             "motion": self._on_motion_command,
@@ -186,6 +187,29 @@ class Dispatcher:
             motion_settings.threshold = payload  # type: ignore[union-attr]
         self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)

+    def _on_audio_command(self, camera_name: str, payload: str) -> None:
+        """Callback for audio topic."""
+        audio_settings = self.config.cameras[camera_name].audio
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].audio.enabled_in_config:
+                logger.error(
+                    "Audio detection must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not audio_settings.enabled:
+                logger.info(f"Turning on audio detection for {camera_name}")
+                audio_settings.enabled = True
+                self.feature_metrics[camera_name]["audio_enabled"].value = True
+        elif payload == "OFF":
+            if self.feature_metrics[camera_name]["audio_enabled"].value:
+                logger.info(f"Turning off audio detection for {camera_name}")
+                audio_settings.enabled = False
+                self.feature_metrics[camera_name]["audio_enabled"].value = False
+
+        self.publish(f"{camera_name}/audio/state", payload, retain=True)
+
     def _on_recordings_command(self, camera_name: str, payload: str) -> None:
         """Callback for recordings topic."""
         record_settings = self.config.cameras[camera_name].record
@@ -200,12 +224,12 @@ class Dispatcher:
             if not record_settings.enabled:
                 logger.info(f"Turning on recordings for {camera_name}")
                 record_settings.enabled = True
-                self.record_metrics[camera_name]["record_enabled"].value = True
+                self.feature_metrics[camera_name]["record_enabled"].value = True
         elif payload == "OFF":
-            if self.record_metrics[camera_name]["record_enabled"].value:
+            if self.feature_metrics[camera_name]["record_enabled"].value:
                 logger.info(f"Turning off recordings for {camera_name}")
                 record_settings.enabled = False
-                self.record_metrics[camera_name]["record_enabled"].value = False
+                self.feature_metrics[camera_name]["record_enabled"].value = False

         self.publish(f"{camera_name}/recordings/state", payload, retain=True)

View File

@@ -23,10 +23,10 @@ from frigate.const import (
     AUDIO_SAMPLE_RATE,
     CACHE_DIR,
 )
-from frigate.events.maintainer import EventTypeEnum
 from frigate.ffmpeg_presets import parse_preset_input
 from frigate.log import LogPipe
 from frigate.object_detection import load_labels
+from frigate.types import FeatureMetricsTypes
 from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
 from frigate.util import get_ffmpeg_arg_list, listen
@@ -42,16 +42,16 @@ FFMPEG_COMMAND = (
 )


-def listen_to_audio(config: FrigateConfig) -> None:
+def listen_to_audio(
+    config: FrigateConfig,
+    process_info: dict[str, FeatureMetricsTypes],
+) -> None:
     stop_event = mp.Event()
     audio_threads: list[threading.Thread] = []

     def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
         stop_event.set()

-        for thread in audio_threads:
-            thread.join()

     signal.signal(signal.SIGTERM, receiveSignal)
     signal.signal(signal.SIGINT, receiveSignal)
@@ -60,14 +60,23 @@ def listen_to_audio(config: FrigateConfig) -> None:
     listen()

     for camera in config.cameras.values():
-        if camera.enabled and camera.audio.enabled:
-            audio = AudioEventMaintainer(camera, stop_event)
+        if camera.enabled and camera.audio.enabled_in_config:
+            audio = AudioEventMaintainer(camera, process_info, stop_event)
             audio_threads.append(audio)
             audio.start()

+    while not stop_event.is_set():
+        pass
+
+    for thread in audio_threads:
+        thread.join()
+
+    logger.info("Exiting audio detector...")
+

 class AudioTfl:
-    def __init__(self):
+    def __init__(self, stop_event: mp.Event):
+        self.stop_event = stop_event
         self.labels = load_labels("/audio-labelmap.txt")
         self.interpreter = Interpreter(
             model_path="/cpu_audio_model.tflite",
@@ -110,6 +119,9 @@ class AudioTfl:
     def detect(self, tensor_input, threshold=0.8):
         detections = []

+        if self.stop_event.is_set():
+            return detections
+
         raw_detections = self._detect_raw(tensor_input)

         for d in raw_detections:
@@ -122,13 +134,19 @@
 class AudioEventMaintainer(threading.Thread):
-    def __init__(self, camera: CameraConfig, stop_event: mp.Event) -> None:
+    def __init__(
+        self,
+        camera: CameraConfig,
+        feature_metrics: dict[str, FeatureMetricsTypes],
+        stop_event: mp.Event,
+    ) -> None:
         threading.Thread.__init__(self)
         self.name = f"{camera.name}_audio_event_processor"
         self.config = camera
-        self.detections: dict[dict[str, any]] = {}
+        self.feature_metrics = feature_metrics
+        self.detections: dict[dict[str, any]] = feature_metrics
         self.stop_event = stop_event
-        self.detector = AudioTfl()
+        self.detector = AudioTfl(stop_event)
         self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
         self.chunk_size = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE * 2))
         self.pipe = f"{CACHE_DIR}/{self.config.name}-audio"
@@ -147,6 +165,11 @@ class AudioEventMaintainer(threading.Thread):
         self.audio_listener = None

     def detect_audio(self, audio) -> None:
+        if not self.feature_metrics[self.config.name]["audio_enabled"].value:
+            return
+
+        logger.error(f"Running audio inference")
+
         waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
         model_detections = self.detector.detect(waveform)
@@ -181,7 +204,10 @@ class AudioEventMaintainer(threading.Thread):
         now = datetime.datetime.now().timestamp()

         for detection in self.detections.values():
-            if now - detection["last_detection"] > self.config.audio.max_not_heard:
+            if (
+                now - detection.get("last_detection", now)
+                > self.config.audio.max_not_heard
+            ):
                 self.detections[detection["label"]] = None
                 requests.put(
                     f"http://127.0.0.1/api/events/{detection['event_id']}/end",

View File

@@ -20,7 +20,7 @@ import psutil

 from frigate.config import FrigateConfig, RetainModeEnum
 from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
 from frigate.models import Event, Recordings
-from frigate.types import RecordMetricsTypes
+from frigate.types import FeatureMetricsTypes
 from frigate.util import area, get_video_properties

 logger = logging.getLogger(__name__)
@@ -31,7 +31,7 @@ class RecordingMaintainer(threading.Thread):
         self,
         config: FrigateConfig,
         recordings_info_queue: mp.Queue,
-        process_info: dict[str, RecordMetricsTypes],
+        process_info: dict[str, FeatureMetricsTypes],
         stop_event: MpEvent,
     ):
         threading.Thread.__init__(self)

View File

@@ -14,7 +14,7 @@ from frigate.config import FrigateConfig
 from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
 from frigate.record.cleanup import RecordingCleanup
 from frigate.record.maintainer import RecordingMaintainer
-from frigate.types import RecordMetricsTypes
+from frigate.types import FeatureMetricsTypes
 from frigate.util import listen

 logger = logging.getLogger(__name__)
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
 def manage_recordings(
     config: FrigateConfig,
     recordings_info_queue: mp.Queue,
-    process_info: dict[str, RecordMetricsTypes],
+    process_info: dict[str, FeatureMetricsTypes],
 ) -> None:
     stop_event = mp.Event()

View File

@@ -24,7 +24,8 @@ class CameraMetricsTypes(TypedDict):
     skipped_fps: Synchronized


-class RecordMetricsTypes(TypedDict):
+class FeatureMetricsTypes(TypedDict):
+    audio_enabled: Synchronized
     record_enabled: Synchronized
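The fields above are multiprocessing Synchronized values: the dispatcher in the main process flips a flag and the recording/audio worker processes see the new value on their next check, with no queue or restart involved. A small self-contained sketch of that sharing pattern (the camera name and timings are illustrative only, not taken from this diff):

import multiprocessing as mp
import time


def worker(feature_metrics):
    # Child process: poll the shared flag the same way the audio/recording
    # maintainers consult their process_info dicts.
    while True:
        print("audio_enabled =", bool(feature_metrics["front_door"]["audio_enabled"].value))
        time.sleep(1)


if __name__ == "__main__":
    feature_metrics = {"front_door": {"audio_enabled": mp.Value("i", True)}}

    p = mp.Process(target=worker, args=(feature_metrics,), daemon=True)
    p.start()

    time.sleep(2)
    feature_metrics["front_door"]["audio_enabled"].value = False  # parent toggles; child observes
    time.sleep(2)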

View File

@@ -7,7 +7,7 @@ export function Snapshot({ className = 'h-6 w-6', stroke = 'currentColor', onCli
       xmlns="http://www.w3.org/2000/svg"
       className={className}
       fill="none"
-      viewBox="0 0 24 24"
+      viewBox="0 0 32 32"
       stroke={stroke}
       onClick={onClick}
     >
@@ -29,7 +29,6 @@ export function Snapshot({ className = 'h-6 w-6', stroke = 'currentColor', onCli
         stroke-width="2"
         d="M9.28 8.082A3.006 3.006 0 0113 11h2a4.979 4.979 0 00-1.884-3.911 5.041 5.041 0 00-4.281-.957 4.95 4.95 0 00-3.703 3.703 5.032 5.032 0 002.304 5.458A3.078 3.078 0 019 17.924V20h2v-2.077a5.06 5.06 0 00-2.537-4.346 3.002 3.002 0 01.817-5.494z"
       />
-      <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" fill="none" d="M0 0h32v32H0z" />
     </svg>
   );
 }

View File

@@ -94,8 +94,8 @@ function Camera({ name, config }) {
             sendAudio(audioValue === 'ON' ? 'OFF' : 'ON', true);
           },
         }
-        : {},
-    ],
+        : null,
+    ].filter((button) => button != null),
     [config, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
   );