2023-07-01 16:18:33 +03:00
|
|
|
"""Handle creating audio events."""
|
|
|
|
|
|
|
|
|
|
import datetime
|
|
|
|
|
import logging
|
|
|
|
|
import threading
|
2023-07-26 13:51:45 +03:00
|
|
|
import time
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
from multiprocessing.managers import DictProxy
|
2025-06-24 20:41:11 +03:00
|
|
|
from multiprocessing.synchronize import Event as MpEvent
|
2025-08-25 21:40:21 +03:00
|
|
|
from typing import Tuple
|
2023-07-01 16:18:33 +03:00
|
|
|
|
|
|
|
|
import numpy as np
|
|
|
|
|
|
2024-02-19 16:26:59 +03:00
|
|
|
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
|
2024-02-15 03:24:36 +03:00
|
|
|
from frigate.comms.inter_process import InterProcessRequestor
|
2025-05-27 18:26:00 +03:00
|
|
|
from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig
|
2025-05-22 21:16:51 +03:00
|
|
|
from frigate.config.camera.updater import (
|
|
|
|
|
CameraConfigUpdateEnum,
|
|
|
|
|
CameraConfigUpdateSubscriber,
|
|
|
|
|
)
|
2023-07-01 16:18:33 +03:00
|
|
|
from frigate.const import (
|
|
|
|
|
AUDIO_DURATION,
|
|
|
|
|
AUDIO_FORMAT,
|
|
|
|
|
AUDIO_MAX_BIT_RANGE,
|
2023-07-17 14:07:15 +03:00
|
|
|
AUDIO_MIN_CONFIDENCE,
|
2023-07-01 16:18:33 +03:00
|
|
|
AUDIO_SAMPLE_RATE,
|
2025-08-25 21:40:21 +03:00
|
|
|
EXPIRE_AUDIO_ACTIVITY,
|
2025-07-18 20:23:06 +03:00
|
|
|
PROCESS_PRIORITY_HIGH,
|
2025-08-25 21:40:21 +03:00
|
|
|
UPDATE_AUDIO_ACTIVITY,
|
2023-07-01 16:18:33 +03:00
|
|
|
)
|
2025-06-03 14:53:48 +03:00
|
|
|
from frigate.data_processing.common.audio_transcription.model import (
|
|
|
|
|
AudioTranscriptionModelRunner,
|
|
|
|
|
)
|
2025-05-27 18:26:00 +03:00
|
|
|
from frigate.data_processing.real_time.audio_transcription import (
|
|
|
|
|
AudioTranscriptionRealTimeProcessor,
|
|
|
|
|
)
|
2023-07-01 16:18:33 +03:00
|
|
|
from frigate.ffmpeg_presets import parse_preset_input
|
2025-12-19 01:12:10 +03:00
|
|
|
from frigate.log import LogPipe, suppress_stderr_during
|
2025-04-15 16:55:38 +03:00
|
|
|
from frigate.object_detection.base import load_labels
|
2023-07-06 17:28:50 +03:00
|
|
|
from frigate.util.builtin import get_ffmpeg_arg_list
|
2025-06-13 20:09:51 +03:00
|
|
|
from frigate.util.process import FrigateProcess
|
2023-07-01 16:18:33 +03:00
|
|
|
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
from tflite_runtime.interpreter import Interpreter
|
|
|
|
|
except ModuleNotFoundError:
|
|
|
|
|
from tensorflow.lite.python.interpreter import Interpreter
|
|
|
|
|
|
|
|
|
|
|
2025-06-25 16:24:45 +03:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
2023-10-14 01:03:04 +03:00
|
|
|
def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
    """Build the ffmpeg argv that streams raw mono audio to stdout.

    Picks the first configured input carrying the "audio" role, resolves its
    input arguments (input-level preset, then input-level explicit args, then
    camera-level preset, then camera-level explicit args), and returns the
    full command for an ffmpeg process writing PCM samples to a pipe.
    """
    audio_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0]

    # First non-empty resolution wins.
    resolved_input_args = (
        parse_preset_input(audio_input.input_args, 1)
        or get_ffmpeg_arg_list(audio_input.input_args)
        or parse_preset_input(ffmpeg.input_args, 1)
        or get_ffmpeg_arg_list(ffmpeg.input_args)
    )

    command = [ffmpeg.ffmpeg_path, "-vn", "-threads", "1"]
    command += get_ffmpeg_arg_list(ffmpeg.global_args)
    command += resolved_input_args
    command += ["-i", audio_input.path]
    command += [
        "-threads",
        "1",
        "-f",
        f"{AUDIO_FORMAT}",
        "-ar",
        f"{AUDIO_SAMPLE_RATE}",
        "-ac",
        "1",
        "-y",
        "pipe:",
    ]
    return command
|
|
|
|
|
|
|
|
|
|
|
2025-06-13 20:09:51 +03:00
|
|
|
class AudioProcessor(FrigateProcess):
    """Dedicated process that runs one AudioEventMaintainer thread per camera.

    Owns the shared audio-transcription model runner (when transcription is
    enabled) and supervises the per-camera threads until stop is requested.
    """

    name = "frigate.audio_manager"

    def __init__(
        self,
        config: FrigateConfig,
        cameras: list[CameraConfig],
        camera_metrics: DictProxy,
        stop_event: MpEvent,
    ):
        super().__init__(
            stop_event, PROCESS_PRIORITY_HIGH, name="frigate.audio_manager", daemon=True
        )

        self.camera_metrics = camera_metrics
        self.cameras = cameras
        self.config = config

    def run(self) -> None:
        """Spawn per-camera audio threads, then block until the stop event fires."""
        self.pre_run_setup(self.config.logger)
        audio_threads: list[AudioEventMaintainer] = []

        threading.current_thread().name = "process:audio_manager"

        # The model runner is created once here and shared by every camera's
        # AudioEventMaintainer / transcription processor.
        if self.config.audio_transcription.enabled:
            self.transcription_model_runner = AudioTranscriptionModelRunner(
                self.config.audio_transcription.device,
                self.config.audio_transcription.model_size,
            )
        else:
            self.transcription_model_runner = None

        if len(self.cameras) == 0:
            return

        for camera in self.cameras:
            audio_thread = AudioEventMaintainer(
                camera,
                self.config,
                self.camera_metrics,
                self.transcription_model_runner,
                self.stop_event,
            )
            audio_threads.append(audio_thread)
            audio_thread.start()

        self.logger.info(f"Audio processor started (pid: {self.pid})")

        # Event.wait() with no timeout blocks until the stop event is set and
        # then returns True, so this loop body never executes; it simply parks
        # the process until shutdown.
        while not self.stop_event.wait():
            pass

        # Give each thread a short grace period, then a longer one with a log
        # message so a stuck thread is visible.
        for thread in audio_threads:
            thread.join(1)

            if thread.is_alive():
                self.logger.info(f"Waiting for thread {thread.name:s} to exit")
                thread.join(10)

        for thread in audio_threads:
            if thread.is_alive():
                self.logger.warning(f"Thread {thread.name} is still alive")

        self.logger.info("Exiting audio processor")
|
2023-07-01 16:18:33 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class AudioEventMaintainer(threading.Thread):
    """Per-camera thread that reads audio via ffmpeg and publishes detections.

    Responsibilities: keep the camera's audio ffmpeg process alive, compute
    RMS/dBFS levels, run the TFLite audio classifier on loud-enough chunks,
    publish detections and activity updates, and optionally feed live audio
    transcription.
    """

    def __init__(
        self,
        camera: CameraConfig,
        config: FrigateConfig,
        camera_metrics: DictProxy,
        audio_transcription_model_runner: AudioTranscriptionModelRunner | None,
        stop_event: threading.Event,
    ) -> None:
        """Set up the detector, ffmpeg command, IPC channels, and (optionally)
        the transcription processor thread for this camera."""
        super().__init__(name=f"{camera.name}_audio_event_processor")

        self.config = config
        self.camera_config = camera
        self.camera_metrics = camera_metrics
        self.stop_event = stop_event
        self.detector = AudioTfl(stop_event, self.camera_config.audio.num_threads)
        # One chunk is AUDIO_DURATION seconds of 16-bit mono samples,
        # hence the * 2 bytes per sample for chunk_size.
        self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
        self.chunk_size = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE * 2))
        self.logger = logging.getLogger(f"audio.{self.camera_config.name}")
        self.ffmpeg_cmd = get_ffmpeg_command(self.camera_config.ffmpeg)
        self.logpipe = LogPipe(f"ffmpeg.{self.camera_config.name}.audio")
        self.audio_listener = None
        self.audio_transcription_model_runner = audio_transcription_model_runner
        self.transcription_processor = None
        self.transcription_thread = None

        # create communication for audio detections
        self.requestor = InterProcessRequestor()
        self.config_subscriber = CameraConfigUpdateSubscriber(
            None,
            {self.camera_config.name: self.camera_config},
            [
                CameraConfigUpdateEnum.audio,
                CameraConfigUpdateEnum.enabled,
                CameraConfigUpdateEnum.audio_transcription,
            ],
        )
        self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio.value)

        if self.config.audio_transcription.enabled:
            # init the transcription processor for this camera
            self.transcription_processor = AudioTranscriptionRealTimeProcessor(
                config=self.config,
                camera_config=self.camera_config,
                requestor=self.requestor,
                model_runner=self.audio_transcription_model_runner,
                metrics=self.camera_metrics[self.camera_config.name],
                stop_event=self.stop_event,
            )

            self.transcription_thread = threading.Thread(
                target=self.transcription_processor.run,
                name=f"{self.camera_config.name}_transcription_processor",
                daemon=True,
            )
            self.transcription_thread.start()

        self.was_enabled = camera.enabled

    def detect_audio(self, audio) -> None:
        """Process one chunk of int16 audio: levels, detections, transcription."""
        if not self.camera_config.audio.enabled or self.stop_event.is_set():
            return

        audio_as_float = audio.astype(np.float32)
        rms, dBFS = self.calculate_audio_levels(audio_as_float)

        self.camera_metrics[self.camera_config.name].audio_rms.value = rms
        self.camera_metrics[self.camera_config.name].audio_dBFS.value = dBFS

        audio_detections: list[Tuple[str, float]] = []

        # only run audio detection when volume is above min_volume
        if rms >= self.camera_config.audio.min_volume:
            # create waveform relative to max range and look for detections
            waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
            model_detections = self.detector.detect(waveform)

            for label, score, _ in model_detections:
                self.logger.debug(
                    f"{self.camera_config.name} heard {label} with a score of {score}"
                )

                if label not in self.camera_config.audio.listen:
                    continue

                # per-label threshold from audio filters, defaulting to 0.8
                if score > dict(
                    (self.camera_config.audio.filters or {}).get(label, {})
                ).get("threshold", 0.8):
                    audio_detections.append((label, score))

        # send audio detection data (published even when empty so consumers
        # still receive the dBFS level each chunk)
        self.detection_publisher.publish(
            (
                self.camera_config.name,
                datetime.datetime.now().timestamp(),
                dBFS,
                [label for label, _ in audio_detections],
            )
        )

        # send audio activity update
        self.requestor.send_data(
            UPDATE_AUDIO_ACTIVITY,
            {self.camera_config.name: {"detections": audio_detections}},
        )

        # run audio transcription
        if self.transcription_processor is not None:
            if self.camera_config.audio_transcription.live_enabled:
                # process audio until we've reached the endpoint
                self.transcription_processor.process_audio(
                    {
                        "id": f"{self.camera_config.name}_audio",
                        "camera": self.camera_config.name,
                    },
                    audio,
                )
            else:
                self.transcription_processor.check_unload_model()

    def calculate_audio_levels(self, audio_as_float: np.float32) -> Tuple[float, float]:
        """Compute (rms, dBFS) for the chunk and publish both over IPC."""
        # Calculate RMS (Root-Mean-Square) which represents the average signal amplitude
        # Note: np.float32 isn't serializable, we must use np.float64 to publish the message
        rms = np.sqrt(np.mean(np.absolute(np.square(audio_as_float))))

        # Transform RMS to dBFS (decibels relative to full scale)
        if rms > 0:
            dBFS = 20 * np.log10(np.abs(rms) / AUDIO_MAX_BIT_RANGE)
        else:
            dBFS = 0

        self.requestor.send_data(f"{self.camera_config.name}/audio/dBFS", float(dBFS))
        self.requestor.send_data(f"{self.camera_config.name}/audio/rms", float(rms))

        return float(rms), float(dBFS)

    def start_or_restart_ffmpeg(self) -> None:
        """(Re)start the audio ffmpeg process and mark the camera audio online."""
        self.audio_listener = start_or_restart_ffmpeg(
            self.ffmpeg_cmd,
            self.logger,
            self.logpipe,
            self.chunk_size,
            self.audio_listener,
        )
        self.requestor.send_data(f"{self.camera_config.name}/status/audio", "online")

    def read_audio(self) -> None:
        """Read one chunk from ffmpeg stdout and feed it to detection.

        On EOF with a dead ffmpeg process, or on any read error, waits the
        configured retry interval, dumps ffmpeg's log pipe, and restarts.
        """

        def log_and_restart() -> None:
            if self.stop_event.is_set():
                return

            time.sleep(self.camera_config.ffmpeg.retry_interval)
            self.logpipe.dump()
            self.start_or_restart_ffmpeg()

        try:
            chunk = self.audio_listener.stdout.read(self.chunk_size)

            if not chunk:
                if self.audio_listener.poll() is not None:
                    self.requestor.send_data(
                        f"{self.camera_config.name}/status/audio", "offline"
                    )
                    self.logger.error("ffmpeg process is not running, restarting...")
                    log_and_restart()
                    return

                # process alive but no data yet; try again next loop iteration
                return

            audio = np.frombuffer(chunk, dtype=np.int16)
            self.detect_audio(audio)
        except Exception as e:
            self.logger.error(f"Error reading audio data from ffmpeg process: {e}")
            log_and_restart()

    def run(self) -> None:
        """Main loop: track enable/disable transitions, apply config updates,
        and continuously read audio until the stop event is set."""
        if self.camera_config.enabled:
            self.start_or_restart_ffmpeg()

        while not self.stop_event.is_set():
            enabled = self.camera_config.enabled
            if enabled != self.was_enabled:
                if enabled:
                    self.logger.debug(
                        f"Enabling audio detections for {self.camera_config.name}"
                    )
                    self.start_or_restart_ffmpeg()
                else:
                    self.requestor.send_data(
                        f"{self.camera_config.name}/status/audio", "disabled"
                    )
                    self.logger.debug(
                        f"Disabling audio detections for {self.camera_config.name}, ending events"
                    )
                    self.requestor.send_data(
                        EXPIRE_AUDIO_ACTIVITY, self.camera_config.name
                    )
                    stop_ffmpeg(self.audio_listener, self.logger)
                    self.audio_listener = None
                self.was_enabled = enabled
                continue

            if not enabled:
                time.sleep(0.1)
                continue

            # check if there is an updated config
            self.config_subscriber.check_for_updates()

            self.read_audio()

        # shutdown: stop ffmpeg, wait briefly for transcription, close IPC
        if self.audio_listener:
            stop_ffmpeg(self.audio_listener, self.logger)
        if self.transcription_thread:
            self.transcription_thread.join(timeout=2)
            if self.transcription_thread.is_alive():
                self.logger.warning(
                    f"Audio transcription thread {self.transcription_thread.name} is still alive"
                )
        self.logpipe.close()
        self.requestor.stop()
        self.config_subscriber.stop()
        self.detection_publisher.stop()
|
2024-09-27 15:53:23 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class AudioTfl:
    """Thin wrapper around the bundled TFLite CPU audio classification model."""

    def __init__(self, stop_event: threading.Event, num_threads=2):
        """Load labels and the TFLite interpreter.

        Args:
            stop_event: shared event; when set, detect() short-circuits.
            num_threads: TFLite interpreter thread count.
        """
        self.stop_event = stop_event
        self.num_threads = num_threads
        self.labels = load_labels("/audio-labelmap.txt", prefill=521)
        # Suppress TFLite delegate creation messages that bypass Python logging
        with suppress_stderr_during("tflite_interpreter_init"):
            self.interpreter = Interpreter(
                model_path="/cpu_audio_model.tflite",
                num_threads=self.num_threads,
            )
            self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def _detect_raw(self, tensor_input):
        """Run inference and return a fixed (20, 6) float32 detections array.

        Each filled row is [class_id, score, x1, y1, x2, y2], ordered by
        descending score; box fields are always -1 (audio has no geometry)
        and unused rows remain zero.
        """
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()
        detections = np.zeros((20, 6), np.float32)

        res = self.interpreter.get_tensor(self.tensor_output_details[0]["index"])[0]
        non_zero_indices = res > 0
        # top 20 classes by score, sorted descending, then drop zero scores
        class_ids = np.argpartition(-res, 20)[:20]
        class_ids = class_ids[np.argsort(-res[class_ids])]
        class_ids = class_ids[non_zero_indices[class_ids]]
        scores = res[class_ids]
        boxes = np.full((scores.shape[0], 4), -1, np.float32)
        count = len(scores)

        # Fill output rows until scores drop below the minimum confidence.
        # The previous `i == 20` guard here was dead code (class_ids holds at
        # most 20 entries, so i never exceeds 19); bound the range instead so
        # the output array can never be overrun.
        for i in range(min(count, len(detections))):
            if scores[i] < AUDIO_MIN_CONFIDENCE:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections

    def detect(self, tensor_input, threshold=AUDIO_MIN_CONFIDENCE):
        """Return (label, score, (x1, y1, x2, y2)) tuples at/above threshold.

        Returns an empty list immediately when the stop event is set.
        """
        detections = []

        if self.stop_event.is_set():
            return detections

        raw_detections = self._detect_raw(tensor_input)

        for d in raw_detections:
            # rows are score-ordered, so the first below-threshold row
            # (including zero padding rows) ends the scan
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        return detections
|