Save event to db

This commit is contained in:
Nick Mowen 2023-06-18 17:11:11 -06:00
parent b5acd6a287
commit 734eb07a23
3 changed files with 58 additions and 132 deletions

View File

@@ -1,126 +0,0 @@
import datetime
import logging
import multiprocessing as mp
import queue
import random
import signal
import string
import threading
import numpy as np
from setproctitle import setproctitle
from frigate.config import CameraConfig, AudioModelConfig
from frigate.object_detection import RemoteObjectDetector
from frigate.util import listen, SharedMemoryFrameManager
logger = logging.getLogger(__name__)
def capture_audio(
    name: str,
    model_config: AudioModelConfig,
    process_info,
):
    """Continuously read raw audio chunks from a named pipe into shared memory.

    Each chunk is stored in a SharedMemoryFrameManager frame named
    ``{name}-audio{timestamp}`` and the timestamp is pushed onto
    ``process_info["audio_queue"]`` for the detection process to consume.
    Runs until SIGTERM/SIGINT sets the stop event.

    Args:
        name: camera name, used to derive the pipe path and frame keys.
        model_config: provides ``duration`` and ``sample_rate``.
        process_info: dict containing the shared ``audio_queue``.
    """
    stop_event = mp.Event()

    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = f"capture:{name}"
    setproctitle(f"frigate.capture:{name}")
    listen()

    # 2 bytes per sample (16-bit PCM), so duration * rate * 2 bytes per chunk.
    chunk_size = int(round(model_config.duration * model_config.sample_rate * 2))
    key = f"{name}-audio"
    audio_queue = process_info["audio_queue"]
    frame_manager = SharedMemoryFrameManager()
    current_frame = mp.Value("d", 0.0)

    # NOTE(review): assumes an external process (ffmpeg) writes PCM to this
    # fifo — confirm the writer is started before this loop.
    # BUGFIX: use a context manager so the pipe is closed on shutdown.
    with open(f"/tmp/{key}", "rb") as pipe:
        while not stop_event.is_set():
            current_frame.value = datetime.datetime.now().timestamp()
            frame_name = f"{key}{current_frame.value}"
            frame_buffer = frame_manager.create(frame_name, chunk_size)

            try:
                data = pipe.read(chunk_size)
            except Exception:
                # BUGFIX: delete the just-created frame before skipping;
                # previously the shared-memory segment leaked on every
                # failed read.
                frame_manager.delete(frame_name)
                continue

            if data is None or len(data) < chunk_size:
                # Short read (writer not ready, or pipe closed): the buffer
                # assignment would raise, so discard the frame instead.
                frame_manager.delete(frame_name)
                continue

            frame_buffer[:] = data

            # if the queue is full, skip this frame
            if audio_queue.full():
                frame_manager.delete(frame_name)
                continue

            # close our handle (the consumer re-opens the frame by name)
            frame_manager.close(frame_name)
            # hand the timestamp to the detection process
            audio_queue.put(current_frame.value)
def process_audio(
    name: str,
    camera_config: CameraConfig,
    model_config: AudioModelConfig,
    labelmap,
    detection_queue: mp.Queue,
    result_connection,
    process_info,
):
    """Consume audio frames from shared memory and run sound detection.

    Pulls frame timestamps off ``process_info["audio_queue"]``, looks the
    PCM samples up in shared memory, normalizes them to float32 and feeds
    them to the remote detector.  Detections are filtered against the
    camera's tracked labels and per-label score filters, then logged.
    Runs until SIGTERM/SIGINT sets the stop event.
    """
    stop_event = mp.Event()

    def on_signal(signum, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, on_signal)
    signal.signal(signal.SIGINT, on_signal)

    threading.current_thread().name = f"process:{name}"
    setproctitle(f"frigate.process:{name}")
    listen()

    # one int16 sample per time step
    shape = (int(round(model_config.duration * model_config.sample_rate)),)
    key = f"{name}-audio"
    audio_queue = process_info["audio_queue"]
    frame_manager = SharedMemoryFrameManager()
    detector = RemoteObjectDetector(
        key, labelmap, detection_queue, result_connection, model_config
    )

    # hoist config lookups out of the hot loop
    tracked_labels = camera_config.objects.track
    object_filters = camera_config.objects.filters

    while not stop_event.is_set():
        try:
            frame_time = audio_queue.get(True, 10)
        except queue.Empty:
            continue

        frame_name = f"{key}{frame_time}"
        audio = frame_manager.get(frame_name, shape, dtype=np.int16)

        if audio is None:
            logger.info(f"{key}: audio {frame_time} is not in memory store.")
            continue

        # scale 16-bit PCM into [-1.0, 1.0) float32 for the model
        waveform = (audio / 32768.0).astype(np.float32)

        for label, score, _ in detector.detect(waveform):
            if label not in tracked_labels:
                continue

            label_filters = object_filters.get(label)

            if label_filters and score < label_filters.min_score:
                continue

            logger.info(f"{label}: {score}")

        frame_manager.close(frame_name)

View File

@@ -1,10 +1,13 @@
"""Handle creating audio events."""
import datetime
import logging
import multiprocessing as mp
import numpy as np
import os
import random
import signal
import string
import subprocess as sp
import threading
from types import FrameType
@@ -20,6 +23,7 @@ from frigate.const import (
AUDIO_SAMPLE_RATE,
CACHE_DIR,
)
from frigate.events.maintainer import EventTypeEnum
from frigate.ffmpeg_presets import parse_preset_input
from frigate.object_detection import load_labels
from frigate.util import get_ffmpeg_arg_list, listen
@@ -51,7 +55,7 @@ def listen_to_audio(config: FrigateConfig, event_queue: mp.Queue) -> None:
for camera in config.cameras.values():
if camera.enabled and camera.audio.enabled:
AudioEventMaintainer(camera, stop_event).start()
AudioEventMaintainer(camera, event_queue, stop_event).start()
class AudioTfl:
@@ -110,10 +114,12 @@ class AudioTfl:
class AudioEventMaintainer(threading.Thread):
def __init__(
    self, camera: CameraConfig, event_queue: mp.Queue, stop_event: mp.Event
) -> None:
    """Thread that turns raw audio detections into Frigate events.

    Args:
        camera: config of the camera this maintainer watches.
        event_queue: queue consumed by the event maintainer process.
        stop_event: set when the process is shutting down.
    """
    # BUGFIX: the diff residue kept both the old and new signatures;
    # only the new one (with event_queue) is valid.
    threading.Thread.__init__(self)
    self.name = f"{camera.name}_audio_event_processor"
    self.config = camera
    self.queue = event_queue
    # in-flight audio detections keyed by label
    # (annotation fixed: was the malformed dict[dict[str, any]])
    self.detections: dict[str, dict[str, any]] = {}
    self.stop_event = stop_event
    self.detector = AudioTfl()
    # one int16 sample per time step of the model's input window
    self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
@@ -140,8 +146,24 @@ class AudioEventMaintainer(threading.Thread):
if label not in self.config.audio.listen:
continue
logger.error(f"Detected audio: {label} with score {score}")
# TODO handle valid detect
self.handle_detection(label, score)
def handle_detection(self, label: str, score: float) -> None:
    """Create or refresh the in-progress audio event for ``label``.

    A repeated detection only bumps ``last_detection`` on the existing
    entry; an unseen label creates a new event dict and announces it on
    the event queue so the maintainer inserts it into the database.
    """
    # BUGFIX: the original indexed self.detections[label] directly, which
    # raises KeyError the first time a label is detected; use .get().
    if self.detections.get(label) is not None:
        self.detections[label]["last_detection"] = datetime.datetime.now().timestamp()
    else:
        now = datetime.datetime.now().timestamp()
        # short random suffix keeps ids unique within the same second
        rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
        event_id = f"{now}-{rand_id}"

        self.detections[label] = {
            "id": event_id,
            "label": label,
            "camera": self.config.name,
            "start_time": now,
            "last_detection": now,
        }
        # BUGFIX: the consumer (EventProcessor.handle_audio_detection)
        # matches on "new"/"end"; the original sent "start", so audio
        # events were silently dropped and never saved to the database.
        self.queue.put(
            (EventTypeEnum.audio, "new", self.config.name, self.detections[label])
        )
def init_ffmpeg(self) -> None:
try:

View File

@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
class EventTypeEnum(str, Enum):
    """Source of an event: external API call, audio detection, or tracked object."""

    # commented-out duplicate of `audio` removed (dead diff residue)
    api = "api"
    audio = "audio"
    tracked_object = "tracked_object"
@@ -90,6 +92,8 @@ class EventProcessor(threading.Thread):
continue
self.handle_object_detection(event_type, camera, event_data)
elif source_type == EventTypeEnum.audio:
self.handle_audio_detection(event_type, camera, event_data)
elif source_type == EventTypeEnum.api:
self.handle_external_detection(event_type, event_data)
@@ -214,7 +216,35 @@ class EventProcessor(threading.Thread):
del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera))
def handle_external_detection(self, type: str, event_data: Event):
def handle_audio_detection(self, type: str, event_data: Event) -> None:
    """Upsert an audio event row in the database.

    ``type == "new"`` inserts the full event; ``type == "end"`` fills in
    only ``end_time``.  Either way the write is an insert with
    ON CONFLICT(id) DO UPDATE, so new/end ordering is not critical.
    """
    if type == "new":
        event = {
            Event.id: event_data["id"],
            Event.label: event_data["label"],
            Event.camera: event_data["camera"],
            Event.start_time: event_data["start_time"],
            Event.thumbnail: None,
            # NOTE(review): presumably recordings cover the event window,
            # making clip/snapshot always available — confirm.
            Event.has_clip: True,
            Event.has_snapshot: True,
            Event.zones: [],
            Event.data: {},
        }
    elif type == "end":
        event = {
            Event.id: event_data["id"],
            Event.end_time: event_data["end_time"],
        }
    else:
        # BUGFIX: an unexpected type previously fell through to the insert
        # with local `event` unbound, crashing with UnboundLocalError.
        logger.warning(f"Unexpected audio event type: {type}")
        return

    (
        Event.insert(event)
        .on_conflict(
            conflict_target=[Event.id],
            update=event,
        )
        .execute()
    )
def handle_external_detection(self, type: str, event_data: Event) -> None:
if type == "new":
event = {
Event.id: event_data["id"],