2024-10-07 23:30:45 +03:00
|
|
|
"""Maintain embeddings in SQLite-vec."""
|
2024-06-22 00:30:19 +03:00
|
|
|
|
|
|
|
|
import base64
|
2025-03-23 22:30:48 +03:00
|
|
|
import datetime
|
2024-06-22 00:30:19 +03:00
|
|
|
import logging
|
|
|
|
|
import threading
|
|
|
|
|
from multiprocessing.synchronize import Event as MpEvent
|
2025-10-02 21:48:11 +03:00
|
|
|
from typing import Any
|
2024-06-22 00:30:19 +03:00
|
|
|
|
|
|
|
|
from peewee import DoesNotExist
|
|
|
|
|
|
2025-10-23 22:27:28 +03:00
|
|
|
from frigate.comms.config_updater import ConfigSubscriber
|
2025-03-23 22:30:48 +03:00
|
|
|
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
|
2025-07-07 17:03:57 +03:00
|
|
|
from frigate.comms.embeddings_updater import (
|
|
|
|
|
EmbeddingsRequestEnum,
|
|
|
|
|
EmbeddingsResponder,
|
|
|
|
|
)
|
2024-09-24 17:14:51 +03:00
|
|
|
from frigate.comms.event_metadata_updater import (
|
2025-03-11 01:29:29 +03:00
|
|
|
EventMetadataPublisher,
|
2024-09-24 17:14:51 +03:00
|
|
|
EventMetadataSubscriber,
|
|
|
|
|
EventMetadataTypeEnum,
|
|
|
|
|
)
|
2024-06-22 00:30:19 +03:00
|
|
|
from frigate.comms.events_updater import EventEndSubscriber, EventUpdateSubscriber
|
|
|
|
|
from frigate.comms.inter_process import InterProcessRequestor
|
2025-02-21 16:51:37 +03:00
|
|
|
from frigate.comms.recordings_updater import (
|
|
|
|
|
RecordingsDataSubscriber,
|
|
|
|
|
RecordingsDataTypeEnum,
|
|
|
|
|
)
|
2025-08-03 16:33:09 +03:00
|
|
|
from frigate.comms.review_updater import ReviewDataSubscriber
|
2025-10-02 21:48:11 +03:00
|
|
|
from frigate.config import FrigateConfig
|
2025-03-23 22:30:48 +03:00
|
|
|
from frigate.config.camera.camera import CameraTypeEnum
|
2025-06-11 20:25:30 +03:00
|
|
|
from frigate.config.camera.updater import (
|
|
|
|
|
CameraConfigUpdateEnum,
|
|
|
|
|
CameraConfigUpdateSubscriber,
|
|
|
|
|
)
|
2025-02-21 16:51:37 +03:00
|
|
|
from frigate.data_processing.common.license_plate.model import (
|
|
|
|
|
LicensePlateModelRunner,
|
|
|
|
|
)
|
|
|
|
|
from frigate.data_processing.post.api import PostProcessorApi
|
2025-05-27 18:26:00 +03:00
|
|
|
from frigate.data_processing.post.audio_transcription import (
|
|
|
|
|
AudioTranscriptionPostProcessor,
|
|
|
|
|
)
|
2025-02-21 16:51:37 +03:00
|
|
|
from frigate.data_processing.post.license_plate import (
|
|
|
|
|
LicensePlatePostProcessor,
|
|
|
|
|
)
|
2025-10-02 21:48:11 +03:00
|
|
|
from frigate.data_processing.post.object_descriptions import ObjectDescriptionProcessor
|
2025-08-03 16:33:09 +03:00
|
|
|
from frigate.data_processing.post.review_descriptions import ReviewDescriptionProcessor
|
2025-07-07 17:03:57 +03:00
|
|
|
from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor
|
2025-01-10 22:44:30 +03:00
|
|
|
from frigate.data_processing.real_time.api import RealTimeProcessorApi
|
2025-02-21 16:51:37 +03:00
|
|
|
from frigate.data_processing.real_time.bird import BirdRealTimeProcessor
|
2025-05-23 17:46:53 +03:00
|
|
|
from frigate.data_processing.real_time.custom_classification import (
|
|
|
|
|
CustomObjectClassificationProcessor,
|
|
|
|
|
CustomStateClassificationProcessor,
|
|
|
|
|
)
|
2025-02-21 16:51:37 +03:00
|
|
|
from frigate.data_processing.real_time.face import FaceRealTimeProcessor
|
|
|
|
|
from frigate.data_processing.real_time.license_plate import (
|
|
|
|
|
LicensePlateRealTimeProcessor,
|
2025-02-11 23:45:13 +03:00
|
|
|
)
|
2025-02-21 16:51:37 +03:00
|
|
|
from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
2025-03-11 01:29:29 +03:00
|
|
|
from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
|
2026-02-27 18:35:33 +03:00
|
|
|
from frigate.genai import GenAIClientManager
|
2025-08-13 01:27:35 +03:00
|
|
|
from frigate.models import Event, Recordings, ReviewSegment, Trigger
|
2024-10-10 18:42:24 +03:00
|
|
|
from frigate.util.builtin import serialize
|
2025-11-05 02:06:14 +03:00
|
|
|
from frigate.util.file import get_event_thumbnail_bytes
|
2025-10-02 21:48:11 +03:00
|
|
|
from frigate.util.image import SharedMemoryFrameManager
|
2024-06-22 00:30:19 +03:00
|
|
|
|
2024-10-07 23:30:45 +03:00
|
|
|
from .embeddings import Embeddings
|
2024-06-22 00:30:19 +03:00
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
2024-10-20 23:14:51 +03:00
|
|
|
MAX_THUMBNAILS = 10
|
|
|
|
|
|
2024-06-22 00:30:19 +03:00
|
|
|
|
|
|
|
|
class EmbeddingMaintainer(threading.Thread):
|
|
|
|
|
"""Handle embedding queue and post event updates."""
|
|
|
|
|
|
|
|
|
|
def __init__(
|
|
|
|
|
self,
|
|
|
|
|
config: FrigateConfig,
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
metrics: DataProcessorMetrics | None,
|
2024-06-22 00:30:19 +03:00
|
|
|
stop_event: MpEvent,
|
|
|
|
|
) -> None:
|
2024-10-11 00:37:43 +03:00
|
|
|
super().__init__(name="embeddings_maintainer")
|
2024-06-22 00:30:19 +03:00
|
|
|
self.config = config
|
2025-01-05 17:47:57 +03:00
|
|
|
self.metrics = metrics
|
2025-02-28 21:43:08 +03:00
|
|
|
self.embeddings = None
|
2025-06-11 20:25:30 +03:00
|
|
|
self.config_updater = CameraConfigUpdateSubscriber(
|
|
|
|
|
self.config,
|
|
|
|
|
self.config.cameras,
|
2025-07-07 17:03:57 +03:00
|
|
|
[
|
|
|
|
|
CameraConfigUpdateEnum.add,
|
|
|
|
|
CameraConfigUpdateEnum.remove,
|
2025-08-10 16:38:04 +03:00
|
|
|
CameraConfigUpdateEnum.object_genai,
|
|
|
|
|
CameraConfigUpdateEnum.review_genai,
|
2025-07-07 17:03:57 +03:00
|
|
|
CameraConfigUpdateEnum.semantic_search,
|
|
|
|
|
],
|
2025-06-11 20:25:30 +03:00
|
|
|
)
|
2025-10-23 22:27:28 +03:00
|
|
|
self.classification_config_subscriber = ConfigSubscriber(
|
|
|
|
|
"config/classification/custom/"
|
|
|
|
|
)
|
2024-10-11 00:37:43 +03:00
|
|
|
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
# Configure Frigate DB
|
|
|
|
|
db = SqliteVecQueueDatabase(
|
|
|
|
|
config.database.path,
|
|
|
|
|
pragmas={
|
|
|
|
|
"auto_vacuum": "FULL", # Does not defragment database
|
|
|
|
|
"cache_size": -512 * 1000, # 512MB of cache
|
|
|
|
|
"synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
|
|
|
|
|
},
|
|
|
|
|
timeout=max(
|
|
|
|
|
60, 10 * len([c for c in config.cameras.values() if c.enabled])
|
|
|
|
|
),
|
|
|
|
|
load_vec_extension=True,
|
|
|
|
|
)
|
2025-08-13 01:27:35 +03:00
|
|
|
models = [Event, Recordings, ReviewSegment, Trigger]
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
db.bind(models)
|
|
|
|
|
|
2025-02-28 21:43:08 +03:00
|
|
|
if config.semantic_search.enabled:
|
|
|
|
|
self.embeddings = Embeddings(config, db, metrics)
|
|
|
|
|
|
|
|
|
|
# Check if we need to re-index events
|
|
|
|
|
if config.semantic_search.reindex:
|
|
|
|
|
self.embeddings.reindex()
|
2024-10-11 00:37:43 +03:00
|
|
|
|
2025-07-07 17:03:57 +03:00
|
|
|
# Sync semantic search triggers in db with config
|
|
|
|
|
self.embeddings.sync_triggers()
|
|
|
|
|
|
2025-02-21 16:51:37 +03:00
|
|
|
# create communication for updating event descriptions
|
|
|
|
|
self.requestor = InterProcessRequestor()
|
|
|
|
|
|
2024-06-22 00:30:19 +03:00
|
|
|
self.event_subscriber = EventUpdateSubscriber()
|
|
|
|
|
self.event_end_subscriber = EventEndSubscriber()
|
2025-03-11 01:29:29 +03:00
|
|
|
self.event_metadata_publisher = EventMetadataPublisher()
|
2024-09-24 17:14:51 +03:00
|
|
|
self.event_metadata_subscriber = EventMetadataSubscriber(
|
|
|
|
|
EventMetadataTypeEnum.regenerate_description
|
|
|
|
|
)
|
2025-02-21 16:51:37 +03:00
|
|
|
self.recordings_subscriber = RecordingsDataSubscriber(
|
2025-09-28 19:52:14 +03:00
|
|
|
RecordingsDataTypeEnum.saved
|
2025-02-21 16:51:37 +03:00
|
|
|
)
|
2025-08-03 16:33:09 +03:00
|
|
|
self.review_subscriber = ReviewDataSubscriber("")
|
2025-08-08 15:08:37 +03:00
|
|
|
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video.value)
|
2024-10-10 18:42:24 +03:00
|
|
|
self.embeddings_responder = EmbeddingsResponder()
|
2024-06-22 00:30:19 +03:00
|
|
|
self.frame_manager = SharedMemoryFrameManager()
|
2025-02-21 16:51:37 +03:00
|
|
|
|
2025-05-13 17:27:20 +03:00
|
|
|
self.detected_license_plates: dict[str, dict[str, Any]] = {}
|
2026-02-27 18:35:33 +03:00
|
|
|
self.genai_manager = GenAIClientManager(config)
|
2025-02-21 16:51:37 +03:00
|
|
|
|
|
|
|
|
# model runners to share between realtime and post processors
|
|
|
|
|
if self.config.lpr.enabled:
|
2025-04-08 04:30:08 +03:00
|
|
|
lpr_model_runner = LicensePlateModelRunner(
|
2025-04-15 18:40:12 +03:00
|
|
|
self.requestor,
|
|
|
|
|
device=self.config.lpr.device,
|
|
|
|
|
model_size=self.config.lpr.model_size,
|
2025-04-08 04:30:08 +03:00
|
|
|
)
|
2025-02-21 16:51:37 +03:00
|
|
|
|
|
|
|
|
# realtime processors
|
|
|
|
|
self.realtime_processors: list[RealTimeProcessorApi] = []
|
2024-10-23 01:05:48 +03:00
|
|
|
|
2025-01-10 18:39:24 +03:00
|
|
|
if self.config.face_recognition.enabled:
|
2025-11-03 19:05:03 +03:00
|
|
|
logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
|
2025-03-11 01:29:29 +03:00
|
|
|
self.realtime_processors.append(
|
|
|
|
|
FaceRealTimeProcessor(
|
2025-04-29 18:02:50 +03:00
|
|
|
self.config, self.requestor, self.event_metadata_publisher, metrics
|
2025-03-11 01:29:29 +03:00
|
|
|
)
|
|
|
|
|
)
|
2025-11-03 19:05:03 +03:00
|
|
|
logger.debug("FaceRealTimeProcessor initialized successfully")
|
2024-10-23 01:05:48 +03:00
|
|
|
|
2025-01-13 18:09:04 +03:00
|
|
|
if self.config.classification.bird.enabled:
|
2025-03-11 01:29:29 +03:00
|
|
|
self.realtime_processors.append(
|
|
|
|
|
BirdRealTimeProcessor(
|
|
|
|
|
self.config, self.event_metadata_publisher, metrics
|
|
|
|
|
)
|
|
|
|
|
)
|
2025-01-13 18:09:04 +03:00
|
|
|
|
2025-02-11 23:45:13 +03:00
|
|
|
if self.config.lpr.enabled:
|
2025-02-21 16:51:37 +03:00
|
|
|
self.realtime_processors.append(
|
|
|
|
|
LicensePlateRealTimeProcessor(
|
2025-03-11 01:29:29 +03:00
|
|
|
self.config,
|
2025-04-29 01:43:03 +03:00
|
|
|
self.requestor,
|
2025-03-11 01:29:29 +03:00
|
|
|
self.event_metadata_publisher,
|
|
|
|
|
metrics,
|
|
|
|
|
lpr_model_runner,
|
|
|
|
|
self.detected_license_plates,
|
2025-02-21 16:51:37 +03:00
|
|
|
)
|
|
|
|
|
)
|
|
|
|
|
|
2025-05-30 02:51:32 +03:00
|
|
|
for model_config in self.config.classification.custom.values():
|
2025-05-23 17:46:53 +03:00
|
|
|
self.realtime_processors.append(
|
2025-05-24 19:18:46 +03:00
|
|
|
CustomStateClassificationProcessor(
|
2025-05-30 02:51:32 +03:00
|
|
|
self.config, model_config, self.requestor, self.metrics
|
2025-05-24 19:18:46 +03:00
|
|
|
)
|
|
|
|
|
if model_config.state_config != None
|
2025-05-23 17:46:53 +03:00
|
|
|
else CustomObjectClassificationProcessor(
|
|
|
|
|
self.config,
|
2025-05-24 19:18:46 +03:00
|
|
|
model_config,
|
2025-05-23 17:46:53 +03:00
|
|
|
self.event_metadata_publisher,
|
2025-11-23 18:40:25 +03:00
|
|
|
self.requestor,
|
2025-05-23 17:46:53 +03:00
|
|
|
self.metrics,
|
|
|
|
|
)
|
|
|
|
|
)
|
|
|
|
|
|
2025-02-21 16:51:37 +03:00
|
|
|
# post processors
|
|
|
|
|
self.post_processors: list[PostProcessorApi] = []
|
|
|
|
|
|
2026-02-27 18:35:33 +03:00
|
|
|
if self.genai_manager.vision_client is not None and any(
|
2025-12-24 17:03:09 +03:00
|
|
|
c.review.genai.enabled_in_config for c in self.config.cameras.values()
|
|
|
|
|
):
|
2025-08-10 14:57:54 +03:00
|
|
|
self.post_processors.append(
|
|
|
|
|
ReviewDescriptionProcessor(
|
2026-02-27 18:35:33 +03:00
|
|
|
self.config,
|
|
|
|
|
self.requestor,
|
|
|
|
|
self.metrics,
|
|
|
|
|
self.genai_manager.vision_client,
|
2025-08-10 14:57:54 +03:00
|
|
|
)
|
|
|
|
|
)
|
|
|
|
|
|
2025-02-21 16:51:37 +03:00
|
|
|
if self.config.lpr.enabled:
|
|
|
|
|
self.post_processors.append(
|
|
|
|
|
LicensePlatePostProcessor(
|
2025-03-11 01:29:29 +03:00
|
|
|
self.config,
|
2025-04-29 01:43:03 +03:00
|
|
|
self.requestor,
|
2025-03-11 01:29:29 +03:00
|
|
|
self.event_metadata_publisher,
|
|
|
|
|
metrics,
|
|
|
|
|
lpr_model_runner,
|
|
|
|
|
self.detected_license_plates,
|
2025-02-21 16:51:37 +03:00
|
|
|
)
|
|
|
|
|
)
|
2025-02-11 23:45:13 +03:00
|
|
|
|
2025-10-09 02:06:03 +03:00
|
|
|
if self.config.audio_transcription.enabled and any(
|
2025-05-27 18:26:00 +03:00
|
|
|
c.enabled_in_config and c.audio_transcription.enabled
|
|
|
|
|
for c in self.config.cameras.values()
|
|
|
|
|
):
|
|
|
|
|
self.post_processors.append(
|
2025-11-07 16:53:27 +03:00
|
|
|
AudioTranscriptionPostProcessor(
|
|
|
|
|
self.config, self.requestor, self.embeddings, metrics
|
|
|
|
|
)
|
2025-05-27 18:26:00 +03:00
|
|
|
)
|
|
|
|
|
|
2025-10-02 21:48:11 +03:00
|
|
|
semantic_trigger_processor: SemanticTriggerProcessor | None = None
|
2025-07-07 17:03:57 +03:00
|
|
|
if self.config.semantic_search.enabled:
|
2025-10-02 21:48:11 +03:00
|
|
|
semantic_trigger_processor = SemanticTriggerProcessor(
|
|
|
|
|
db,
|
|
|
|
|
self.config,
|
|
|
|
|
self.requestor,
|
2025-10-29 00:13:04 +03:00
|
|
|
self.event_metadata_publisher,
|
2025-10-02 21:48:11 +03:00
|
|
|
metrics,
|
|
|
|
|
self.embeddings,
|
|
|
|
|
)
|
|
|
|
|
self.post_processors.append(semantic_trigger_processor)
|
|
|
|
|
|
2026-02-27 18:35:33 +03:00
|
|
|
if self.genai_manager.vision_client is not None and any(
|
2025-12-24 17:03:09 +03:00
|
|
|
c.objects.genai.enabled_in_config for c in self.config.cameras.values()
|
|
|
|
|
):
|
2025-07-07 17:03:57 +03:00
|
|
|
self.post_processors.append(
|
2025-10-02 21:48:11 +03:00
|
|
|
ObjectDescriptionProcessor(
|
2025-07-07 17:03:57 +03:00
|
|
|
self.config,
|
|
|
|
|
self.embeddings,
|
2025-10-02 21:48:11 +03:00
|
|
|
self.requestor,
|
|
|
|
|
self.metrics,
|
2026-02-27 18:35:33 +03:00
|
|
|
self.genai_manager.vision_client,
|
2025-10-02 21:48:11 +03:00
|
|
|
semantic_trigger_processor,
|
2025-07-07 17:03:57 +03:00
|
|
|
)
|
|
|
|
|
)
|
|
|
|
|
|
2024-06-22 00:30:19 +03:00
|
|
|
self.stop_event = stop_event
|
|
|
|
|
|
2025-02-21 16:51:37 +03:00
|
|
|
# recordings data
|
|
|
|
|
self.recordings_available_through: dict[str, float] = {}
|
|
|
|
|
|
2024-06-22 00:30:19 +03:00
|
|
|
def run(self) -> None:
|
2024-10-07 23:30:45 +03:00
|
|
|
"""Maintain a SQLite-vec database for semantic search."""
|
2024-06-22 00:30:19 +03:00
|
|
|
while not self.stop_event.is_set():
|
2025-06-11 20:25:30 +03:00
|
|
|
self.config_updater.check_for_updates()
|
2025-10-23 22:27:28 +03:00
|
|
|
self._check_classification_config_updates()
|
2024-10-10 18:42:24 +03:00
|
|
|
self._process_requests()
|
2024-06-22 00:30:19 +03:00
|
|
|
self._process_updates()
|
2025-02-21 16:51:37 +03:00
|
|
|
self._process_recordings_updates()
|
2025-08-03 16:33:09 +03:00
|
|
|
self._process_review_updates()
|
2025-05-23 17:46:53 +03:00
|
|
|
self._process_frame_updates()
|
2025-03-23 22:30:48 +03:00
|
|
|
self._expire_dedicated_lpr()
|
2024-06-22 00:30:19 +03:00
|
|
|
self._process_finalized()
|
2024-09-24 17:14:51 +03:00
|
|
|
self._process_event_metadata()
|
2024-06-22 00:30:19 +03:00
|
|
|
|
2025-06-11 20:25:30 +03:00
|
|
|
self.config_updater.stop()
|
2025-10-23 22:27:28 +03:00
|
|
|
self.classification_config_subscriber.stop()
|
2024-06-22 00:30:19 +03:00
|
|
|
self.event_subscriber.stop()
|
|
|
|
|
self.event_end_subscriber.stop()
|
2025-02-21 16:51:37 +03:00
|
|
|
self.recordings_subscriber.stop()
|
2025-03-23 22:30:48 +03:00
|
|
|
self.detection_subscriber.stop()
|
2025-03-11 01:29:29 +03:00
|
|
|
self.event_metadata_publisher.stop()
|
2024-09-24 17:14:51 +03:00
|
|
|
self.event_metadata_subscriber.stop()
|
2024-10-10 18:42:24 +03:00
|
|
|
self.embeddings_responder.stop()
|
2024-06-22 00:30:19 +03:00
|
|
|
self.requestor.stop()
|
|
|
|
|
logger.info("Exiting embeddings maintenance...")
|
|
|
|
|
|
2025-10-23 22:27:28 +03:00
|
|
|
def _check_classification_config_updates(self) -> None:
|
2025-11-03 18:01:30 +03:00
|
|
|
"""Check for classification config updates and add/remove processors."""
|
2025-10-23 22:27:28 +03:00
|
|
|
topic, model_config = self.classification_config_subscriber.check_for_update()
|
|
|
|
|
|
2025-11-03 18:01:30 +03:00
|
|
|
if topic:
|
2025-10-23 22:27:28 +03:00
|
|
|
model_name = topic.split("/")[-1]
|
2025-11-03 18:01:30 +03:00
|
|
|
|
|
|
|
|
if model_config is None:
|
|
|
|
|
self.realtime_processors = [
|
|
|
|
|
processor
|
|
|
|
|
for processor in self.realtime_processors
|
|
|
|
|
if not (
|
|
|
|
|
isinstance(
|
|
|
|
|
processor,
|
|
|
|
|
(
|
|
|
|
|
CustomStateClassificationProcessor,
|
|
|
|
|
CustomObjectClassificationProcessor,
|
|
|
|
|
),
|
2025-10-23 22:27:28 +03:00
|
|
|
)
|
2025-11-03 18:01:30 +03:00
|
|
|
and processor.model_config.name == model_name
|
|
|
|
|
)
|
|
|
|
|
]
|
2025-10-23 22:27:28 +03:00
|
|
|
|
2025-11-03 18:01:30 +03:00
|
|
|
logger.info(
|
|
|
|
|
f"Successfully removed classification processor for model: {model_name}"
|
2025-10-23 22:27:28 +03:00
|
|
|
)
|
|
|
|
|
else:
|
2025-11-03 18:01:30 +03:00
|
|
|
self.config.classification.custom[model_name] = model_config
|
|
|
|
|
|
|
|
|
|
# Check if processor already exists
|
|
|
|
|
for processor in self.realtime_processors:
|
|
|
|
|
if isinstance(
|
|
|
|
|
processor,
|
|
|
|
|
(
|
|
|
|
|
CustomStateClassificationProcessor,
|
|
|
|
|
CustomObjectClassificationProcessor,
|
|
|
|
|
),
|
|
|
|
|
):
|
|
|
|
|
if processor.model_config.name == model_name:
|
|
|
|
|
logger.debug(
|
|
|
|
|
f"Classification processor for model {model_name} already exists, skipping"
|
|
|
|
|
)
|
|
|
|
|
return
|
|
|
|
|
|
|
|
|
|
if model_config.state_config is not None:
|
|
|
|
|
processor = CustomStateClassificationProcessor(
|
|
|
|
|
self.config, model_config, self.requestor, self.metrics
|
|
|
|
|
)
|
|
|
|
|
else:
|
|
|
|
|
processor = CustomObjectClassificationProcessor(
|
|
|
|
|
self.config,
|
|
|
|
|
model_config,
|
|
|
|
|
self.event_metadata_publisher,
|
2025-11-23 18:40:25 +03:00
|
|
|
self.requestor,
|
2025-11-03 18:01:30 +03:00
|
|
|
self.metrics,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
self.realtime_processors.append(processor)
|
|
|
|
|
logger.info(
|
|
|
|
|
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
|
2025-10-23 22:27:28 +03:00
|
|
|
)
|
|
|
|
|
|
2024-10-10 18:42:24 +03:00
|
|
|
def _process_requests(self) -> None:
|
|
|
|
|
"""Process embeddings requests"""
|
|
|
|
|
|
2025-05-13 17:27:20 +03:00
|
|
|
def _handle_request(topic: str, data: dict[str, Any]) -> str:
|
2024-10-11 00:37:43 +03:00
|
|
|
try:
|
2025-02-28 21:43:08 +03:00
|
|
|
# First handle the embedding-specific topics when semantic search is enabled
|
|
|
|
|
if self.config.semantic_search.enabled:
|
|
|
|
|
if topic == EmbeddingsRequestEnum.embed_description.value:
|
|
|
|
|
return serialize(
|
|
|
|
|
self.embeddings.embed_description(
|
|
|
|
|
data["id"], data["description"]
|
|
|
|
|
),
|
|
|
|
|
pack=False,
|
|
|
|
|
)
|
|
|
|
|
elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
|
|
|
|
|
thumbnail = base64.b64decode(data["thumbnail"])
|
|
|
|
|
return serialize(
|
|
|
|
|
self.embeddings.embed_thumbnail(data["id"], thumbnail),
|
|
|
|
|
pack=False,
|
|
|
|
|
)
|
|
|
|
|
elif topic == EmbeddingsRequestEnum.generate_search.value:
|
|
|
|
|
return serialize(
|
|
|
|
|
self.embeddings.embed_description("", data, upsert=False),
|
|
|
|
|
pack=False,
|
|
|
|
|
)
|
2025-03-27 20:29:34 +03:00
|
|
|
elif topic == EmbeddingsRequestEnum.reindex.value:
|
|
|
|
|
response = self.embeddings.start_reindex()
|
|
|
|
|
return "started" if response else "in_progress"
|
2025-03-24 17:12:42 +03:00
|
|
|
|
2025-02-28 21:43:08 +03:00
|
|
|
processors = [self.realtime_processors, self.post_processors]
|
|
|
|
|
for processor_list in processors:
|
|
|
|
|
for processor in processor_list:
|
|
|
|
|
resp = processor.handle_request(topic, data)
|
2025-01-10 18:39:24 +03:00
|
|
|
if resp is not None:
|
|
|
|
|
return resp
|
2025-03-24 17:12:42 +03:00
|
|
|
|
2025-09-26 05:05:22 +03:00
|
|
|
logger.error(f"No processor handled the topic {topic}")
|
2025-03-24 17:12:42 +03:00
|
|
|
return None
|
2024-10-11 00:37:43 +03:00
|
|
|
except Exception as e:
|
2025-02-21 16:51:37 +03:00
|
|
|
logger.error(f"Unable to handle embeddings request {e}", exc_info=True)
|
2024-10-11 00:37:43 +03:00
|
|
|
|
|
|
|
|
self.embeddings_responder.check_for_request(_handle_request)
|
2024-10-10 18:42:24 +03:00
|
|
|
|
2024-06-22 00:30:19 +03:00
|
|
|
def _process_updates(self) -> None:
    """Process one tracked-object update from the event subscriber.

    Fetches the associated YUV frame, runs every realtime processor on
    it, forwards an "update" payload to any ObjectDescriptionProcessor,
    and releases the frame when done. Returns early when there is no
    update, the update is not a tracked object, or no processors are
    configured.
    """
    update = self.event_subscriber.check_for_update()

    if update is None:
        return

    source_type, _, camera, frame_name, data = update

    logger.debug(
        f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}"
    )

    if not camera or source_type != EventTypeEnum.tracked_object:
        logger.debug(
            f"Skipping update - camera: {camera}, source_type: {source_type}"
        )
        return

    if self.config.semantic_search.enabled:
        self.embeddings.update_stats()

    camera_config = self.config.cameras[camera]

    # no need to process updated objects if no processors are active
    if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
        logger.debug(
            f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}"
        )
        return

    # Create our own thumbnail based on the bounding box and the frame time
    # BUGFIX: yuv_frame must be pre-initialized — if frame_manager.get raises
    # FileNotFoundError, the name would otherwise be unbound and the
    # "is None" check below would raise NameError instead of returning.
    yuv_frame = None

    try:
        yuv_frame = self.frame_manager.get(
            frame_name, camera_config.frame_shape_yuv
        )
    except FileNotFoundError:
        logger.debug(f"Frame {frame_name} not found for camera {camera}")

    if yuv_frame is None:
        logger.debug(
            "Unable to process object update because frame is unavailable."
        )
        return

    logger.debug(
        f"Processing {len(self.realtime_processors)} realtime processors for object {data.get('id')} (label: {data.get('label')})"
    )

    for processor in self.realtime_processors:
        logger.debug(f"Calling process_frame on {processor.__class__.__name__}")
        processor.process_frame(data, yuv_frame)

    for processor in self.post_processors:
        if isinstance(processor, ObjectDescriptionProcessor):
            processor.process_data(
                {
                    "camera": camera,
                    "data": data,
                    "state": "update",
                    "yuv_frame": yuv_frame,
                },
                PostProcessDataEnum.tracked_object,
            )

    self.frame_manager.close(frame_name)
|
2024-06-22 00:30:19 +03:00
|
|
|
|
|
|
|
|
def _process_finalized(self) -> None:
    """Process the end of an event.

    Drains the event-end subscriber, expires the object from realtime
    processors, embeds the event thumbnail when the DB was updated, and
    fans the finalized event out to the configured post processors.
    """
    while True:
        ended = self.event_end_subscriber.check_for_update()

        # PEP 8: compare to None by identity, not equality
        if ended is None:
            break

        event_id, camera, updated_db = ended

        # expire in realtime processors
        for processor in self.realtime_processors:
            processor.expire_object(event_id, camera)

        thumbnail: bytes | None = None

        if updated_db:
            try:
                event: Event = Event.get(Event.id == event_id)
            except DoesNotExist:
                continue

            # Skip the event if not an object
            if event.data.get("type") != "object":
                continue

            # Extract valid thumbnail
            thumbnail = get_event_thumbnail_bytes(event)

            # Embed the thumbnail
            self._embed_thumbnail(event_id, thumbnail)

        # call any defined post processors
        for processor in self.post_processors:
            if isinstance(processor, LicensePlatePostProcessor):
                recordings_available = self.recordings_available_through.get(camera)

                if (
                    recordings_available is not None
                    and event_id in self.detected_license_plates
                    and self.config.cameras[camera].type != "lpr"
                ):
                    processor.process_data(
                        {
                            "event_id": event_id,
                            "camera": camera,
                            # reuse the value already fetched above instead
                            # of a second dict lookup
                            "recordings_available": recordings_available,
                            "obj_data": self.detected_license_plates[event_id][
                                "obj_data"
                            ],
                        },
                        PostProcessDataEnum.recording,
                    )
            elif isinstance(processor, AudioTranscriptionPostProcessor):
                continue
            elif isinstance(processor, SemanticTriggerProcessor):
                processor.process_data(
                    {"event_id": event_id, "camera": camera, "type": "image"},
                    PostProcessDataEnum.tracked_object,
                )
            elif isinstance(processor, ObjectDescriptionProcessor):
                if not updated_db:
                    # Still need to cleanup tracked events even if not processing
                    processor.cleanup_event(event_id)
                    continue

                processor.process_data(
                    {
                        "event": event,
                        "camera": camera,
                        "state": "finalize",
                        "thumbnail": thumbnail,
                    },
                    PostProcessDataEnum.tracked_object,
                )
            else:
                processor.process_data(
                    {"event_id": event_id, "camera": camera},
                    PostProcessDataEnum.tracked_object,
                )
|
|
|
|
|
|
2025-03-23 22:30:48 +03:00
|
|
|
def _expire_dedicated_lpr(self) -> None:
    """Remove plates not seen for longer than expiration timeout for dedicated lpr cameras."""
    now = datetime.datetime.now().timestamp()

    to_remove = []

    # NOTE: renamed loop variable from `id` to `plate_id` so the builtin
    # id() is not shadowed inside this method.
    for plate_id, data in self.detected_license_plates.items():
        last_seen = data.get("last_seen", 0)

        # entries without a last_seen timestamp can never expire
        if not last_seen:
            continue

        if now - last_seen > self.config.cameras[data["camera"]].lpr.expire_time:
            to_remove.append(plate_id)

    for plate_id in to_remove:
        # end the manual event before dropping local tracking state
        self.event_metadata_publisher.publish(
            (plate_id, now),
            EventMetadataTypeEnum.manual_event_end.value,
        )
        self.detected_license_plates.pop(plate_id)
|
|
|
|
|
|
2025-02-21 16:51:37 +03:00
|
|
|
def _process_recordings_updates(self) -> None:
    """Process recordings updates.

    Drains the recordings subscriber and records, per camera, the latest
    timestamp through which recordings are available.
    """
    while True:
        message = self.recordings_subscriber.check_for_update()

        if not message:
            break

        raw_topic, payload = message

        # a half-empty message means the queue is drained
        if not raw_topic or not payload:
            break

        if not str(raw_topic).endswith(RecordingsDataTypeEnum.saved.value):
            continue

        camera, recordings_available_through_timestamp, _ = payload

        self.recordings_available_through[camera] = (
            recordings_available_through_timestamp
        )

        logger.debug(
            f"{camera} now has recordings available through {recordings_available_through_timestamp}"
        )
|
2025-02-21 16:51:37 +03:00
|
|
|
|
2025-08-03 16:33:09 +03:00
|
|
|
def _process_review_updates(self) -> None:
    """Process review updates.

    Drains the review subscriber and forwards each update to every
    ReviewDescriptionProcessor post processor.
    """
    while True:
        review_updates = self.review_subscriber.check_for_update()

        # PEP 8: compare to None by identity, not equality
        if review_updates is None:
            break

        for processor in self.post_processors:
            if isinstance(processor, ReviewDescriptionProcessor):
                processor.process_data(review_updates, PostProcessDataEnum.review)
|
|
|
|
|
|
2024-09-24 17:14:51 +03:00
|
|
|
def _process_event_metadata(self):
    """Handle regenerate-description requests from the metadata subscriber."""
    # Check for regenerate description requests
    topic, payload = self.event_metadata_subscriber.check_for_update()

    # guard clauses: nothing queued, or no event id in the payload
    if topic is None:
        return

    event_id, source, force = payload

    if not event_id:
        return

    for processor in self.post_processors:
        if not isinstance(processor, ObjectDescriptionProcessor):
            continue

        processor.handle_request(
            "regenerate_description",
            {
                "event_id": event_id,
                "source": RegenerateDescriptionEnum(source),
                "force": force,
            },
        )
|
2024-09-24 17:14:51 +03:00
|
|
|
|
2025-05-23 17:46:53 +03:00
|
|
|
def _process_frame_updates(self) -> None:
    """Process one raw frame update from the detection subscriber.

    Used by features that consume whole frames rather than tracked
    objects: dedicated-LPR cameras and custom state classification
    models. Returns early when neither feature is active for the camera.
    """
    (topic, data) = self.detection_subscriber.check_for_update()

    if topic is None:
        return

    camera, frame_name, _, _, motion_boxes, _ = data

    if not camera or camera not in self.config.cameras:
        return

    camera_config = self.config.cameras[camera]

    # dedicated LPR applies only when the camera is typed lpr and is not
    # already tracking license_plate as a regular object
    dedicated_lpr_enabled = (
        camera_config.type == CameraTypeEnum.lpr
        and "license_plate" not in camera_config.objects.track
    )

    if not dedicated_lpr_enabled and len(self.config.classification.custom) == 0:
        # no active features that use this data
        return

    # BUGFIX: yuv_frame must be pre-initialized — if frame_manager.get raises
    # FileNotFoundError, the name would otherwise be unbound and the
    # "is None" check below would raise NameError instead of returning.
    yuv_frame = None

    try:
        yuv_frame = self.frame_manager.get(
            frame_name, camera_config.frame_shape_yuv
        )
    except FileNotFoundError:
        pass

    if yuv_frame is None:
        logger.debug(
            "Unable to process dedicated LPR update because frame is unavailable."
        )
        return

    for processor in self.realtime_processors:
        if (
            dedicated_lpr_enabled
            and len(motion_boxes) > 0
            and isinstance(processor, LicensePlateRealTimeProcessor)
        ):
            processor.process_frame(camera, yuv_frame, True)

        if isinstance(processor, CustomStateClassificationProcessor):
            processor.process_frame(
                {"camera": camera, "motion": motion_boxes}, yuv_frame
            )

    self.frame_manager.close(frame_name)
|
|
|
|
|
|
2024-10-07 23:30:45 +03:00
|
|
|
def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
    """Embed the thumbnail for an event.

    No-op when semantic search is disabled in the config.
    """
    if self.config.semantic_search.enabled:
        self.embeddings.embed_thumbnail(event_id, thumbnail)
|