Make object classification publish to tracked object update and add examples for state classification

Nicolas Mowen 2025-11-22 11:51:27 -07:00
parent bb844e43c2
commit 11ca1baddd
4 changed files with 76 additions and 1 deletion


@@ -159,11 +159,44 @@ Message published for updates to tracked object metadata, for example:
}
```
#### Object Classification Update
Message published when [object classification](/configuration/custom_classification/object_classification) reaches consensus on a classification result.
**Sub label type:**
```json
{
"type": "classification",
"id": "1607123955.475377-mxklsc",
"camera": "front_door_cam",
"timestamp": 1607123958.748393,
"model": "person_classifier",
"sub_label": "delivery_person",
"score": 0.87
}
```
**Attribute type:**
```json
{
"type": "classification",
"id": "1607123955.475377-mxklsc",
"camera": "front_door_cam",
"timestamp": 1607123958.748393,
"model": "helmet_detector",
"attribute": "yes",
"score": 0.92
}
```
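As a rough sketch of consuming these messages, the following subscriber filters the `frigate/tracked_object_update` topic documented in this section for classification results; the paho-mqtt dependency and the localhost broker address are assumptions for illustration:
```python
import json

import paho.mqtt.client as mqtt


def on_message(client, userdata, msg):
    update = json.loads(msg.payload)
    # Only handle classification consensus updates; description, face,
    # and lpr updates share this topic.
    if update.get("type") != "classification":
        return
    # Sub label results carry "sub_label"; attribute results carry "attribute".
    label = update.get("sub_label") or update.get("attribute")
    print(f"{update['camera']}: {update['model']} -> {label} ({update['score']:.2f})")


# paho-mqtt 2.x client; broker address is an assumption.
client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("frigate/tracked_object_update")
client.loop_forever()
```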
### `frigate/reviews`
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated.
An `update` with the same ID will be published when any of the following occurs (see the subscriber sketch after this list):
- The severity changes from `detection` to `alert`
- Additional objects are detected
- An object is recognized via face, lpr, etc.
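For instance, reusing the client from the sketch above, a handler that flags the `detection` to `alert` escalation could look like the following; the `type`/`before`/`after` payload layout is an assumption about the review message schema:
```python
def on_review(client, userdata, msg):
    review = json.loads(msg.payload)
    # Assumed layout: {"type": "new"|"update"|"end", "before": {...}, "after": {...}}
    if review.get("type") != "update":
        return
    before, after = review.get("before", {}), review.get("after", {})
    if before.get("severity") == "detection" and after.get("severity") == "alert":
        print(f"review {after.get('id')} escalated to alert")


client.message_callback_add("frigate/reviews", on_review)
client.subscribe("frigate/reviews")
```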
@@ -308,6 +341,11 @@ Publishes transcribed text for audio detected on this camera.
**NOTE:** Requires audio detection and transcription to be enabled
### `frigate/<camera_name>/classification/<model_name>`
Publishes the current state detected by a state classification model for the camera. The topic name includes the model name as configured in your classification settings.
The published value is the detected state class name (e.g., `open`, `closed`, `on`, `off`). The state is published only when it changes, which avoids unnecessary MQTT traffic.
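As a brief sketch with the same client, a hypothetical `door_state` model on a `garage_cam` camera could be consumed like this:
```python
def on_state(client, userdata, msg):
    # Payload is the bare class name, e.g. b"open" or b"closed".
    print(f"garage door state: {msg.payload.decode()}")


# "garage_cam" and "door_state" are placeholder camera/model names.
client.message_callback_add("frigate/garage_cam/classification/door_state", on_state)
client.subscribe("frigate/garage_cam/classification/door_state")
```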
### `frigate/<camera_name>/enabled/set`
Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.
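For example, with the same client and a hypothetical camera name:
```python
# Expected payload values are the plain strings "ON" and "OFF".
client.publish("frigate/front_door_cam/enabled/set", "OFF")
client.publish("frigate/front_door_cam/enabled/set", "ON")
```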


@@ -1,6 +1,7 @@
"""Real time processor that works with classification tflite models."""
import datetime
import json
import logging
import os
from typing import Any
@@ -21,6 +22,7 @@ from frigate.config.classification import (
)
from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
from frigate.log import redirect_output_to_logger
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
from frigate.util.object import box_overlaps, calculate_region
@@ -284,6 +286,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
config: FrigateConfig,
model_config: CustomClassificationConfig,
sub_label_publisher: EventMetadataPublisher,
requestor: InterProcessRequestor,
metrics: DataProcessorMetrics,
):
super().__init__(config, metrics)
@@ -292,6 +295,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
self.interpreter: Interpreter | None = None
self.sub_label_publisher = sub_label_publisher
self.requestor = requestor
self.tensor_input_details: dict[str, Any] | None = None
self.tensor_output_details: dict[str, Any] | None = None
self.classification_history: dict[str, list[tuple[str, float, float]]] = {}
@@ -486,6 +490,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
)
if consensus_label is not None:
camera = obj_data["camera"]
if (
self.model_config.object_config.classification_type
== ObjectClassificationType.sub_label
@@ -494,6 +500,20 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
(object_id, consensus_label, consensus_score),
EventMetadataTypeEnum.sub_label,
)
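# Publish the consensus sub label as a tracked_object_update (see the docs change above)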
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": now,
"model": self.model_config.name,
"sub_label": consensus_label,
"score": consensus_score,
}
),
)
elif (
self.model_config.object_config.classification_type
== ObjectClassificationType.attribute
@@ -507,6 +527,20 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
),
EventMetadataTypeEnum.attribute.value,
)
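# Same broadcast for attribute-type results, with "attribute" in place of "sub_label"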
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": now,
"model": self.model_config.name,
"attribute": consensus_label,
"score": consensus_score,
}
),
)
def handle_request(self, topic, request_data):
if topic == EmbeddingsRequestEnum.reload_classification_model.value:


@@ -195,6 +195,7 @@ class EmbeddingMaintainer(threading.Thread):
self.config,
model_config,
self.event_metadata_publisher,
self.requestor,
self.metrics,
)
)
) )
@@ -339,6 +340,7 @@ class EmbeddingMaintainer(threading.Thread):
self.config,
model_config,
self.event_metadata_publisher,
self.requestor,
self.metrics,
)


@@ -30,3 +30,4 @@ class TrackedObjectUpdateTypesEnum(str, Enum):
description = "description"
face = "face"
lpr = "lpr"
classification = "classification"